// rig/providers/groq.rs

//! Groq API client and Rig integration
//!
//! # Example
//! ```
//! use rig::providers::groq;
//!
//! let client = groq::Client::new("YOUR_API_KEY");
//!
//! let llama = client.completion_model(groq::LLAMA_3_1_8B_INSTANT);
//! ```
11use super::openai::{send_compatible_streaming_request, CompletionResponse, TranscriptionResponse};
12use crate::client::{CompletionClient, TranscriptionClient};
13use crate::json_utils::merge;
14use crate::providers::openai;
15use crate::streaming::StreamingCompletionResponse;
16use crate::{
17    completion::{self, CompletionError, CompletionRequest},
18    json_utils,
19    message::{self, MessageError},
20    providers::openai::ToolDefinition,
21    transcription::{self, TranscriptionError},
22    OneOrMany,
23};
24use reqwest::multipart::Part;
25use rig::client::ProviderClient;
26use rig::impl_conversion_traits;
27use serde::{Deserialize, Serialize};
28use serde_json::{json, Value};
29
// ================================================================
// Main Groq Client
// ================================================================
const GROQ_API_BASE_URL: &str = "https://api.groq.com/openai/v1";

/// Client for the Groq API (OpenAI-compatible endpoints).
///
/// Holds the base URL and a pre-configured `reqwest` client whose default
/// headers carry the `Authorization: Bearer <key>` credential.
#[derive(Clone, Debug)]
pub struct Client {
    // Base API URL, e.g. `https://api.groq.com/openai/v1`.
    base_url: String,
    // HTTP client with the Authorization header pre-installed.
    http_client: reqwest::Client,
}
40
41impl Client {
42    /// Create a new Groq client with the given API key.
43    pub fn new(api_key: &str) -> Self {
44        Self::from_url(api_key, GROQ_API_BASE_URL)
45    }
46
47    /// Create a new Groq client with the given API key and base API URL.
48    pub fn from_url(api_key: &str, base_url: &str) -> Self {
49        Self {
50            base_url: base_url.to_string(),
51            http_client: reqwest::Client::builder()
52                .default_headers({
53                    let mut headers = reqwest::header::HeaderMap::new();
54                    headers.insert(
55                        "Authorization",
56                        format!("Bearer {api_key}")
57                            .parse()
58                            .expect("Bearer token should parse"),
59                    );
60                    headers
61                })
62                .build()
63                .expect("Groq reqwest client should build"),
64        }
65    }
66
67    fn post(&self, path: &str) -> reqwest::RequestBuilder {
68        let url = format!("{}/{}", self.base_url, path).replace("//", "/");
69        self.http_client.post(url)
70    }
71}
72
73impl ProviderClient for Client {
74    /// Create a new Groq client from the `GROQ_API_KEY` environment variable.
75    /// Panics if the environment variable is not set.
76    fn from_env() -> Self {
77        let api_key = std::env::var("GROQ_API_KEY").expect("GROQ_API_KEY not set");
78        Self::new(&api_key)
79    }
80}
81
impl CompletionClient for Client {
    type CompletionModel = CompletionModel;

    /// Create a completion model with the given name.
    ///
    /// # Example
    /// ```
    /// use rig::providers::groq::{Client, self};
    ///
    /// // Initialize the Groq client
    /// let groq = Client::new("your-groq-api-key");
    ///
    /// let llama = groq.completion_model(groq::LLAMA_3_1_8B_INSTANT);
    /// ```
    fn completion_model(&self, model: &str) -> CompletionModel {
        CompletionModel::new(self.clone(), model)
    }
}
100
impl TranscriptionClient for Client {
    type TranscriptionModel = TranscriptionModel;

    /// Create a transcription model with the given name.
    ///
    /// # Example
    /// ```
    /// use rig::providers::groq::{Client, self};
    ///
    /// // Initialize the Groq client
    /// let groq = Client::new("your-groq-api-key");
    ///
    /// let whisper = groq.transcription_model(groq::WHISPER_LARGE_V3);
    /// ```
    fn transcription_model(&self, model: &str) -> TranscriptionModel {
        TranscriptionModel::new(self.clone(), model)
    }
}
119
// NOTE(review): presumably generates stub "unsupported capability"
// conversions for providers lacking these endpoints — confirm against the
// `impl_conversion_traits!` macro definition.
impl_conversion_traits!(
    AsEmbeddings,
    AsImageGeneration,
    AsAudioGeneration for Client
);
125
/// Error payload returned by the Groq API.
#[derive(Debug, Deserialize)]
struct ApiErrorResponse {
    message: String,
}

/// A Groq API response body: either the expected payload `T` or an error.
/// `untagged` makes serde try each variant in order until one deserializes.
#[derive(Debug, Deserialize)]
#[serde(untagged)]
enum ApiResponse<T> {
    Ok(T),
    Err(ApiErrorResponse),
}
137
/// Wire-format chat message: a role string plus optional text content.
#[derive(Debug, Serialize, Deserialize)]
pub struct Message {
    // "system", "user" or "assistant".
    pub role: String,
    // Plain-text content; `None` when the message carries no text.
    pub content: Option<String>,
}
143
144impl TryFrom<Message> for message::Message {
145    type Error = message::MessageError;
146
147    fn try_from(message: Message) -> Result<Self, Self::Error> {
148        match message.role.as_str() {
149            "user" => Ok(Self::User {
150                content: OneOrMany::one(
151                    message
152                        .content
153                        .map(|content| message::UserContent::text(&content))
154                        .ok_or_else(|| {
155                            message::MessageError::ConversionError("Empty user message".to_string())
156                        })?,
157                ),
158            }),
159            "assistant" => Ok(Self::Assistant {
160                content: OneOrMany::one(
161                    message
162                        .content
163                        .map(|content| message::AssistantContent::text(&content))
164                        .ok_or_else(|| {
165                            message::MessageError::ConversionError(
166                                "Empty assistant message".to_string(),
167                            )
168                        })?,
169                ),
170            }),
171            _ => Err(message::MessageError::ConversionError(format!(
172                "Unknown role: {}",
173                message.role
174            ))),
175        }
176    }
177}
178
179impl TryFrom<message::Message> for Message {
180    type Error = message::MessageError;
181
182    fn try_from(message: message::Message) -> Result<Self, Self::Error> {
183        match message {
184            message::Message::User { content } => Ok(Self {
185                role: "user".to_string(),
186                content: content.iter().find_map(|c| match c {
187                    message::UserContent::Text(text) => Some(text.text.clone()),
188                    _ => None,
189                }),
190            }),
191            message::Message::Assistant { content } => {
192                let mut text_content: Option<String> = None;
193
194                for c in content.iter() {
195                    match c {
196                        message::AssistantContent::Text(text) => {
197                            text_content = Some(
198                                text_content
199                                    .map(|mut existing| {
200                                        existing.push('\n');
201                                        existing.push_str(&text.text);
202                                        existing
203                                    })
204                                    .unwrap_or_else(|| text.text.clone()),
205                            );
206                        }
207                        message::AssistantContent::ToolCall(_tool_call) => {
208                            return Err(MessageError::ConversionError(
209                                "Tool calls do not exist on this message".into(),
210                            ))
211                        }
212                    }
213                }
214
215                Ok(Self {
216                    role: "assistant".to_string(),
217                    content: text_content,
218                })
219            }
220        }
221    }
222}
223
// ================================================================
// Groq Completion API
// ================================================================
// NOTE(review): Groq's model catalog changes over time; several `-preview`
// models below may have been decommissioned — verify against the live list.
/// The `deepseek-r1-distill-llama-70b` model. Used for chat completion.
pub const DEEPSEEK_R1_DISTILL_LLAMA_70B: &str = "deepseek-r1-distill-llama-70b";
/// The `gemma2-9b-it` model. Used for chat completion.
pub const GEMMA2_9B_IT: &str = "gemma2-9b-it";
/// The `llama-3.1-8b-instant` model. Used for chat completion.
pub const LLAMA_3_1_8B_INSTANT: &str = "llama-3.1-8b-instant";
/// The `llama-3.2-11b-vision-preview` model. Used for chat completion.
pub const LLAMA_3_2_11B_VISION_PREVIEW: &str = "llama-3.2-11b-vision-preview";
/// The `llama-3.2-1b-preview` model. Used for chat completion.
pub const LLAMA_3_2_1B_PREVIEW: &str = "llama-3.2-1b-preview";
/// The `llama-3.2-3b-preview` model. Used for chat completion.
pub const LLAMA_3_2_3B_PREVIEW: &str = "llama-3.2-3b-preview";
/// The `llama-3.2-90b-vision-preview` model. Used for chat completion.
pub const LLAMA_3_2_90B_VISION_PREVIEW: &str = "llama-3.2-90b-vision-preview";
/// The `llama-3.2-70b-specdec` model. Used for chat completion.
pub const LLAMA_3_2_70B_SPECDEC: &str = "llama-3.2-70b-specdec";
/// The `llama-3.2-70b-versatile` model. Used for chat completion.
pub const LLAMA_3_2_70B_VERSATILE: &str = "llama-3.2-70b-versatile";
/// The `llama-guard-3-8b` model. Used for chat completion.
pub const LLAMA_GUARD_3_8B: &str = "llama-guard-3-8b";
/// The `llama3-70b-8192` model. Used for chat completion.
pub const LLAMA_3_70B_8192: &str = "llama3-70b-8192";
/// The `llama3-8b-8192` model. Used for chat completion.
pub const LLAMA_3_8B_8192: &str = "llama3-8b-8192";
/// The `mixtral-8x7b-32768` model. Used for chat completion.
pub const MIXTRAL_8X7B_32768: &str = "mixtral-8x7b-32768";
253
/// A Groq chat-completion model bound to a [`Client`].
#[derive(Clone, Debug)]
pub struct CompletionModel {
    client: Client,
    /// Name of the model (e.g.: deepseek-r1-distill-llama-70b)
    pub model: String,
}
260
261impl CompletionModel {
262    pub fn new(client: Client, model: &str) -> Self {
263        Self {
264            client,
265            model: model.to_string(),
266        }
267    }
268
269    fn create_completion_request(
270        &self,
271        completion_request: CompletionRequest,
272    ) -> Result<Value, CompletionError> {
273        // Build up the order of messages (context, chat_history, prompt)
274        let mut partial_history = vec![];
275        if let Some(docs) = completion_request.normalized_documents() {
276            partial_history.push(docs);
277        }
278        partial_history.extend(completion_request.chat_history);
279
280        // Initialize full history with preamble (or empty if non-existent)
281        let mut full_history: Vec<Message> =
282            completion_request
283                .preamble
284                .map_or_else(Vec::new, |preamble| {
285                    vec![Message {
286                        role: "system".to_string(),
287                        content: Some(preamble),
288                    }]
289                });
290
291        // Convert and extend the rest of the history
292        full_history.extend(
293            partial_history
294                .into_iter()
295                .map(message::Message::try_into)
296                .collect::<Result<Vec<Message>, _>>()?,
297        );
298
299        let request = if completion_request.tools.is_empty() {
300            json!({
301                "model": self.model,
302                "messages": full_history,
303                "temperature": completion_request.temperature,
304            })
305        } else {
306            json!({
307                "model": self.model,
308                "messages": full_history,
309                "temperature": completion_request.temperature,
310                "tools": completion_request.tools.into_iter().map(ToolDefinition::from).collect::<Vec<_>>(),
311                "tool_choice": "auto",
312            })
313        };
314
315        let request = if let Some(params) = completion_request.additional_params {
316            json_utils::merge(request, params)
317        } else {
318            request
319        };
320
321        Ok(request)
322    }
323}
324
325impl completion::CompletionModel for CompletionModel {
326    type Response = CompletionResponse;
327    type StreamingResponse = openai::StreamingCompletionResponse;
328
329    #[cfg_attr(feature = "worker", worker::send)]
330    async fn completion(
331        &self,
332        completion_request: CompletionRequest,
333    ) -> Result<completion::CompletionResponse<CompletionResponse>, CompletionError> {
334        let request = self.create_completion_request(completion_request)?;
335
336        let response = self
337            .client
338            .post("/chat/completions")
339            .json(&request)
340            .send()
341            .await?;
342
343        if response.status().is_success() {
344            match response.json::<ApiResponse<CompletionResponse>>().await? {
345                ApiResponse::Ok(response) => {
346                    tracing::info!(target: "rig",
347                        "groq completion token usage: {:?}",
348                        response.usage.clone().map(|usage| format!("{usage}")).unwrap_or("N/A".to_string())
349                    );
350                    response.try_into()
351                }
352                ApiResponse::Err(err) => Err(CompletionError::ProviderError(err.message)),
353            }
354        } else {
355            Err(CompletionError::ProviderError(response.text().await?))
356        }
357    }
358
359    #[cfg_attr(feature = "worker", worker::send)]
360    async fn stream(
361        &self,
362        request: CompletionRequest,
363    ) -> Result<StreamingCompletionResponse<Self::StreamingResponse>, CompletionError> {
364        let mut request = self.create_completion_request(request)?;
365
366        request = merge(
367            request,
368            json!({"stream": true, "stream_options": {"include_usage": true}}),
369        );
370
371        let builder = self.client.post("/chat/completions").json(&request);
372
373        send_compatible_streaming_request(builder).await
374    }
375}
376
// ================================================================
// Groq Transcription API
// ================================================================
/// The `whisper-large-v3` transcription model.
pub const WHISPER_LARGE_V3: &str = "whisper-large-v3";
/// The `whisper-large-v3-turbo` transcription model.
pub const WHISPER_LARGE_V3_TURBO: &str = "whisper-large-v3-turbo";
/// The `distil-whisper-large-v3-en` transcription model.
pub const DISTIL_WHISPER_LARGE_V3: &str = "distil-whisper-large-v3-en";

/// A Groq transcription model bound to a [`Client`].
#[derive(Clone)]
pub struct TranscriptionModel {
    client: Client,
    /// Name of the model (e.g.: whisper-large-v3)
    pub model: String,
}
390
391impl TranscriptionModel {
392    pub fn new(client: Client, model: &str) -> Self {
393        Self {
394            client,
395            model: model.to_string(),
396        }
397    }
398}
399impl transcription::TranscriptionModel for TranscriptionModel {
400    type Response = TranscriptionResponse;
401
402    #[cfg_attr(feature = "worker", worker::send)]
403    async fn transcription(
404        &self,
405        request: transcription::TranscriptionRequest,
406    ) -> Result<
407        transcription::TranscriptionResponse<Self::Response>,
408        transcription::TranscriptionError,
409    > {
410        let data = request.data;
411
412        let mut body = reqwest::multipart::Form::new()
413            .text("model", self.model.clone())
414            .text("language", request.language)
415            .part(
416                "file",
417                Part::bytes(data).file_name(request.filename.clone()),
418            );
419
420        if let Some(prompt) = request.prompt {
421            body = body.text("prompt", prompt.clone());
422        }
423
424        if let Some(ref temperature) = request.temperature {
425            body = body.text("temperature", temperature.to_string());
426        }
427
428        if let Some(ref additional_params) = request.additional_params {
429            for (key, value) in additional_params
430                .as_object()
431                .expect("Additional Parameters to OpenAI Transcription should be a map")
432            {
433                body = body.text(key.to_owned(), value.to_string());
434            }
435        }
436
437        let response = self
438            .client
439            .post("audio/transcriptions")
440            .multipart(body)
441            .send()
442            .await?;
443
444        if response.status().is_success() {
445            match response
446                .json::<ApiResponse<TranscriptionResponse>>()
447                .await?
448            {
449                ApiResponse::Ok(response) => response.try_into(),
450                ApiResponse::Err(api_error_response) => Err(TranscriptionError::ProviderError(
451                    api_error_response.message,
452                )),
453            }
454        } else {
455            Err(TranscriptionError::ProviderError(response.text().await?))
456        }
457    }
458}