1use bytes::Bytes;
6use serde::{Deserialize, Serialize};
7use serde_json::Value;
8use tracing::{Instrument, Level, enabled, info_span};
9
10use super::api::{ApiResponse, Message, ToolDefinition};
11use super::client::Client;
12use crate::OneOrMany;
13use crate::completion::{self, CompletionError, CompletionRequest};
14use crate::http_client::HttpClientExt;
15use crate::providers::openai::completion::ToolChoice;
16use crate::providers::openai::responses_api::streaming::StreamingCompletionResponse;
17use crate::providers::openai::responses_api::{Output, ResponsesUsage};
18use crate::streaming::StreamingCompletionResponse as BaseStreamingCompletionResponse;
19
/// The `grok-2-1212` text model.
pub const GROK_2_1212: &str = "grok-2-1212";
/// The `grok-2-vision-1212` vision-capable model.
pub const GROK_2_VISION_1212: &str = "grok-2-vision-1212";
/// The `grok-3` flagship model.
pub const GROK_3: &str = "grok-3";
/// The `grok-3-fast` lower-latency variant of Grok 3.
pub const GROK_3_FAST: &str = "grok-3-fast";
/// The `grok-3-mini` smaller Grok 3 variant.
pub const GROK_3_MINI: &str = "grok-3-mini";
/// The `grok-3-mini-fast` smallest/fastest Grok 3 variant.
pub const GROK_3_MINI_FAST: &str = "grok-3-mini-fast";
/// The `grok-2-image-1212` image-generation model.
pub const GROK_2_IMAGE_1212: &str = "grok-2-image-1212";
/// Grok 4 — note the identifier is the dated snapshot `grok-4-0709`.
pub const GROK_4: &str = "grok-4-0709";
29
/// Request body for xAI's Responses API (`POST /v1/responses`).
///
/// Built from a provider-agnostic [`CompletionRequest`] via `TryFrom`; any
/// `additional_params` not consumed during conversion are flattened into the
/// serialized JSON object.
#[derive(Debug, Serialize, Deserialize)]
pub(super) struct XAICompletionRequest {
    // Model identifier, e.g. "grok-4-0709".
    model: String,
    /// Ordered conversation input: system preamble, documents, then chat history.
    pub input: Vec<Message>,
    #[serde(skip_serializing_if = "Option::is_none")]
    temperature: Option<f64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    max_output_tokens: Option<u64>,
    // Serialized tool definitions; omitted entirely when no tools are set.
    #[serde(skip_serializing_if = "Vec::is_empty")]
    tools: Vec<Value>,
    #[serde(skip_serializing_if = "Option::is_none")]
    tool_choice: Option<ToolChoice>,
    /// Extra provider-specific fields, merged into the top-level JSON object
    /// via `#[serde(flatten)]`.
    #[serde(flatten, skip_serializing_if = "Option::is_none")]
    pub additional_params: Option<serde_json::Value>,
}
49
50impl TryFrom<(&str, CompletionRequest)> for XAICompletionRequest {
51 type Error = CompletionError;
52
53 fn try_from((model, req): (&str, CompletionRequest)) -> Result<Self, Self::Error> {
54 if req.output_schema.is_some() {
55 tracing::warn!("Structured outputs currently not supported for xAI");
56 }
57 let model = req.model.clone().unwrap_or_else(|| model.to_string());
58 let mut input: Vec<Message> = req
59 .preamble
60 .as_ref()
61 .map_or_else(Vec::new, |p| vec![Message::system(p)]);
62
63 if let Some(docs) = req.normalized_documents() {
64 let docs: Vec<Message> = docs.try_into()?;
65 input.extend(docs);
66 }
67
68 let mut additional_params_payload = req.additional_params.unwrap_or(Value::Null);
69
70 for msg in req.chat_history {
71 let msg: Vec<Message> = msg.try_into()?;
72 input.extend(msg);
73 }
74
75 let tool_choice = req.tool_choice.map(ToolChoice::try_from).transpose()?;
76 let mut additional_tools =
77 extract_tools_from_additional_params(&mut additional_params_payload)?;
78 let mut tools = req
79 .tools
80 .into_iter()
81 .map(ToolDefinition::from)
82 .map(serde_json::to_value)
83 .collect::<Result<Vec<_>, _>>()?;
84 tools.append(&mut additional_tools);
85 let additional_params = if additional_params_payload.is_null() {
86 None
87 } else {
88 Some(additional_params_payload)
89 };
90
91 Ok(Self {
92 model: model.to_string(),
93 input,
94 temperature: req.temperature,
95 max_output_tokens: req.max_tokens,
96 tools,
97 tool_choice,
98 additional_params,
99 })
100 }
101}
102
103fn extract_tools_from_additional_params(
104 additional_params: &mut Value,
105) -> Result<Vec<Value>, CompletionError> {
106 if let Some(map) = additional_params.as_object_mut()
107 && let Some(raw_tools) = map.remove("tools")
108 {
109 return serde_json::from_value::<Vec<Value>>(raw_tools).map_err(|err| {
110 CompletionError::RequestError(
111 format!("Invalid xAI `additional_params.tools` payload: {err}").into(),
112 )
113 });
114 }
115
116 Ok(Vec::new())
117}
118
/// Response body returned by xAI's `/v1/responses` endpoint.
///
/// Fields marked `#[serde(default)]` tolerate their absence in the provider
/// payload.
#[derive(Debug, Deserialize, Serialize)]
pub struct CompletionResponse {
    /// Provider-assigned response identifier.
    pub id: String,
    /// Model that actually produced the response.
    pub model: String,
    /// Output items (messages, tool calls, …) produced by the model.
    pub output: Vec<Output>,
    // Creation timestamp; 0 when the provider omits it.
    #[serde(default)]
    pub created: i64,
    // Object type tag; empty string when omitted.
    #[serde(default)]
    pub object: String,
    // Completion status reported by the provider, if any.
    #[serde(default)]
    pub status: Option<String>,
    /// Token usage statistics, when reported.
    pub usage: Option<ResponsesUsage>,
}
136
137impl TryFrom<CompletionResponse> for completion::CompletionResponse<CompletionResponse> {
138 type Error = CompletionError;
139
140 fn try_from(response: CompletionResponse) -> Result<Self, Self::Error> {
141 let content: Vec<completion::AssistantContent> = response
142 .output
143 .iter()
144 .cloned()
145 .flat_map(<Vec<completion::AssistantContent>>::from)
146 .collect();
147
148 let choice = OneOrMany::many(content).map_err(|_| {
149 CompletionError::ResponseError("Response contained no output".to_owned())
150 })?;
151
152 let usage = response
153 .usage
154 .as_ref()
155 .map(|u| completion::Usage {
156 input_tokens: u.input_tokens,
157 output_tokens: u.output_tokens,
158 total_tokens: u.total_tokens,
159 cached_input_tokens: u
160 .input_tokens_details
161 .clone()
162 .map(|x| x.cached_tokens)
163 .unwrap_or_default(),
164 cache_creation_input_tokens: 0,
165 })
166 .unwrap_or_default();
167
168 Ok(completion::CompletionResponse {
169 choice,
170 usage,
171 raw_response: response,
172 message_id: None,
173 })
174 }
175}
176
/// An xAI completion model handle, generic over the underlying HTTP client
/// (defaults to `reqwest::Client`).
#[derive(Clone)]
pub struct CompletionModel<T = reqwest::Client> {
    // Configured xAI API client used to send requests.
    pub(crate) client: Client<T>,
    /// Default model identifier used for requests (may be overridden per
    /// request via `CompletionRequest::model`).
    pub model: String,
}
186
187impl<T> CompletionModel<T> {
188 pub fn new(client: Client<T>, model: impl Into<String>) -> Self {
189 Self {
190 client,
191 model: model.into(),
192 }
193 }
194}
195
196impl<T> completion::CompletionModel for CompletionModel<T>
197where
198 T: HttpClientExt + Clone + Default + std::fmt::Debug + Send + 'static,
199{
200 type Response = CompletionResponse;
201 type StreamingResponse = StreamingCompletionResponse;
202
203 type Client = Client<T>;
204
205 fn make(client: &Self::Client, model: impl Into<String>) -> Self {
206 Self::new(client.clone(), model)
207 }
208
209 async fn completion(
210 &self,
211 completion_request: completion::CompletionRequest,
212 ) -> Result<completion::CompletionResponse<CompletionResponse>, CompletionError> {
213 let span = if tracing::Span::current().is_disabled() {
214 info_span!(
215 target: "rig::completions",
216 "chat",
217 gen_ai.operation.name = "chat",
218 gen_ai.provider.name = "xai",
219 gen_ai.request.model = self.model,
220 gen_ai.system_instructions = tracing::field::Empty,
221 gen_ai.response.id = tracing::field::Empty,
222 gen_ai.response.model = tracing::field::Empty,
223 gen_ai.usage.output_tokens = tracing::field::Empty,
224 gen_ai.usage.input_tokens = tracing::field::Empty,
225 gen_ai.usage.cache_read.input_tokens = tracing::field::Empty,
226 )
227 } else {
228 tracing::Span::current()
229 };
230
231 span.record("gen_ai.system_instructions", &completion_request.preamble);
232
233 let request =
234 XAICompletionRequest::try_from((self.model.to_string().as_ref(), completion_request))?;
235
236 if enabled!(Level::TRACE) {
237 tracing::trace!(target: "rig::completions",
238 "xAI completion request: {}",
239 serde_json::to_string_pretty(&request)?
240 );
241 }
242
243 let body = serde_json::to_vec(&request)?;
244 let req = self
245 .client
246 .post("/v1/responses")?
247 .body(body)
248 .map_err(|e| CompletionError::HttpError(e.into()))?;
249
250 async move {
251 let response = self.client.send::<_, Bytes>(req).await?;
252 let status = response.status();
253 let response_body = response.into_body().into_future().await?.to_vec();
254
255 if status.is_success() {
256 match serde_json::from_slice::<ApiResponse<CompletionResponse>>(&response_body)? {
257 ApiResponse::Ok(response) => {
258 if enabled!(Level::TRACE) {
259 tracing::trace!(target: "rig::completions",
260 "xAI completion response: {}",
261 serde_json::to_string_pretty(&response)?
262 );
263 }
264
265 response.try_into()
266 }
267 ApiResponse::Error(error) => {
268 Err(CompletionError::ProviderError(error.message()))
269 }
270 }
271 } else {
272 Err(CompletionError::ProviderError(
273 String::from_utf8_lossy(&response_body).to_string(),
274 ))
275 }
276 }
277 .instrument(span)
278 .await
279 }
280
281 async fn stream(
282 &self,
283 request: CompletionRequest,
284 ) -> Result<BaseStreamingCompletionResponse<Self::StreamingResponse>, CompletionError> {
285 self.stream(request).await
286 }
287}
288
#[cfg(test)]
mod tests {
    use super::XAICompletionRequest;
    use crate::OneOrMany;
    use crate::completion::CompletionRequest;
    use crate::completion::request::Document;

    /// Documents supplied on the request must end up in the serialized
    /// `input` array sent to xAI.
    #[test]
    fn xai_request_includes_normalized_documents() {
        let doc = Document {
            id: "doc_1".to_string(),
            text: "Definition of glarb-glarb: an ancient tool.".to_string(),
            additional_props: Default::default(),
        };
        let completion_request = CompletionRequest {
            model: None,
            preamble: Some("Use the provided context.".to_string()),
            chat_history: OneOrMany::one("What is glarb-glarb?".into()),
            documents: vec![doc],
            tools: vec![],
            temperature: None,
            max_tokens: None,
            tool_choice: None,
            additional_params: None,
            output_schema: None,
        };

        let converted = XAICompletionRequest::try_from(("grok-4-0709", completion_request))
            .expect("request conversion should succeed");
        let json = serde_json::to_value(converted).expect("serialization should succeed");
        let messages = json["input"]
            .as_array()
            .expect("xAI request input should be an array");

        let forwarded = messages
            .iter()
            .any(|message| message.to_string().contains("glarb-glarb"));
        assert!(
            forwarded,
            "normalized documents should be forwarded into xAI input"
        );
    }
}
329}