xai_grpc_client/generated/xai_api.rs

1// This file is @generated by prost-build.
2/// The response from the service, when creating a deferred completion request.
3#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
4pub struct StartDeferredResponse {
5    /// The ID of this request. This ID can be used to retrieve completion results
6    /// later.
7    #[prost(string, tag = "1")]
8    pub request_id: ::prost::alloc::string::String,
9}
10/// Retrieve the deferred chat request's response with the `request_id` in
11/// StartDeferredResponse.
12#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
13pub struct GetDeferredRequest {
14    /// The ID of this request to get.
15    #[prost(string, tag = "1")]
16    pub request_id: ::prost::alloc::string::String,
17}
18/// Status of deferred completion request.
19#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
20#[repr(i32)]
21pub enum DeferredStatus {
22    /// Invalid status.
23    InvalidDeferredStatus = 0,
24    /// The request has been processed and is available for download.
25    Done = 1,
26    /// The request has been processed but the content has expired and is not
27    /// available anymore.
28    Expired = 2,
29    /// The request is still being processed.
30    Pending = 3,
31}
32impl DeferredStatus {
33    /// String value of the enum field names used in the ProtoBuf definition.
34    ///
35    /// The values are not transformed in any way and thus are considered stable
36    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
37    pub fn as_str_name(&self) -> &'static str {
38        match self {
39            Self::InvalidDeferredStatus => "INVALID_DEFERRED_STATUS",
40            Self::Done => "DONE",
41            Self::Expired => "EXPIRED",
42            Self::Pending => "PENDING",
43        }
44    }
45    /// Creates an enum from field names used in the ProtoBuf definition.
46    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
47        match value {
48            "INVALID_DEFERRED_STATUS" => Some(Self::InvalidDeferredStatus),
49            "DONE" => Some(Self::Done),
50            "EXPIRED" => Some(Self::Expired),
51            "PENDING" => Some(Self::Pending),
52            _ => None,
53        }
54    }
55}
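// Illustrative sketch, not generated code: the deferred status arrives on the wire as a
// raw i32, so callers usually convert it back to `DeferredStatus` before deciding
// whether to keep polling. Assumes the `TryFrom<i32>` impl prost derives for enumerations.
#[allow(dead_code)]
fn deferred_is_still_pending(raw_status: i32) -> bool {
    matches!(
        DeferredStatus::try_from(raw_status),
        Ok(DeferredStatus::Pending)
    )
}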
56/// Request message for generating an image.
57#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
58pub struct GenerateImageRequest {
59    /// Input prompt to generate an image from.
60    #[prost(string, tag = "1")]
61    pub prompt: ::prost::alloc::string::String,
62    /// Optional input image to perform generations based on.
63    #[prost(message, optional, tag = "5")]
64    pub image: ::core::option::Option<ImageUrlContent>,
65    /// Name or alias of the image generation model to be used.
66    #[prost(string, tag = "2")]
67    pub model: ::prost::alloc::string::String,
68    /// Number of images to generate. Allowed values are \[1, 10\].
69    #[prost(int32, optional, tag = "3")]
70    pub n: ::core::option::Option<i32>,
71    /// An opaque string supplied by the API client (customer) to identify a user.
72    /// The string will be stored in the logs and can be used in customer service
73    /// requests to identify certain requests.
74    #[prost(string, tag = "4")]
75    pub user: ::prost::alloc::string::String,
76    /// Optional field to specify the image format to return the generated image(s)
77    /// in. See ImageFormat enum for options.
78    #[prost(enumeration = "ImageFormat", tag = "11")]
79    pub format: i32,
80}
81/// The response from the image generation models containing the generated image(s).
82#[derive(Clone, PartialEq, ::prost::Message)]
83pub struct ImageResponse {
84    /// A list of generated images (including relevant metadata).
85    #[prost(message, repeated, tag = "1")]
86    pub images: ::prost::alloc::vec::Vec<GeneratedImage>,
87    /// The model used to generate the image (ignoring aliases).
88    #[prost(string, tag = "2")]
89    pub model: ::prost::alloc::string::String,
90}
91/// Contains all data related to a generated image.
92#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
93pub struct GeneratedImage {
94    /// The upsampled prompt that was used to generate the image.
95    #[prost(string, tag = "2")]
96    pub up_sampled_prompt: ::prost::alloc::string::String,
97    /// Whether the image generated by the model respects moderation rules.
98    /// The field will be true if the image respects moderation rules. Otherwise,
99    /// the field will be false and the image field is replaced by a placeholder.
100    #[prost(bool, tag = "4")]
101    pub respect_moderation: bool,
102    /// The generated image.
103    #[prost(oneof = "generated_image::Image", tags = "1, 3")]
104    pub image: ::core::option::Option<generated_image::Image>,
105}
106/// Nested message and enum types in `GeneratedImage`.
107pub mod generated_image {
108    /// The generated image.
109    #[derive(Clone, PartialEq, Eq, Hash, ::prost::Oneof)]
110    pub enum Image {
111        /// A base-64 encoded string of the image.
112        #[prost(string, tag = "1")]
113        Base64(::prost::alloc::string::String),
114        /// A url that points to the generated image.
115        #[prost(string, tag = "3")]
116        Url(::prost::alloc::string::String),
117    }
118}
119/// Contains data relating to an image that is provided to the model.
120#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
121pub struct ImageUrlContent {
122    /// This is either an image URL or a base64-encoded version of the image.
123    /// The following image formats are supported: PNG and JPG.
124    /// If an image URL is provided, the image will be downloaded for every API
125    /// request without being cached. Images are fetched using
126    /// "XaiImageApiFetch/1.0" user agent, and will timeout after 5 seconds.
127    /// The image size is limited to 10 MiB. If the image download fails, the API
128    /// request will fail as well.
129    #[prost(string, tag = "1")]
130    pub image_url: ::prost::alloc::string::String,
131    /// The level of pre-processing resolution that will be applied to the image.
132    #[prost(enumeration = "ImageDetail", tag = "2")]
133    pub detail: i32,
134}
135/// Indicates the level of preprocessing to apply to images that will be fed to
136/// the model.
137#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
138#[repr(i32)]
139pub enum ImageDetail {
140    /// Detail level is invalid.
141    DetailInvalid = 0,
142    /// The system will decide the image resolution to use.
143    DetailAuto = 1,
144    /// The model will process a low-resolution version of the image. This is
145    /// faster and cheaper (i.e. consumes fewer tokens).
146    DetailLow = 2,
147    /// The model will process a high-resolution version of the image. This is slower and
148    /// more expensive but will allow the model to attend to more nuanced details
149    /// in the image.
150    DetailHigh = 3,
151}
152impl ImageDetail {
153    /// String value of the enum field names used in the ProtoBuf definition.
154    ///
155    /// The values are not transformed in any way and thus are considered stable
156    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
157    pub fn as_str_name(&self) -> &'static str {
158        match self {
159            Self::DetailInvalid => "DETAIL_INVALID",
160            Self::DetailAuto => "DETAIL_AUTO",
161            Self::DetailLow => "DETAIL_LOW",
162            Self::DetailHigh => "DETAIL_HIGH",
163        }
164    }
165    /// Creates an enum from field names used in the ProtoBuf definition.
166    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
167        match value {
168            "DETAIL_INVALID" => Some(Self::DetailInvalid),
169            "DETAIL_AUTO" => Some(Self::DetailAuto),
170            "DETAIL_LOW" => Some(Self::DetailLow),
171            "DETAIL_HIGH" => Some(Self::DetailHigh),
172            _ => None,
173        }
174    }
175}
176/// The image format to be returned (base-64 encoded string or a url of
177/// the image).
178#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
179#[repr(i32)]
180pub enum ImageFormat {
181    /// Image format is invalid.
182    ImgFormatInvalid = 0,
183    /// A base-64 encoding of the image.
184    ImgFormatBase64 = 1,
185    /// A URL at which the user can download the image.
186    ImgFormatUrl = 2,
187}
188impl ImageFormat {
189    /// String value of the enum field names used in the ProtoBuf definition.
190    ///
191    /// The values are not transformed in any way and thus are considered stable
192    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
193    pub fn as_str_name(&self) -> &'static str {
194        match self {
195            Self::ImgFormatInvalid => "IMG_FORMAT_INVALID",
196            Self::ImgFormatBase64 => "IMG_FORMAT_BASE64",
197            Self::ImgFormatUrl => "IMG_FORMAT_URL",
198        }
199    }
200    /// Creates an enum from field names used in the ProtoBuf definition.
201    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
202        match value {
203            "IMG_FORMAT_INVALID" => Some(Self::ImgFormatInvalid),
204            "IMG_FORMAT_BASE64" => Some(Self::ImgFormatBase64),
205            "IMG_FORMAT_URL" => Some(Self::ImgFormatUrl),
206            _ => None,
207        }
208    }
209}
210/// Generated client implementations.
211pub mod image_client {
212    #![allow(
213        unused_variables,
214        dead_code,
215        missing_docs,
216        clippy::wildcard_imports,
217        clippy::let_unit_value,
218    )]
219    use tonic::codegen::*;
220    use tonic::codegen::http::Uri;
221    /// An API service for interaction with image generation models.
222    #[derive(Debug, Clone)]
223    pub struct ImageClient<T> {
224        inner: tonic::client::Grpc<T>,
225    }
226    impl ImageClient<tonic::transport::Channel> {
227        /// Attempt to create a new client by connecting to a given endpoint.
228        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
229        where
230            D: TryInto<tonic::transport::Endpoint>,
231            D::Error: Into<StdError>,
232        {
233            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
234            Ok(Self::new(conn))
235        }
236    }
237    impl<T> ImageClient<T>
238    where
239        T: tonic::client::GrpcService<tonic::body::Body>,
240        T::Error: Into<StdError>,
241        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
242        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
243    {
244        pub fn new(inner: T) -> Self {
245            let inner = tonic::client::Grpc::new(inner);
246            Self { inner }
247        }
248        pub fn with_origin(inner: T, origin: Uri) -> Self {
249            let inner = tonic::client::Grpc::with_origin(inner, origin);
250            Self { inner }
251        }
252        pub fn with_interceptor<F>(
253            inner: T,
254            interceptor: F,
255        ) -> ImageClient<InterceptedService<T, F>>
256        where
257            F: tonic::service::Interceptor,
258            T::ResponseBody: Default,
259            T: tonic::codegen::Service<
260                http::Request<tonic::body::Body>,
261                Response = http::Response<
262                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
263                >,
264            >,
265            <T as tonic::codegen::Service<
266                http::Request<tonic::body::Body>,
267            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
268        {
269            ImageClient::new(InterceptedService::new(inner, interceptor))
270        }
271        /// Compress requests with the given encoding.
272        ///
273    /// This requires the server to support it, otherwise it might respond with an
274        /// error.
275        #[must_use]
276        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
277            self.inner = self.inner.send_compressed(encoding);
278            self
279        }
280        /// Enable decompressing responses.
281        #[must_use]
282        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
283            self.inner = self.inner.accept_compressed(encoding);
284            self
285        }
286        /// Limits the maximum size of a decoded message.
287        ///
288        /// Default: `4MB`
289        #[must_use]
290        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
291            self.inner = self.inner.max_decoding_message_size(limit);
292            self
293        }
294        /// Limits the maximum size of an encoded message.
295        ///
296        /// Default: `usize::MAX`
297        #[must_use]
298        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
299            self.inner = self.inner.max_encoding_message_size(limit);
300            self
301        }
302        /// Create an image based on a text prompt and optionally another image.
303        pub async fn generate_image(
304            &mut self,
305            request: impl tonic::IntoRequest<super::GenerateImageRequest>,
306        ) -> std::result::Result<tonic::Response<super::ImageResponse>, tonic::Status> {
307            self.inner
308                .ready()
309                .await
310                .map_err(|e| {
311                    tonic::Status::unknown(
312                        format!("Service was not ready: {}", e.into()),
313                    )
314                })?;
315            let codec = tonic_prost::ProstCodec::default();
316            let path = http::uri::PathAndQuery::from_static(
317                "/xai_api.Image/GenerateImage",
318            );
319            let mut req = request.into_request();
320            req.extensions_mut()
321                .insert(GrpcMethod::new("xai_api.Image", "GenerateImage"));
322            self.inner.unary(req, path, codec).await
323        }
324    }
325}
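// Illustrative usage sketch, not generated code: connects, requests a single
// base64-encoded image, and reads it back. The endpoint URL and model name are
// placeholders, and `connect` assumes tonic's `transport` feature is enabled.
#[allow(dead_code)]
async fn example_generate_image() -> Result<(), Box<dyn std::error::Error>> {
    let mut client = image_client::ImageClient::connect("https://example.invalid").await?;
    let request = GenerateImageRequest {
        prompt: "a watercolor fox".to_string(),
        image: None,
        model: "placeholder-image-model".to_string(),
        n: Some(1),
        user: String::new(),
        format: ImageFormat::ImgFormatBase64 as i32,
    };
    let response = client.generate_image(request).await?.into_inner();
    for generated in &response.images {
        if let Some(generated_image::Image::Base64(data)) = &generated.image {
            println!("received {} base64 characters", data.len());
        }
    }
    Ok(())
}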
326/// Records the cost associated with a sampling request (both chat and sample
327/// endpoints).
328#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
329pub struct SamplingUsage {
330    /// Total number of text completion tokens generated across all choices
331    /// (in case of n>1).
332    #[prost(int32, tag = "1")]
333    pub completion_tokens: i32,
334    /// Total number of reasoning tokens generated across all choices.
335    #[prost(int32, tag = "6")]
336    pub reasoning_tokens: i32,
337    /// Total number of prompt tokens (both text and images).
338    #[prost(int32, tag = "2")]
339    pub prompt_tokens: i32,
340    /// Total number of tokens (prompt + completion).
341    #[prost(int32, tag = "3")]
342    pub total_tokens: i32,
343    /// Total number of (uncached) text tokens in the prompt.
344    #[prost(int32, tag = "4")]
345    pub prompt_text_tokens: i32,
346    /// Total number of cached text tokens in the prompt.
347    #[prost(int32, tag = "7")]
348    pub cached_prompt_text_tokens: i32,
349    /// Total number of image tokens in the prompt.
350    #[prost(int32, tag = "5")]
351    pub prompt_image_tokens: i32,
352    /// Number of individual live search sources used.
353    /// Only applicable when live search is enabled.
354    /// E.g., if a live search query returns citations from X, web, and news sources, this will be 3.
355    /// If it returns citations from only X, this will be 1.
356    #[prost(int32, tag = "8")]
357    pub num_sources_used: i32,
358    /// List of server side tools called.
359    #[prost(enumeration = "ServerSideTool", repeated, tag = "9")]
360    pub server_side_tools_used: ::prost::alloc::vec::Vec<i32>,
361}
362/// Usage of embedding models.
363#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)]
364pub struct EmbeddingUsage {
365    /// The number of feature vectors produced from text inputs.
366    #[prost(int32, tag = "1")]
367    pub num_text_embeddings: i32,
368    /// The number of feature vectors produced from image inputs.
369    #[prost(int32, tag = "2")]
370    pub num_image_embeddings: i32,
371}
372#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
373#[repr(i32)]
374pub enum ServerSideTool {
375    Invalid = 0,
376    WebSearch = 1,
377    XSearch = 2,
378    CodeExecution = 3,
379    ViewImage = 4,
380    ViewXVideo = 5,
381    CollectionsSearch = 6,
382    Mcp = 7,
383    DocumentSearch = 8,
384}
385impl ServerSideTool {
386    /// String value of the enum field names used in the ProtoBuf definition.
387    ///
388    /// The values are not transformed in any way and thus are considered stable
389    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
390    pub fn as_str_name(&self) -> &'static str {
391        match self {
392            Self::Invalid => "SERVER_SIDE_TOOL_INVALID",
393            Self::WebSearch => "SERVER_SIDE_TOOL_WEB_SEARCH",
394            Self::XSearch => "SERVER_SIDE_TOOL_X_SEARCH",
395            Self::CodeExecution => "SERVER_SIDE_TOOL_CODE_EXECUTION",
396            Self::ViewImage => "SERVER_SIDE_TOOL_VIEW_IMAGE",
397            Self::ViewXVideo => "SERVER_SIDE_TOOL_VIEW_X_VIDEO",
398            Self::CollectionsSearch => "SERVER_SIDE_TOOL_COLLECTIONS_SEARCH",
399            Self::Mcp => "SERVER_SIDE_TOOL_MCP",
400            Self::DocumentSearch => "SERVER_SIDE_TOOL_DOCUMENT_SEARCH",
401        }
402    }
403    /// Creates an enum from field names used in the ProtoBuf definition.
404    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
405        match value {
406            "SERVER_SIDE_TOOL_INVALID" => Some(Self::Invalid),
407            "SERVER_SIDE_TOOL_WEB_SEARCH" => Some(Self::WebSearch),
408            "SERVER_SIDE_TOOL_X_SEARCH" => Some(Self::XSearch),
409            "SERVER_SIDE_TOOL_CODE_EXECUTION" => Some(Self::CodeExecution),
410            "SERVER_SIDE_TOOL_VIEW_IMAGE" => Some(Self::ViewImage),
411            "SERVER_SIDE_TOOL_VIEW_X_VIDEO" => Some(Self::ViewXVideo),
412            "SERVER_SIDE_TOOL_COLLECTIONS_SEARCH" => Some(Self::CollectionsSearch),
413            "SERVER_SIDE_TOOL_MCP" => Some(Self::Mcp),
414            "SERVER_SIDE_TOOL_DOCUMENT_SEARCH" => Some(Self::DocumentSearch),
415            _ => None,
416        }
417    }
418}
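// Illustrative sketch, not generated code: `server_side_tools_used` is a repeated
// enumeration stored as raw i32 values; this decodes it, silently skipping any values
// this client version does not know about. Assumes prost's `TryFrom<i32>` impl.
#[allow(dead_code)]
fn server_side_tools(usage: &SamplingUsage) -> Vec<ServerSideTool> {
    usage
        .server_side_tools_used
        .iter()
        .filter_map(|raw| ServerSideTool::try_from(*raw).ok())
        .collect()
}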
419/// Request to get a text completion response sampling.
420#[derive(Clone, PartialEq, ::prost::Message)]
421pub struct SampleTextRequest {
422    /// Text prompts to sample on.
423    #[prost(string, repeated, tag = "1")]
424    pub prompt: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
425    /// Name or alias of the model to be used.
426    #[prost(string, tag = "3")]
427    pub model: ::prost::alloc::string::String,
428    /// The number of completions to create concurrently. A single completion will
429    /// be generated if the parameter is unset. Each completion is charged at the
430    /// same rate. You can generate at most 128 concurrent completions.
431    #[prost(int32, optional, tag = "8")]
432    pub n: ::core::option::Option<i32>,
433    /// The maximum number of tokens to sample. If unset, the model samples until
434    /// one of the following stop-conditions is reached:
435    ///
436    /// * The context length of the model is exceeded
437    /// * One of the `stop` sequences has been observed.
438    ///
439    /// We recommend choosing a reasonable value to reduce the risk of accidental
440    /// long generations that consume many tokens.
441    #[prost(int32, optional, tag = "7")]
442    pub max_tokens: ::core::option::Option<i32>,
443    /// A random seed used to make the sampling process deterministic. This is
444    /// provided on a best-effort basis, without a guarantee that sampling is 100%
445    /// deterministic given a seed. This is primarily provided for short-lived
446    /// testing purposes. Given a fixed request and seed, the answers may change
447    /// over time as our systems evolve.
448    #[prost(int32, optional, tag = "11")]
449    pub seed: ::core::option::Option<i32>,
450    /// String patterns that will cause the sampling procedure to stop prematurely
451    /// when observed.
452    /// Note that the completion is based on individual tokens and sampling can
453    /// only terminate at token boundaries. If a stop string is a substring of an
454    /// individual token, the completion will include the entire token, which
455    /// extends beyond the stop string.
456    /// For example, if `stop = \["wor"\]` and we prompt the model with "hello" to
457    /// which it responds with "world", then the sampling procedure will stop after
458    /// observing the "world" token and the completion will contain
459    /// the entire word "world" even though the stop string was just "wor".
460    /// You can provide at most 8 stop strings.
461    #[prost(string, repeated, tag = "12")]
462    pub stop: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
463    /// A number between 0 and 2 used to control the variance of completions.
464    /// The smaller the value, the more deterministic the model will become. For
465    /// example, if we sample 1000 answers to the same prompt at a temperature of
466    /// 0.001, then most of the 1000 answers will be identical. Conversely, if we
467    /// conduct the same experiment at a temperature of 2, virtually no two answers
468    /// will be identical. Note that increasing the temperature will cause
469    /// the model to hallucinate more strongly.
470    #[prost(float, optional, tag = "14")]
471    pub temperature: ::core::option::Option<f32>,
472    /// A number between 0 and 1 controlling the likelihood that the model uses
473    /// less-common answers. Recall that the model produces a probability for
474    /// each token. This means, for any choice of token there are thousands of
475    /// possibilities to choose from. This parameter controls the "nucleus sampling
476    /// algorithm". Instead of considering every possible token at every step, we
477    /// only look at the most likely tokens whose cumulative probability reaches `top_p`.
478    /// For example, if we set `top_p = 0.9`, then the set of tokens we actually
479    /// sample from will have a probability mass of at least 90%. In practice,
480    /// low values will make the model more deterministic.
481    #[prost(float, optional, tag = "15")]
482    pub top_p: ::core::option::Option<f32>,
483    /// Number between -2.0 and 2.0.
484    /// Positive values penalize new tokens based on their existing frequency in the text so far,
485    /// decreasing the model's likelihood to repeat the same line verbatim.
486    #[prost(float, optional, tag = "13")]
487    pub frequency_penalty: ::core::option::Option<f32>,
488    /// Whether to return log probabilities of the output tokens or not.
489    /// If true, returns the log probabilities of each output token returned in the content of the message.
490    #[prost(bool, tag = "5")]
491    pub logprobs: bool,
492    /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
493    /// Not supported by grok-3 models.
494    #[prost(float, optional, tag = "9")]
495    pub presence_penalty: ::core::option::Option<f32>,
496    /// An integer between 0 and 8 specifying the number of most likely tokens to return at each token position,
497    /// each with an associated log probability.
498    /// logprobs must be set to true if this parameter is used.
499    #[prost(int32, optional, tag = "6")]
500    pub top_logprobs: ::core::option::Option<i32>,
501    /// An opaque string supplied by the API client (customer) to identify a user.
502    /// The string will be stored in the logs and can be used in customer service
503    /// requests to identify certain requests.
504    #[prost(string, tag = "17")]
505    pub user: ::prost::alloc::string::String,
506}
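// Illustrative sketch, not generated code: a minimal request that sets only the commonly
// used sampling knobs and leaves every other field at its prost default. The model name
// is a placeholder, not a real model identifier.
#[allow(dead_code)]
fn example_sample_text_request() -> SampleTextRequest {
    SampleTextRequest {
        prompt: vec!["Once upon a time".to_string()],
        model: "placeholder-model".to_string(),
        max_tokens: Some(64),
        temperature: Some(0.7),
        stop: vec!["\n\n".to_string()],
        ..Default::default()
    }
}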
507/// Response of a text completion response sampling.
508#[derive(Clone, PartialEq, ::prost::Message)]
509pub struct SampleTextResponse {
510    /// The ID of this request. This ID will also show up on your billing records
511    /// and you can use it when contacting us regarding a specific request.
512    #[prost(string, tag = "1")]
513    pub id: ::prost::alloc::string::String,
514    /// Completions in response to the input messages. The number of completions is
515    /// controlled via the `n` parameter on the request.
516    #[prost(message, repeated, tag = "2")]
517    pub choices: ::prost::alloc::vec::Vec<SampleChoice>,
518    /// A UNIX timestamp (UTC) indicating when the response object was created.
519    /// The timestamp is taken when the model starts generating the response.
520    #[prost(message, optional, tag = "5")]
521    pub created: ::core::option::Option<::prost_types::Timestamp>,
522    /// The name of the model used for the request. This model name contains
523    /// the actual model name used rather than any aliases.
524    /// This means the model name can be `grok-2-1212` even when the request was
525    /// specifying `grok-2-latest`.
526    #[prost(string, tag = "6")]
527    pub model: ::prost::alloc::string::String,
528    /// Not supported yet. Included for compatibility reasons.
529    #[prost(string, tag = "7")]
530    pub system_fingerprint: ::prost::alloc::string::String,
531    /// The number of tokens consumed by this request.
532    #[prost(message, optional, tag = "9")]
533    pub usage: ::core::option::Option<SamplingUsage>,
534}
535/// Contains the response generated by the model.
536#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
537pub struct SampleChoice {
538    /// Indicating why the model stopped sampling.
539    #[prost(enumeration = "FinishReason", tag = "1")]
540    pub finish_reason: i32,
541    /// The index of this choice in the list of choices. If you set `n > 1` on
542    /// your request, you will receive more than one choice in your response.
543    #[prost(int32, tag = "2")]
544    pub index: i32,
545    /// The actual text generated by the model.
546    #[prost(string, tag = "3")]
547    pub text: ::prost::alloc::string::String,
548}
549/// Reasons why the model stopped sampling.
550#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
551#[repr(i32)]
552pub enum FinishReason {
553    /// Invalid reason.
554    ReasonInvalid = 0,
555    /// The max_len parameter specified on the input is reached.
556    ReasonMaxLen = 1,
557    /// The maximum context length of the model is reached.
558    ReasonMaxContext = 2,
559    /// One of the stop words was found.
560    ReasonStop = 3,
561    /// A tool call is included in the response.
562    ReasonToolCalls = 4,
563    /// Time limit has been reached.
564    ReasonTimeLimit = 5,
565}
566impl FinishReason {
567    /// String value of the enum field names used in the ProtoBuf definition.
568    ///
569    /// The values are not transformed in any way and thus are considered stable
570    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
571    pub fn as_str_name(&self) -> &'static str {
572        match self {
573            Self::ReasonInvalid => "REASON_INVALID",
574            Self::ReasonMaxLen => "REASON_MAX_LEN",
575            Self::ReasonMaxContext => "REASON_MAX_CONTEXT",
576            Self::ReasonStop => "REASON_STOP",
577            Self::ReasonToolCalls => "REASON_TOOL_CALLS",
578            Self::ReasonTimeLimit => "REASON_TIME_LIMIT",
579        }
580    }
581    /// Creates an enum from field names used in the ProtoBuf definition.
582    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
583        match value {
584            "REASON_INVALID" => Some(Self::ReasonInvalid),
585            "REASON_MAX_LEN" => Some(Self::ReasonMaxLen),
586            "REASON_MAX_CONTEXT" => Some(Self::ReasonMaxContext),
587            "REASON_STOP" => Some(Self::ReasonStop),
588            "REASON_TOOL_CALLS" => Some(Self::ReasonToolCalls),
589            "REASON_TIME_LIMIT" => Some(Self::ReasonTimeLimit),
590            _ => None,
591        }
592    }
593}
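// Illustrative sketch, not generated code: maps a raw `finish_reason` value to the stable
// proto name (e.g. "REASON_STOP") for logging, falling back to the invalid variant for
// values this client version does not know. Assumes prost's `TryFrom<i32>` impl.
#[allow(dead_code)]
fn finish_reason_name(choice: &SampleChoice) -> &'static str {
    FinishReason::try_from(choice.finish_reason)
        .unwrap_or(FinishReason::ReasonInvalid)
        .as_str_name()
}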
594/// Generated client implementations.
595pub mod sample_client {
596    #![allow(
597        unused_variables,
598        dead_code,
599        missing_docs,
600        clippy::wildcard_imports,
601        clippy::let_unit_value,
602    )]
603    use tonic::codegen::*;
604    use tonic::codegen::http::Uri;
605    /// An API service for sampling the responses of available language models.
606    #[derive(Debug, Clone)]
607    pub struct SampleClient<T> {
608        inner: tonic::client::Grpc<T>,
609    }
610    impl SampleClient<tonic::transport::Channel> {
611        /// Attempt to create a new client by connecting to a given endpoint.
612        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
613        where
614            D: TryInto<tonic::transport::Endpoint>,
615            D::Error: Into<StdError>,
616        {
617            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
618            Ok(Self::new(conn))
619        }
620    }
621    impl<T> SampleClient<T>
622    where
623        T: tonic::client::GrpcService<tonic::body::Body>,
624        T::Error: Into<StdError>,
625        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
626        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
627    {
628        pub fn new(inner: T) -> Self {
629            let inner = tonic::client::Grpc::new(inner);
630            Self { inner }
631        }
632        pub fn with_origin(inner: T, origin: Uri) -> Self {
633            let inner = tonic::client::Grpc::with_origin(inner, origin);
634            Self { inner }
635        }
636        pub fn with_interceptor<F>(
637            inner: T,
638            interceptor: F,
639        ) -> SampleClient<InterceptedService<T, F>>
640        where
641            F: tonic::service::Interceptor,
642            T::ResponseBody: Default,
643            T: tonic::codegen::Service<
644                http::Request<tonic::body::Body>,
645                Response = http::Response<
646                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
647                >,
648            >,
649            <T as tonic::codegen::Service<
650                http::Request<tonic::body::Body>,
651            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
652        {
653            SampleClient::new(InterceptedService::new(inner, interceptor))
654        }
655        /// Compress requests with the given encoding.
656        ///
657    /// This requires the server to support it, otherwise it might respond with an
658        /// error.
659        #[must_use]
660        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
661            self.inner = self.inner.send_compressed(encoding);
662            self
663        }
664        /// Enable decompressing responses.
665        #[must_use]
666        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
667            self.inner = self.inner.accept_compressed(encoding);
668            self
669        }
670        /// Limits the maximum size of a decoded message.
671        ///
672        /// Default: `4MB`
673        #[must_use]
674        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
675            self.inner = self.inner.max_decoding_message_size(limit);
676            self
677        }
678        /// Limits the maximum size of an encoded message.
679        ///
680        /// Default: `usize::MAX`
681        #[must_use]
682        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
683            self.inner = self.inner.max_encoding_message_size(limit);
684            self
685        }
686        /// Get raw sampling of text response from the model inference.
687        pub async fn sample_text(
688            &mut self,
689            request: impl tonic::IntoRequest<super::SampleTextRequest>,
690        ) -> std::result::Result<
691            tonic::Response<super::SampleTextResponse>,
692            tonic::Status,
693        > {
694            self.inner
695                .ready()
696                .await
697                .map_err(|e| {
698                    tonic::Status::unknown(
699                        format!("Service was not ready: {}", e.into()),
700                    )
701                })?;
702            let codec = tonic_prost::ProstCodec::default();
703            let path = http::uri::PathAndQuery::from_static(
704                "/xai_api.Sample/SampleText",
705            );
706            let mut req = request.into_request();
707            req.extensions_mut().insert(GrpcMethod::new("xai_api.Sample", "SampleText"));
708            self.inner.unary(req, path, codec).await
709        }
710        /// Get streaming raw sampling of text response from the model inference.
711        pub async fn sample_text_streaming(
712            &mut self,
713            request: impl tonic::IntoRequest<super::SampleTextRequest>,
714        ) -> std::result::Result<
715            tonic::Response<tonic::codec::Streaming<super::SampleTextResponse>>,
716            tonic::Status,
717        > {
718            self.inner
719                .ready()
720                .await
721                .map_err(|e| {
722                    tonic::Status::unknown(
723                        format!("Service was not ready: {}", e.into()),
724                    )
725                })?;
726            let codec = tonic_prost::ProstCodec::default();
727            let path = http::uri::PathAndQuery::from_static(
728                "/xai_api.Sample/SampleTextStreaming",
729            );
730            let mut req = request.into_request();
731            req.extensions_mut()
732                .insert(GrpcMethod::new("xai_api.Sample", "SampleTextStreaming"));
733            self.inner.server_streaming(req, path, codec).await
734        }
735    }
736}
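// Illustrative sketch, not generated code: drains the stream returned by
// `sample_text_streaming(...).await?.into_inner()` and concatenates the text of every
// streamed choice. `Streaming::message` yields chunks until the server ends the stream.
#[allow(dead_code)]
async fn collect_streamed_text(
    mut stream: tonic::codec::Streaming<SampleTextResponse>,
) -> Result<String, tonic::Status> {
    let mut text = String::new();
    while let Some(chunk) = stream.message().await? {
        for choice in &chunk.choices {
            text.push_str(&choice.text);
        }
    }
    Ok(text)
}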
737#[derive(Clone, PartialEq, ::prost::Message)]
738pub struct GetCompletionsRequest {
739    /// A sequence of messages in the conversation. There must be at least a single
740    /// message that the model can respond to.
741    #[prost(message, repeated, tag = "1")]
742    pub messages: ::prost::alloc::vec::Vec<Message>,
743    /// Name of the model. This is the name as reported by the models API. More
744    /// details can be found on your console at <https://console.x.ai>.
745    #[prost(string, tag = "2")]
746    pub model: ::prost::alloc::string::String,
747    /// An opaque string supplied by the API client (customer) to identify a user.
748    /// The string will be stored in the logs and can be used in customer service
749    /// requests to identify certain requests.
750    #[prost(string, tag = "16")]
751    pub user: ::prost::alloc::string::String,
752    /// The number of completions to create concurrently. A single completion will
753    /// be generated if the parameter is unset. Each completion is charged at the
754    /// same rate. You can generate at most 128 concurrent completions.
755    /// PLEASE NOTE: This field is deprecated and will be removed in the future.
756    #[prost(int32, optional, tag = "8")]
757    pub n: ::core::option::Option<i32>,
758    /// The maximum number of tokens to sample. If unset, the model samples until
759    /// one of the following stop-conditions is reached:
760    ///
761    /// * The context length of the model is exceeded
762    /// * One of the `stop` sequences has been observed.
763    /// * The time limit is exceeded.
764    ///
765    /// Note that for reasoning models and models that support function calls, the
766    /// limit is only applied to the main content and not to the reasoning content
767    /// or function calls.
768    ///
769    /// We recommend choosing a reasonable value to reduce the risk of accidental
770    /// long generations that consume many tokens.
771    #[prost(int32, optional, tag = "7")]
772    pub max_tokens: ::core::option::Option<i32>,
773    /// A random seed used to make the sampling process deterministic. This is
774    /// provided on a best-effort basis, without a guarantee that sampling is 100%
775    /// deterministic given a seed. This is primarily provided for short-lived
776    /// testing purposes. Given a fixed request and seed, the answers may change
777    /// over time as our systems evolve.
778    #[prost(int32, optional, tag = "11")]
779    pub seed: ::core::option::Option<i32>,
780    /// String patterns that will cause the sampling procedure to stop prematurely
781    /// when observed.
782    /// Note that the completion is based on individual tokens and sampling can
783    /// only terminate at token boundaries. If a stop string is a substring of an
784    /// individual token, the completion will include the entire token, which
785    /// extends beyond the stop string.
786    /// For example, if `stop = \["wor"\]` and we prompt the model with "hello" to
787    /// which it responds with "world", then the sampling procedure will stop after
788    /// observing the "world" token and the completion will contain
789    /// the entire word "world" even though the stop string was just "wor".
790    /// You can provide at most 8 stop strings.
791    #[prost(string, repeated, tag = "12")]
792    pub stop: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
793    /// A number between 0 and 2 used to control the variance of completions.
794    /// The smaller the value, the more deterministic the model will become. For
795    /// example, if we sample 1000 answers to the same prompt at a temperature of
796    /// 0.001, then most of the 1000 answers will be identical. Conversely, if we
797    /// conduct the same experiment at a temperature of 2, virtually no two answers
798    /// will be identical. Note that increasing the temperature will cause
799    /// the model to hallucinate more strongly.
800    #[prost(float, optional, tag = "14")]
801    pub temperature: ::core::option::Option<f32>,
802    /// A number between 0 and 1 controlling the likelihood that the model uses
803    /// less-common answers. Recall that the model produces a probability for
804    /// each token. This means, for any choice of token there are thousands of
805    /// possibilities to choose from. This parameter controls the "nucleus sampling
806    /// algorithm". Instead of considering every possible token at every step, we
807    /// only look at the most likely tokens whose cumulative probability reaches `top_p`.
808    /// For example, if we set `top_p = 0.9`, then the set of tokens we actually
809    /// sample from will have a probability mass of at least 90%. In practice,
810    /// low values will make the model more deterministic.
811    #[prost(float, optional, tag = "15")]
812    pub top_p: ::core::option::Option<f32>,
813    /// If set to true, log probabilities of the sampling are returned.
814    #[prost(bool, tag = "5")]
815    pub logprobs: bool,
816    /// Number of top log probabilities to return.
817    #[prost(int32, optional, tag = "6")]
818    pub top_logprobs: ::core::option::Option<i32>,
819    /// A list of tools the model may call. Currently, only functions are supported
820    /// as a tool. Use this to provide a list of functions the model may generate
821    /// JSON inputs for.
822    #[prost(message, repeated, tag = "17")]
823    pub tools: ::prost::alloc::vec::Vec<Tool>,
824    /// Controls if the model can, should, or must not use tools.
825    #[prost(message, optional, tag = "18")]
826    pub tool_choice: ::core::option::Option<ToolChoice>,
827    /// Formatting constraint on the response.
828    #[prost(message, optional, tag = "10")]
829    pub response_format: ::core::option::Option<ResponseFormat>,
830    /// Positive values penalize new tokens based on their existing frequency in
831    /// the text so far, decreasing the model's likelihood to repeat the same line
832    /// verbatim.
833    #[prost(float, optional, tag = "3")]
834    pub frequency_penalty: ::core::option::Option<f32>,
835    /// Positive values penalize new tokens based on whether they appear in
836    /// the text so far, increasing the model's likelihood to talk about
837    /// new topics.
838    #[prost(float, optional, tag = "9")]
839    pub presence_penalty: ::core::option::Option<f32>,
840    /// Constrains the reasoning effort for reasoning models. Defaults to `EFFORT_MEDIUM`.
841    #[prost(enumeration = "ReasoningEffort", optional, tag = "19")]
842    pub reasoning_effort: ::core::option::Option<i32>,
843    /// Set the parameters to be used for realtime data. If not set, no realtime data will be acquired by the model.
844    #[prost(message, optional, tag = "20")]
845    pub search_parameters: ::core::option::Option<SearchParameters>,
846    /// If set to false, the model can perform at most one tool call per response. Defaults to true.
847    #[prost(bool, optional, tag = "21")]
848    pub parallel_tool_calls: ::core::option::Option<bool>,
849    /// Previous response ID. The messages from that response must be chained.
850    #[prost(string, optional, tag = "22")]
851    pub previous_response_id: ::core::option::Option<::prost::alloc::string::String>,
852    /// Whether to store the request and responses. Defaults to false.
853    #[prost(bool, tag = "23")]
854    pub store_messages: bool,
855    /// Whether to use encrypted thinking for thinking trace rehydration.
856    #[prost(bool, tag = "24")]
857    pub use_encrypted_content: bool,
858    /// Maximum number of agentic tool calling turns allowed for this request.
859    /// If not set, defaults to the server's global cap.
860    /// The effective max_turns will be the min of the server's global cap and the request's max_turns.
861    /// This parameter will be ignored for any non-agentic requests.
862    /// With parallel tool calls, multiple tool calls can occur within a single turn,
863    /// so max_turns does not necessarily equal the total number of tool calls.
864    #[prost(int32, optional, tag = "25")]
865    pub max_turns: ::core::option::Option<i32>,
866    /// Allows the user to control which optional fields are returned in the response.
867    #[prost(enumeration = "IncludeOption", repeated, tag = "26")]
868    pub include: ::prost::alloc::vec::Vec<i32>,
869}
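// Illustrative sketch, not generated code: a minimal chat request. `Message`, `Content`,
// and the role enumeration are defined later in this file; the role is left at its prost
// default here only to keep the sketch self-contained, and the model name is a placeholder.
#[allow(dead_code)]
fn example_completions_request() -> GetCompletionsRequest {
    GetCompletionsRequest {
        model: "placeholder-chat-model".to_string(),
        messages: vec![Message {
            content: vec![Content {
                content: Some(content::Content::Text("Hello!".to_string())),
            }],
            ..Default::default()
        }],
        max_tokens: Some(256),
        ..Default::default()
    }
}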
870#[derive(Clone, PartialEq, ::prost::Message)]
871pub struct GetChatCompletionResponse {
872    /// The ID of this request. This ID will also show up on your billing records
873    /// and you can use it when contacting us regarding a specific request.
874    #[prost(string, tag = "1")]
875    pub id: ::prost::alloc::string::String,
876    /// Model-generated outputs/responses to the input messages. Each output contains
877    /// the model's response including text content, reasoning traces, tool calls, and
878    /// metadata about the generation process.
879    #[prost(message, repeated, tag = "2")]
880    pub outputs: ::prost::alloc::vec::Vec<CompletionOutput>,
881    /// A UNIX timestamp (UTC) indicating when the response object was created.
882    /// The timestamp is taken when the model starts generating the response.
883    #[prost(message, optional, tag = "5")]
884    pub created: ::core::option::Option<::prost_types::Timestamp>,
885    /// The name of the model used for the request. This model name contains
886    /// the actual model name used rather than any aliases.
887    /// This means the model name can be `grok-2-1212` even when the request was
888    /// specifying `grok-2-latest`.
889    #[prost(string, tag = "6")]
890    pub model: ::prost::alloc::string::String,
891    /// This fingerprint represents the backend configuration that the model runs
892    /// with.
893    #[prost(string, tag = "7")]
894    pub system_fingerprint: ::prost::alloc::string::String,
895    /// The number of tokens consumed by this request.
896    #[prost(message, optional, tag = "9")]
897    pub usage: ::core::option::Option<SamplingUsage>,
898    /// List of all the external pages (URLs) used by the model to produce its final answer.
899    /// This is only present when live search is enabled (that is, `SearchParameters` has been defined in `GetCompletionsRequest`).
900    #[prost(string, repeated, tag = "10")]
901    pub citations: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
902    /// Settings used while generating the response.
903    #[prost(message, optional, tag = "11")]
904    pub settings: ::core::option::Option<RequestSettings>,
905    /// Debug output. Only available to trusted testers.
906    #[prost(message, optional, tag = "12")]
907    pub debug_output: ::core::option::Option<DebugOutput>,
908}
909#[derive(Clone, PartialEq, ::prost::Message)]
910pub struct GetChatCompletionChunk {
911    /// The ID of this request. This ID will also show up on your billing records
912    /// and you can use it when contacting us regarding a specific request.
913    #[prost(string, tag = "1")]
914    pub id: ::prost::alloc::string::String,
915    /// Model-generated outputs/responses being streamed as they are generated.
916    /// Each output chunk contains incremental updates to the model's response.
917    #[prost(message, repeated, tag = "2")]
918    pub outputs: ::prost::alloc::vec::Vec<CompletionOutputChunk>,
919    /// A UNIX timestamp (UTC) indicating when the response object was created.
920    /// The timestamp is taken when the model starts generating the response.
921    #[prost(message, optional, tag = "3")]
922    pub created: ::core::option::Option<::prost_types::Timestamp>,
923    /// The name of the model used for the request. This model name contains
924    /// the actual model name used rather than any aliases.
925    /// This means the model name can be `grok-2-1212` even when the request was
926    /// specifying `grok-2-latest`.
927    #[prost(string, tag = "4")]
928    pub model: ::prost::alloc::string::String,
929    /// This fingerprint represents the backend configuration that the model runs
930    /// with.
931    #[prost(string, tag = "5")]
932    pub system_fingerprint: ::prost::alloc::string::String,
933    /// The total number of tokens consumed when this chunk was streamed. Note that
934    /// this is not the final number of tokens billed unless this is the last chunk
935    /// in the stream.
936    #[prost(message, optional, tag = "6")]
937    pub usage: ::core::option::Option<SamplingUsage>,
938    /// List of all the external pages used by the model to answer. Only populated for the last chunk.
939    /// This is only present for requests that make use of live search or server-side search tools.
940    #[prost(string, repeated, tag = "7")]
941    pub citations: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
942    /// Only available for teams that have debugging privileges.
943    #[prost(message, optional, tag = "10")]
944    pub debug_output: ::core::option::Option<DebugOutput>,
945}
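// Illustrative sketch, not generated code: streamed chat chunks carry deltas, so the
// final answer text is reconstructed by appending `delta.content` from every output
// chunk, in the order the chunks arrive.
#[allow(dead_code)]
fn append_chunk_text(buffer: &mut String, chunk: &GetChatCompletionChunk) {
    for output in &chunk.outputs {
        if let Some(delta) = &output.delta {
            buffer.push_str(&delta.content);
        }
    }
}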
946/// Response from GetDeferredCompletion, including the response if the completion
947/// request has been processed without error.
948#[derive(Clone, PartialEq, ::prost::Message)]
949pub struct GetDeferredCompletionResponse {
950    /// Current status of the request.
951    #[prost(enumeration = "DeferredStatus", tag = "2")]
952    pub status: i32,
953    /// Response. Only present if `status=DONE`
954    #[prost(message, optional, tag = "1")]
955    pub response: ::core::option::Option<GetChatCompletionResponse>,
956}
957/// Contains the response generated by the model.
958#[derive(Clone, PartialEq, ::prost::Message)]
959pub struct CompletionOutput {
960    /// Indicating why the model stopped sampling.
961    #[prost(enumeration = "FinishReason", tag = "1")]
962    pub finish_reason: i32,
963    /// The index of this output in the list of outputs. When multiple outputs are
964    /// generated, each output is assigned a sequential index starting from 0.
965    #[prost(int32, tag = "2")]
966    pub index: i32,
967    /// The actual message generated by the model.
968    #[prost(message, optional, tag = "3")]
969    pub message: ::core::option::Option<CompletionMessage>,
970    /// The log probabilities of the sampling.
971    #[prost(message, optional, tag = "4")]
972    pub logprobs: ::core::option::Option<LogProbs>,
973}
974/// Holds the model output (i.e. the result of the sampling process).
975#[derive(Clone, PartialEq, ::prost::Message)]
976pub struct CompletionMessage {
977    /// The generated text based on the input prompt.
978    #[prost(string, tag = "1")]
979    pub content: ::prost::alloc::string::String,
980    /// Reasoning trace the model produced before issuing the final answer.
981    #[prost(string, tag = "4")]
982    pub reasoning_content: ::prost::alloc::string::String,
983    /// The role of the message author. Will always default to "assistant".
984    #[prost(enumeration = "MessageRole", tag = "2")]
985    pub role: i32,
986    /// The tools that the assistant wants to call.
987    #[prost(message, repeated, tag = "3")]
988    pub tool_calls: ::prost::alloc::vec::Vec<ToolCall>,
989    /// The encrypted content.
990    #[prost(string, tag = "5")]
991    pub encrypted_content: ::prost::alloc::string::String,
992    /// The citations that the model used to answer the question.
993    #[prost(message, repeated, tag = "6")]
994    pub citations: ::prost::alloc::vec::Vec<InlineCitation>,
995}
996/// Holds the differences (deltas) that when concatenated make up the entire
997/// agent response.
998#[derive(Clone, PartialEq, ::prost::Message)]
999pub struct CompletionOutputChunk {
1000    /// The actual text differences that need to be accumulated on the client.
1001    #[prost(message, optional, tag = "1")]
1002    pub delta: ::core::option::Option<Delta>,
1003    /// The log probability of the choice.
1004    #[prost(message, optional, tag = "2")]
1005    pub logprobs: ::core::option::Option<LogProbs>,
1006    /// Indicating why the model stopped sampling.
1007    #[prost(enumeration = "FinishReason", tag = "3")]
1008    pub finish_reason: i32,
1009    /// The index of this output chunk in the list of output chunks.
1010    #[prost(int32, tag = "4")]
1011    pub index: i32,
1012}
1013/// The delta of a streaming response.
1014#[derive(Clone, PartialEq, ::prost::Message)]
1015pub struct Delta {
1016    /// The main model output/answer.
1017    #[prost(string, tag = "1")]
1018    pub content: ::prost::alloc::string::String,
1019    /// Part of the model's reasoning trace.
1020    #[prost(string, tag = "4")]
1021    pub reasoning_content: ::prost::alloc::string::String,
1022    /// The entity type who sent the message. For example, a message can be sent by
1023    /// a user or the assistant.
1024    #[prost(enumeration = "MessageRole", tag = "2")]
1025    pub role: i32,
1026    /// A list of tool calls if tool call is requested by the model.
1027    #[prost(message, repeated, tag = "3")]
1028    pub tool_calls: ::prost::alloc::vec::Vec<ToolCall>,
1029    /// The encrypted content.
1030    #[prost(string, tag = "5")]
1031    pub encrypted_content: ::prost::alloc::string::String,
1032    /// The citations that the model used to answer the question.
1033    #[prost(message, repeated, tag = "6")]
1034    pub citations: ::prost::alloc::vec::Vec<InlineCitation>,
1035}
1036#[derive(Clone, PartialEq, ::prost::Message)]
1037pub struct InlineCitation {
1038    /// The globally unique id of the citation per response.
1039    #[prost(string, tag = "1")]
1040    pub id: ::prost::alloc::string::String,
1041    /// The index where the inline citation should be inserted in the complete text response.
1042    #[prost(int32, tag = "2")]
1043    pub start_index: i32,
1044    /// The citation type.
1045    #[prost(oneof = "inline_citation::Citation", tags = "3, 4, 5")]
1046    pub citation: ::core::option::Option<inline_citation::Citation>,
1047}
1048/// Nested message and enum types in `InlineCitation`.
1049pub mod inline_citation {
1050    /// The citation type.
1051    #[derive(Clone, PartialEq, ::prost::Oneof)]
1052    pub enum Citation {
1053        /// The citation returned from the web search tool.
1054        #[prost(message, tag = "3")]
1055        WebCitation(super::WebCitation),
1056        /// The citation returned from the X search tool.
1057        #[prost(message, tag = "4")]
1058        XCitation(super::XCitation),
1059        /// The citation returned from the collections search tool.
1060        #[prost(message, tag = "5")]
1061        CollectionsCitation(super::CollectionsCitation),
1062    }
1063}
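// Illustrative sketch, not generated code: extracts a displayable source for each
// citation variant; collections citations carry a file id rather than a URL.
#[allow(dead_code)]
fn citation_source(citation: &InlineCitation) -> Option<String> {
    match citation.citation.as_ref()? {
        inline_citation::Citation::WebCitation(web) => Some(web.url.clone()),
        inline_citation::Citation::XCitation(post) => Some(post.url.clone()),
        inline_citation::Citation::CollectionsCitation(chunk) => Some(chunk.file_id.clone()),
    }
}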
1064#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
1065pub struct WebCitation {
1066    /// The url of the web page that the citation is from.
1067    #[prost(string, tag = "1")]
1068    pub url: ::prost::alloc::string::String,
1069}
1070#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
1071pub struct XCitation {
1072    /// The url of the X post or profile that the citation is from.
1073    /// The url is always an x.com url.
1074    #[prost(string, tag = "1")]
1075    pub url: ::prost::alloc::string::String,
1076}
1077#[derive(Clone, PartialEq, ::prost::Message)]
1078pub struct CollectionsCitation {
1079    /// The id of the file that the citation is from.
1080    #[prost(string, tag = "1")]
1081    pub file_id: ::prost::alloc::string::String,
1082    /// The id of the chunk that the citation is from.
1083    #[prost(string, tag = "2")]
1084    pub chunk_id: ::prost::alloc::string::String,
1085    /// The content of the chunk that the citation is from.
1086    #[prost(string, tag = "3")]
1087    pub chunk_content: ::prost::alloc::string::String,
1088    /// The relevance score of the citation.
1089    #[prost(float, tag = "4")]
1090    pub score: f32,
1091    /// The ids of the collections that the citation is from.
1092    #[prost(string, repeated, tag = "5")]
1093    pub collection_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
1094}
1095/// Holding the log probabilities of the sampling.
1096#[derive(Clone, PartialEq, ::prost::Message)]
1097pub struct LogProbs {
1098    /// A list of log probability entries, each corresponding to a sampled token
1099    /// and its associated data.
1100    #[prost(message, repeated, tag = "1")]
1101    pub content: ::prost::alloc::vec::Vec<LogProb>,
1102}
1103/// Represents the logarithmic probability and metadata for a single sampled
1104/// token.
1105#[derive(Clone, PartialEq, ::prost::Message)]
1106pub struct LogProb {
1107    /// The text representation of the sampled token.
1108    #[prost(string, tag = "1")]
1109    pub token: ::prost::alloc::string::String,
1110    /// The logarithmic probability of this token being sampled, given the prior
1111    /// context.
1112    #[prost(float, tag = "2")]
1113    pub logprob: f32,
1114    /// The raw byte representation of the token, useful for handling non-text or
1115    /// encoded data.
1116    #[prost(bytes = "vec", tag = "3")]
1117    pub bytes: ::prost::alloc::vec::Vec<u8>,
1118    /// A list of the top alternative tokens and their log probabilities at this
1119    /// sampling step.
1120    #[prost(message, repeated, tag = "4")]
1121    pub top_logprobs: ::prost::alloc::vec::Vec<TopLogProb>,
1122}
1123/// Represents an alternative token and its log probability among the top
1124/// candidates.
1125#[derive(Clone, PartialEq, ::prost::Message)]
1126pub struct TopLogProb {
1127    /// The text representation of an alternative token considered by the model.
1128    #[prost(string, tag = "1")]
1129    pub token: ::prost::alloc::string::String,
1130    /// The logarithmic probability of this alternative token being sampled.
1131    #[prost(float, tag = "2")]
1132    pub logprob: f32,
1133    /// The raw byte representation of the alternative token.
1134    #[prost(bytes = "vec", tag = "3")]
1135    pub bytes: ::prost::alloc::vec::Vec<u8>,
1136}
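// Illustrative sketch, not generated code: walks the per-token log probabilities and
// pairs each sampled token with its most likely alternative, when one was returned.
#[allow(dead_code)]
fn sampled_vs_top_alternative(logprobs: &LogProbs) -> Vec<(String, Option<String>)> {
    logprobs
        .content
        .iter()
        .map(|entry| {
            (
                entry.token.clone(),
                entry.top_logprobs.first().map(|alt| alt.token.clone()),
            )
        })
        .collect()
}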
1137/// Holds a single content element that is part of an input message.
1138#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
1139pub struct Content {
1140    #[prost(oneof = "content::Content", tags = "1, 2, 3")]
1141    pub content: ::core::option::Option<content::Content>,
1142}
1143/// Nested message and enum types in `Content`.
1144pub mod content {
1145    #[derive(Clone, PartialEq, Eq, Hash, ::prost::Oneof)]
1146    pub enum Content {
1147        /// The content is a pure text message.
1148        #[prost(string, tag = "1")]
1149        Text(::prost::alloc::string::String),
1150        /// The content is a single image.
1151        #[prost(message, tag = "2")]
1152        ImageUrl(super::ImageUrlContent),
1153        /// The content is a file attachment (PDF, document, etc.).
1154        #[prost(message, tag = "3")]
1155        File(super::FileContent),
1156    }
1157}
1158/// A file attachment in a message.
1159#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
1160pub struct FileContent {
1161    /// The file ID returned by the Files API when a user uploads a file.
1162    /// This ID is used to reference the uploaded file in chat conversations.
1163    #[prost(string, tag = "1")]
1164    pub file_id: ::prost::alloc::string::String,
1165}
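// Illustrative sketch (not part of the prost-build output): constructing
// `Content` elements for a text snippet and an uploaded file. The file id is
// a hypothetical value that would normally come from the Files API.
#[allow(dead_code)]
fn example_contents() -> Vec<Content> {
    vec![
        Content {
            content: Some(content::Content::Text("Hello, world!".to_string())),
        },
        Content {
            content: Some(content::Content::File(FileContent {
                file_id: "file-123".to_string(), // hypothetical file id
            })),
        },
    ]
}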
1166/// A message in a conversation. This message is part of the model input. Each
1167/// message originates from a "role", which indicates the entity type that sent
1168/// the message. Messages can contain multiple content elements such as text and
1169/// images.
1170#[derive(Clone, PartialEq, ::prost::Message)]
1171pub struct Message {
1172    /// The content of the message. Some models support multi-modal message contents
1173    /// that consist of text and images. At least one content element must be set
1174    /// for each message.
1175    #[prost(message, repeated, tag = "1")]
1176    pub content: ::prost::alloc::vec::Vec<Content>,
1177    /// Reasoning trace the model produced before issuing the final answer.
1178    #[prost(string, optional, tag = "5")]
1179    pub reasoning_content: ::core::option::Option<::prost::alloc::string::String>,
1180    /// The entity type that sent the message. For example, a message can be sent by
1181    /// a user or the assistant.
1182    #[prost(enumeration = "MessageRole", tag = "2")]
1183    pub role: i32,
1184    /// The name of the entity who sent the message. The name can only be set if
1185    /// the role is ROLE_USER.
1186    #[prost(string, tag = "3")]
1187    pub name: ::prost::alloc::string::String,
1188    /// The tools that the assistant wants to call.
1189    #[prost(message, repeated, tag = "4")]
1190    pub tool_calls: ::prost::alloc::vec::Vec<ToolCall>,
1191    /// The encrypted content.
1192    #[prost(string, tag = "6")]
1193    pub encrypted_content: ::prost::alloc::string::String,
1194}
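// Illustrative sketch (not part of the prost-build output): assembling a
// plain-text user message. Enum-typed fields are stored as `i32`, following
// the prost convention; unused fields fall back to their defaults.
#[allow(dead_code)]
fn example_user_message(text: &str) -> Message {
    Message {
        content: vec![Content {
            content: Some(content::Content::Text(text.to_string())),
        }],
        role: MessageRole::RoleUser as i32,
        ..Default::default()
    }
}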
1195#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
1196pub struct ToolChoice {
1197    #[prost(oneof = "tool_choice::ToolChoice", tags = "1, 2")]
1198    pub tool_choice: ::core::option::Option<tool_choice::ToolChoice>,
1199}
1200/// Nested message and enum types in `ToolChoice`.
1201pub mod tool_choice {
1202    #[derive(Clone, PartialEq, Eq, Hash, ::prost::Oneof)]
1203    pub enum ToolChoice {
1204        /// Force the model to perform in a given mode.
1205        #[prost(enumeration = "super::ToolMode", tag = "1")]
1206        Mode(i32),
1207        /// Force the model to call a particular function.
1208        #[prost(string, tag = "2")]
1209        FunctionName(::prost::alloc::string::String),
1210    }
1211}
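// Illustrative sketch (not part of the prost-build output): the two ways to
// constrain tool usage, either by mode or by naming one specific function.
// The function name is a hypothetical example.
#[allow(dead_code)]
fn example_tool_choices() -> (ToolChoice, ToolChoice) {
    let auto = ToolChoice {
        tool_choice: Some(tool_choice::ToolChoice::Mode(ToolMode::Auto as i32)),
    };
    let forced = ToolChoice {
        tool_choice: Some(tool_choice::ToolChoice::FunctionName(
            "get_weather".to_string(), // hypothetical function name
        )),
    };
    (auto, forced)
}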
1212#[derive(Clone, PartialEq, ::prost::Message)]
1213pub struct Tool {
1214    #[prost(oneof = "tool::Tool", tags = "1, 3, 4, 5, 6, 7, 8")]
1215    pub tool: ::core::option::Option<tool::Tool>,
1216}
1217/// Nested message and enum types in `Tool`.
1218pub mod tool {
1219    #[derive(Clone, PartialEq, ::prost::Oneof)]
1220    pub enum Tool {
1221        /// Tool Call defined by user
1222        #[prost(message, tag = "1")]
1223        Function(super::Function),
1224        /// Built in web search.
1225        #[prost(message, tag = "3")]
1226        WebSearch(super::WebSearch),
1227        /// Built in X search.
1228        #[prost(message, tag = "4")]
1229        XSearch(super::XSearch),
1230        /// Built in code execution.
1231        #[prost(message, tag = "5")]
1232        CodeExecution(super::CodeExecution),
1233        /// Built in collections search.
1234        #[prost(message, tag = "6")]
1235        CollectionsSearch(super::CollectionsSearch),
1236        /// A remote MCP server to use.
1237        #[prost(message, tag = "7")]
1238        Mcp(super::Mcp),
1239        /// Built in document search.
1240        #[prost(message, tag = "8")]
1241        DocumentSearch(super::DocumentSearch),
1242    }
1243}
1244#[derive(Clone, PartialEq, ::prost::Message)]
1245pub struct Mcp {
1246    /// A label for the server. If provided, this will be used to prefix tool calls.
1247    #[prost(string, tag = "1")]
1248    pub server_label: ::prost::alloc::string::String,
1249    /// A description of the server.
1250    #[prost(string, tag = "2")]
1251    pub server_description: ::prost::alloc::string::String,
1252    /// The URL of the MCP server.
1253    #[prost(string, tag = "3")]
1254    pub server_url: ::prost::alloc::string::String,
1255    /// A list of tool names that are allowed to be called by the model. If empty, all tools are allowed.
1256    #[prost(string, repeated, tag = "4")]
1257    pub allowed_tool_names: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
1258    /// An optional authorization token to use when calling the MCP server. This will be set as the Authorization header.
1259    #[prost(string, optional, tag = "5")]
1260    pub authorization: ::core::option::Option<::prost::alloc::string::String>,
1261    /// Extra headers that will be included in the request to the MCP server.
1262    #[prost(map = "string, string", tag = "6")]
1263    pub extra_headers: ::std::collections::HashMap<
1264        ::prost::alloc::string::String,
1265        ::prost::alloc::string::String,
1266    >,
1267}
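// Illustrative sketch (not part of the prost-build output): pointing the model
// at a remote MCP server. The URL, header, and token are placeholders.
#[allow(dead_code)]
fn example_mcp_tool() -> Tool {
    let mut extra_headers = std::collections::HashMap::new();
    extra_headers.insert("x-example-header".to_string(), "value".to_string());
    Tool {
        tool: Some(tool::Tool::Mcp(Mcp {
            server_label: "docs".to_string(),
            server_description: "Documentation search server.".to_string(),
            server_url: "https://mcp.example.com".to_string(),
            allowed_tool_names: vec!["search".to_string()],
            authorization: Some("Bearer <token>".to_string()),
            extra_headers,
        })),
    }
}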
1268#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
1269pub struct WebSearch {
1270    /// List of website domains (without protocol specification or subdomains) to exclude from search results (e.g., \["example.com"\]).
1271    /// Use this to prevent results from unwanted sites. A maximum of 5 websites can be excluded.
1272    /// This parameter cannot be set together with `allowed_domains`.
1273    #[prost(string, repeated, tag = "1")]
1274    pub excluded_domains: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
1275    /// List of website domains (without protocol specification or subdomains)
1276    /// to restrict search results to (e.g., \["example.com"\]). A maximum of 5 websites can be allowed.
1277    /// Use this as a whitelist to limit results to only these specific sites; no other websites will
1278    /// be considered. If no relevant information is found on these websites, the number of results
1279    /// returned might be smaller than `max_search_results` set in `SearchParameters`. Note: This
1280    /// parameter cannot be set together with `excluded_domains`.
1281    #[prost(string, repeated, tag = "2")]
1282    pub allowed_domains: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
1283    /// Enable image understanding in downstream tools (e.g. allow fetching and interpreting images).
1284    /// When true, the server may add image viewing tools to the active MCP toolset.
1285    #[prost(bool, optional, tag = "3")]
1286    pub enable_image_understanding: ::core::option::Option<bool>,
1287}
1288#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
1289pub struct XSearch {
1290    /// Optional start date for search results in ISO-8601 YYYY-MM-DD format (e.g., "2024-05-24").
1291    /// Only content after this date will be considered. Defaults to unset (no start date restriction).
1292    /// See <https://en.wikipedia.org/wiki/ISO_8601> for format details.
1293    #[prost(message, optional, tag = "1")]
1294    pub from_date: ::core::option::Option<::prost_types::Timestamp>,
1295    /// Optional end date for search results in ISO-8601 YYYY-MM-DD format (e.g., "2024-12-24").
1296    /// Only content before this date will be considered. Defaults to unset (no end date restriction).
1297    /// See <https://en.wikipedia.org/wiki/ISO_8601> for format details.
1298    #[prost(message, optional, tag = "2")]
1299    pub to_date: ::core::option::Option<::prost_types::Timestamp>,
1300    /// Optional list of X usernames (without the '@' symbol) to limit search results to posts
1301    /// from specific accounts (e.g., \["xai"\]). If set, only posts authored by these
1302    /// handles will be considered in the agentic search.
1303    /// This field cannot be set together with `excluded_x_handles`.
1304    /// Defaults to unset (no handle restriction).
1305    #[prost(string, repeated, tag = "3")]
1306    pub allowed_x_handles: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
1307    /// Optional list of X usernames (without the '@' symbol) used to exclude posts from specific accounts.
1308    /// If set, posts authored by these handles will be excluded from the agentic search results.
1309    /// This field cannot be set together with `allowed_x_handles`.
1310    /// Defaults to unset (no exclusions).
1311    #[prost(string, repeated, tag = "4")]
1312    pub excluded_x_handles: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
1313    /// Enable image understanding in downstream tools (e.g. allow fetching and interpreting images).
1314    /// When true, the server may add image viewing tools to the active MCP toolset.
1315    #[prost(bool, optional, tag = "5")]
1316    pub enable_image_understanding: ::core::option::Option<bool>,
1317    /// Enable video understanding in downstream tools (e.g. allow fetching and interpreting videos).
1318    /// When true, the server may add video viewing tools to the active MCP toolset.
1319    #[prost(bool, optional, tag = "6")]
1320    pub enable_video_understanding: ::core::option::Option<bool>,
1321}
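// Illustrative sketch (not part of the prost-build output): enabling the
// built-in web and X search tools with a few of the documented filters. The
// domain and handle values are placeholders.
#[allow(dead_code)]
fn example_search_tools() -> Vec<Tool> {
    vec![
        Tool {
            tool: Some(tool::Tool::WebSearch(WebSearch {
                allowed_domains: vec!["example.com".to_string()],
                excluded_domains: vec![], // cannot be combined with `allowed_domains`
                enable_image_understanding: Some(true),
            })),
        },
        Tool {
            tool: Some(tool::Tool::XSearch(XSearch {
                allowed_x_handles: vec!["xai".to_string()],
                ..Default::default()
            })),
        },
    ]
}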
1322#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)]
1323pub struct CodeExecution {}
1324#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
1325pub struct CollectionsSearch {
1326    /// The ID(s) of the source collection(s) within which the search should be performed.
1327    /// A maximum of 10 collection IDs can be used for search.
1328    #[prost(string, repeated, tag = "1")]
1329    pub collection_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
1330    /// Optional number of chunks to be returned for each collections search.
1331    /// Defaults to 10.
1332    #[prost(int32, optional, tag = "2")]
1333    pub limit: ::core::option::Option<i32>,
1334}
1335#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)]
1336pub struct DocumentSearch {
1337    /// Optional number of files to limit the search to.
1338    #[prost(int32, optional, tag = "2")]
1339    pub limit: ::core::option::Option<i32>,
1340}
1341#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
1342pub struct Function {
1343    /// Name of the function.
1344    #[prost(string, tag = "1")]
1345    pub name: ::prost::alloc::string::String,
1346    /// Description of the function.
1347    #[prost(string, tag = "2")]
1348    pub description: ::prost::alloc::string::String,
1349    /// Not supported: Only kept for compatibility reasons.
1350    #[prost(bool, tag = "3")]
1351    pub strict: bool,
1352    /// The parameters the function accepts, described as a JSON Schema object.
1353    #[prost(string, tag = "4")]
1354    pub parameters: ::prost::alloc::string::String,
1355}
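// Illustrative sketch (not part of the prost-build output): declaring a
// user-defined function tool. The name and JSON Schema string are
// hypothetical examples.
#[allow(dead_code)]
fn example_function_tool() -> Tool {
    Tool {
        tool: Some(tool::Tool::Function(Function {
            name: "get_weather".to_string(),
            description: "Look up the current weather for a city.".to_string(),
            strict: false, // not supported; kept for compatibility
            parameters: r#"{"type":"object","properties":{"city":{"type":"string"}},"required":["city"]}"#
                .to_string(),
        })),
    }
}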
1356/// Content of a tool call, typically in a response from the model.
1357#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
1358pub struct ToolCall {
1359    /// The ID of the tool call.
1360    #[prost(string, tag = "1")]
1361    pub id: ::prost::alloc::string::String,
1362    /// Indicates whether the tool call needs to be executed on the client side or the server side.
1363    /// Defaults to a client-side tool call if not specified.
1364    #[prost(enumeration = "ToolCallType", tag = "2")]
1365    pub r#type: i32,
1366    /// Status of the tool call.
1367    #[prost(enumeration = "ToolCallStatus", tag = "3")]
1368    pub status: i32,
1369    /// Error message if the tool call failed.
1370    #[prost(string, optional, tag = "4")]
1371    pub error_message: ::core::option::Option<::prost::alloc::string::String>,
1372    /// Information regarding invoking the tool call.
1373    #[prost(oneof = "tool_call::Tool", tags = "10")]
1374    pub tool: ::core::option::Option<tool_call::Tool>,
1375}
1376/// Nested message and enum types in `ToolCall`.
1377pub mod tool_call {
1378    /// Information regarding invoking the tool call.
1379    #[derive(Clone, PartialEq, Eq, Hash, ::prost::Oneof)]
1380    pub enum Tool {
1381        #[prost(message, tag = "10")]
1382        Function(super::FunctionCall),
1383    }
1384}
1385/// Tool call information.
1386#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
1387pub struct FunctionCall {
1388    /// Name of the function to call.
1389    #[prost(string, tag = "1")]
1390    pub name: ::prost::alloc::string::String,
1391    /// Arguments used to call the function, encoded as a JSON string.
1392    #[prost(string, tag = "2")]
1393    pub arguments: ::prost::alloc::string::String,
1394}
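// Illustrative sketch (not part of the prost-build output): inspecting a tool
// call returned by the model and extracting the function name and its
// JSON-encoded arguments when it must be executed on the client side.
#[allow(dead_code)]
fn example_extract_function_call(call: &ToolCall) -> Option<(&str, &str)> {
    if call.r#type != ToolCallType::ClientSideTool as i32 {
        // Server-side tool calls are executed by the API; no client action needed.
        return None;
    }
    match &call.tool {
        Some(tool_call::Tool::Function(f)) => Some((f.name.as_str(), f.arguments.as_str())),
        None => None,
    }
}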
1395/// The response format for structured response.
1396#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
1397pub struct ResponseFormat {
1398    /// Type of format expected for the response. Defaults to `FORMAT_TYPE_TEXT`.
1399    #[prost(enumeration = "FormatType", tag = "1")]
1400    pub format_type: i32,
1401    /// The JSON schema that the response should conform to.
1402    /// Only considered if `format_type` is `FORMAT_TYPE_JSON_SCHEMA`.
1403    #[prost(string, optional, tag = "2")]
1404    pub schema: ::core::option::Option<::prost::alloc::string::String>,
1405}
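// Illustrative sketch (not part of the prost-build output): requesting output
// that conforms to a JSON schema. The schema itself is a hypothetical example.
#[allow(dead_code)]
fn example_json_schema_format() -> ResponseFormat {
    ResponseFormat {
        format_type: FormatType::JsonSchema as i32,
        schema: Some(
            r#"{"type":"object","properties":{"answer":{"type":"string"}}}"#.to_string(),
        ),
    }
}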
1406/// Parameters for configuring search behavior in a chat request.
1407///
1408/// This message allows customization of search functionality when using models that support
1409/// searching external sources for information. You can specify which sources to search,
1410/// set date ranges for relevant content, control the search mode, and configure how
1411/// results are returned.
1412#[derive(Clone, PartialEq, ::prost::Message)]
1413pub struct SearchParameters {
1414    /// Controls when search is performed. Possible values are:
1415    ///
1416    /// * OFF_SEARCH_MODE (default): No search is performed, and no external data will be considered.
1417    /// * ON_SEARCH_MODE: Search is always performed when sampling from the model and the model will search in every source provided for relevant data.
1418    /// * AUTO_SEARCH_MODE: The model decides whether to perform a search based on the prompt and which sources to use.
1419    #[prost(enumeration = "SearchMode", tag = "1")]
1420    pub mode: i32,
1421    /// A list of search sources to query, such as web, news, X, or RSS feeds.
1422    /// Multiple sources can be specified. If no sources are provided, the model will default to
1423    /// searching the web and X.
1424    #[prost(message, repeated, tag = "9")]
1425    pub sources: ::prost::alloc::vec::Vec<Source>,
1426    /// Optional start date for search results in ISO-8601 YYYY-MM-DD format (e.g., "2024-05-24").
1427    /// Only content after this date will be considered. Defaults to unset (no start date restriction).
1428    /// See <https://en.wikipedia.org/wiki/ISO_8601> for format details.
1429    #[prost(message, optional, tag = "4")]
1430    pub from_date: ::core::option::Option<::prost_types::Timestamp>,
1431    /// Optional end date for search results in ISO-8601 YYYY-MM-DD format (e.g., "2024-12-24").
1432    /// Only content before this date will be considered. Defaults to unset (no end date restriction).
1433    /// See <https://en.wikipedia.org/wiki/ISO_8601> for format details.
1434    #[prost(message, optional, tag = "5")]
1435    pub to_date: ::core::option::Option<::prost_types::Timestamp>,
1436    /// If set to true, the model will return a list of citations (URLs or references)
1437    /// to the sources used in generating the response. Defaults to true.
1438    #[prost(bool, tag = "7")]
1439    pub return_citations: bool,
1440    /// Optional limit on the number of search results to consider
1441    /// when generating a response. Must be in the range \[1, 30\]. Defaults to 15.
1442    #[prost(int32, optional, tag = "8")]
1443    pub max_search_results: ::core::option::Option<i32>,
1444}
1445/// Defines a source for search requests, specifying the type of content to search.
1446/// This message acts as a container for different types of search sources. Only one type
1447/// of source can be specified per instance using the oneof field.
1448#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
1449pub struct Source {
1450    #[prost(oneof = "source::Source", tags = "1, 2, 3, 4")]
1451    pub source: ::core::option::Option<source::Source>,
1452}
1453/// Nested message and enum types in `Source`.
1454pub mod source {
1455    #[derive(Clone, PartialEq, Eq, Hash, ::prost::Oneof)]
1456    pub enum Source {
1457        /// Configuration for searching online web content. Use this to search general websites
1458        /// with options to filter by country, exclude specific domains, or only allow specific domains.
1459        #[prost(message, tag = "1")]
1460        Web(super::WebSource),
1461        /// Configuration for searching recent articles and reports from news outlets.
1462        /// Useful for current events or topic-specific updates.
1463        #[prost(message, tag = "2")]
1464        News(super::NewsSource),
1465        /// Configuration for searching content on X. Allows focusing on
1466        /// specific user handles for targeted content.
1467        #[prost(message, tag = "3")]
1468        X(super::XSource),
1469        /// Configuration for searching content from RSS feeds. Requires specific feed URLs
1470        /// to query.
1471        #[prost(message, tag = "4")]
1472        Rss(super::RssSource),
1473    }
1474}
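// Illustrative sketch (not part of the prost-build output): automatic search
// over web and X sources, restricted to content from 2024-01-01 onwards. The
// timestamp value (1704067200 = 2024-01-01T00:00:00Z) and result limit are
// placeholders.
#[allow(dead_code)]
fn example_search_parameters() -> SearchParameters {
    SearchParameters {
        mode: SearchMode::AutoSearchMode as i32,
        sources: vec![
            Source {
                source: Some(source::Source::Web(WebSource::default())),
            },
            Source {
                source: Some(source::Source::X(XSource::default())),
            },
        ],
        from_date: Some(::prost_types::Timestamp {
            seconds: 1_704_067_200,
            nanos: 0,
        }),
        return_citations: true,
        max_search_results: Some(10),
        ..Default::default()
    }
}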
1475/// Configuration for a web search source in search requests.
1476///
1477/// This message configures a source for searching online web content. It allows specification
1478/// of regional content through country codes and filtering of results by excluding or allowing
1479/// specific websites.
1480#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
1481pub struct WebSource {
1482    /// List of website domains (without protocol specification or subdomains) to exclude from search results (e.g., \["example.com"\]).
1483    /// Use this to prevent results from unwanted sites. A maximum of 5 websites can be excluded.
1484    /// This parameter cannot be set together with `allowed_websites`.
1485    #[prost(string, repeated, tag = "2")]
1486    pub excluded_websites: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
1487    /// List of website domains (without protocol specification or subdomains)
1488    /// to restrict search results to (e.g., \["example.com"\]). A maximum of 5 websites can be allowed.
1489    /// Use this as a whitelist to limit results to only these specific sites; no other websites will
1490    /// be considered. If no relevant information is found on these websites, the number of results
1491    /// returned might be smaller than `max_search_results` set in `SearchParameters`. Note: This
1492    /// parameter cannot be set together with `excluded_websites`.
1493    #[prost(string, repeated, tag = "5")]
1494    pub allowed_websites: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
1495    /// Optional ISO alpha-2 country code (e.g., "BE" for Belgium) to limit search results
1496    /// to content from a specific region or country. Defaults to unset (global search).
1497    /// See <https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2> for valid codes.
1498    #[prost(string, optional, tag = "3")]
1499    pub country: ::core::option::Option<::prost::alloc::string::String>,
1500    /// Whether to exclude adult content from the search results. Defaults to true.
1501    #[prost(bool, tag = "4")]
1502    pub safe_search: bool,
1503}
1504/// Configuration for a news search source in search requests.
1505///
1506/// This message configures a source for searching recent articles and reports from news outlets.
1507/// It is useful for obtaining current events or topic-specific updates with regional filtering.
1508#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
1509pub struct NewsSource {
1510    /// List of website domains (without protocol specification or subdomains)
1511    /// to exclude from search results (e.g., \["example.com"\]). A maximum of 5 websites can be excluded.
1512    /// Use this to prevent results from specific news sites. Defaults to unset (no exclusions).
1513    #[prost(string, repeated, tag = "2")]
1514    pub excluded_websites: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
1515    /// Optional ISO alpha-2 country code (e.g., "BE" for Belgium) to limit search results
1516    /// to news from a specific region or country. Defaults to unset (global news).
1517    /// See <https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2> for valid codes.
1518    #[prost(string, optional, tag = "3")]
1519    pub country: ::core::option::Option<::prost::alloc::string::String>,
1520    /// Whether to exclude adult content from the search results. Defaults to true.
1521    #[prost(bool, tag = "4")]
1522    pub safe_search: bool,
1523}
1524/// Configuration for an X (formerly Twitter) search source in search requests.
1525///
1526/// This message configures a source for searching content on X. It allows focusing the search
1527/// on specific user handles to retrieve targeted posts and interactions.
1528#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
1529pub struct XSource {
1530    /// Optional list of X usernames (without the '@' symbol) to limit search results to posts
1531    /// from specific accounts (e.g., \["xai"\]). If set, only posts authored by these
1532    /// handles will be considered in the live search.
1533    /// This field cannot be set together with `excluded_x_handles`.
1534    /// Defaults to unset (no handle restriction).
1535    #[prost(string, repeated, tag = "7")]
1536    pub included_x_handles: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
1537    /// Optional list of X usernames (without the '@' symbol) used to exclude posts from specific accounts.
1538    /// If set, posts authored by these handles will be excluded from the live search results.
1539    /// This field cannot be set together with `included_x_handles`.
1540    /// Defaults to unset (no exclusions).
1541    #[prost(string, repeated, tag = "8")]
1542    pub excluded_x_handles: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
1543    /// Optional post favorite count threshold. Defaults to unset (don't filter posts by post favorite count).
1544    /// If set, only posts with a favorite count greater than or equal to this threshold will be considered.
1545    #[prost(int32, optional, tag = "9")]
1546    pub post_favorite_count: ::core::option::Option<i32>,
1547    /// Optional post view count threshold. Defaults to unset (don't filter posts by post view count).
1548    /// If set, only posts with a view count greater than or equal to this threshold will be considered.
1549    #[prost(int32, optional, tag = "10")]
1550    pub post_view_count: ::core::option::Option<i32>,
1551}
1552/// Configuration for an RSS search source in search requests.
1553///
1554/// This message configures a source for searching content from RSS feeds. It requires specific
1555/// feed URLs to query for content updates.
1556#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
1557pub struct RssSource {
1558    /// List of RSS feed URLs to search. Each URL must point to a valid RSS feed.
1559    /// At least one link must be provided.
1560    #[prost(string, repeated, tag = "1")]
1561    pub links: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
1562}
1563#[derive(Clone, PartialEq, ::prost::Message)]
1564pub struct RequestSettings {
1565    /// Max number of tokens that can be generated in a response. This includes both output and reasoning tokens.
1566    #[prost(int32, optional, tag = "1")]
1567    pub max_tokens: ::core::option::Option<i32>,
1568    /// If set to false, the model can perform at most one tool call. Defaults to true.
1569    #[prost(bool, tag = "2")]
1570    pub parallel_tool_calls: bool,
1571    /// The ID of the previous response from the model.
1572    #[prost(string, optional, tag = "3")]
1573    pub previous_response_id: ::core::option::Option<::prost::alloc::string::String>,
1574    /// Constrains effort on reasoning for reasoning models. Defaults to `EFFORT_MEDIUM`.
1575    #[prost(enumeration = "ReasoningEffort", optional, tag = "4")]
1576    pub reasoning_effort: ::core::option::Option<i32>,
1577    /// A number between 0 and 2 used to control the variance of completions.
1578    /// The smaller the value, the more deterministic the model will become. For
1579    /// example, if we sample 1000 answers to the same prompt at a temperature of
1580    /// 0.001, then most of the 1000 answers will be identical. Conversely, if we
1581    /// conduct the same experiment at a temperature of 2, virtually no two answers
1582    /// will be identical. Note that increasing the temperature also makes the
1583    /// model more likely to hallucinate.
1584    #[prost(float, optional, tag = "5")]
1585    pub temperature: ::core::option::Option<f32>,
1586    /// Formatting constraint on the response.
1587    #[prost(message, optional, tag = "6")]
1588    pub response_format: ::core::option::Option<ResponseFormat>,
1589    /// Controls if the model can, should, or must not use tools.
1590    #[prost(message, optional, tag = "7")]
1591    pub tool_choice: ::core::option::Option<ToolChoice>,
1592    /// A list of tools the model may call. Currently, only functions are supported
1593    /// as a tool. Use this to provide a list of functions the model may generate
1594    /// JSON inputs for.
1595    #[prost(message, repeated, tag = "8")]
1596    pub tools: ::prost::alloc::vec::Vec<Tool>,
1597    /// A number between 0 and 1 controlling the likelihood of the model to use
1598    /// less-common answers. Recall that the model produces a probability for
1599    /// each token, so at every sampling step there are thousands of candidate
1600    /// tokens to choose from. This parameter controls the "nucleus sampling"
1601    /// algorithm: instead of considering every possible token at every step, we
1602    /// only sample from the smallest set of most-likely tokens whose cumulative
1603    /// probability reaches `top_p`. For example, if we set `top_p = 0.9`, then
1604    /// the set of tokens we actually sample from will have a probability mass of
1605    /// at least 90%. In practice, low values will make the model more deterministic.
1606    #[prost(float, optional, tag = "9")]
1607    pub top_p: ::core::option::Option<f32>,
1608    /// An opaque string supplied by the API client (customer) to identify a user.
1609    /// The string will be stored in the logs and can be used in customer service
1610    /// requests to identify certain requests.
1611    #[prost(string, tag = "10")]
1612    pub user: ::prost::alloc::string::String,
1613    /// Set the parameters to be used for realtime data. If not set, no realtime data will be acquired by the model.
1614    #[prost(message, optional, tag = "11")]
1615    pub search_parameters: ::core::option::Option<SearchParameters>,
1616    /// Whether to store the request and responses. Defaults to false.
1617    #[prost(bool, tag = "12")]
1618    pub store_messages: bool,
1619    /// Whether to use encrypted thinking for thinking trace rehydration.
1620    #[prost(bool, tag = "13")]
1621    pub use_encrypted_content: bool,
1622    /// Allows the user to control which optional fields are returned in the response.
1623    #[prost(enumeration = "IncludeOption", repeated, tag = "14")]
1624    pub include: ::prost::alloc::vec::Vec<i32>,
1625}
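// Illustrative sketch (not part of the prost-build output): typical sampling
// settings combining several of the fields documented above; the concrete
// values are placeholders.
#[allow(dead_code)]
fn example_request_settings() -> RequestSettings {
    RequestSettings {
        max_tokens: Some(1024),
        parallel_tool_calls: true,
        reasoning_effort: Some(ReasoningEffort::EffortHigh as i32),
        temperature: Some(0.7),
        top_p: Some(0.95),
        include: vec![IncludeOption::InlineCitations as i32],
        ..Default::default()
    }
}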
1626/// Request to retrieve a stored completion response.
1627#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
1628pub struct GetStoredCompletionRequest {
1629    /// The response id to be retrieved.
1630    #[prost(string, tag = "1")]
1631    pub response_id: ::prost::alloc::string::String,
1632}
1633/// Request to delete a stored completion response.
1634#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
1635pub struct DeleteStoredCompletionRequest {
1636    /// The response id to be deleted.
1637    #[prost(string, tag = "1")]
1638    pub response_id: ::prost::alloc::string::String,
1639}
1640/// Response for deleting a stored completion.
1641#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
1642pub struct DeleteStoredCompletionResponse {
1643    /// The response id that was deleted.
1644    #[prost(string, tag = "1")]
1645    pub response_id: ::prost::alloc::string::String,
1646}
1647/// Holds debug information. Only available to trusted testers.
1648#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
1649pub struct DebugOutput {
1650    /// Number of attempts made to the model.
1651    #[prost(int32, tag = "1")]
1652    pub attempts: i32,
1653    /// The request received from the user.
1654    #[prost(string, tag = "2")]
1655    pub request: ::prost::alloc::string::String,
1656    /// The prompt sent to the model in text form.
1657    #[prost(string, tag = "3")]
1658    pub prompt: ::prost::alloc::string::String,
1659    /// The JSON-serialized request sent to the inference engine.
1660    #[prost(string, tag = "9")]
1661    pub engine_request: ::prost::alloc::string::String,
1662    /// The response(s) received from the model.
1663    #[prost(string, repeated, tag = "4")]
1664    pub responses: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
1665    /// The raw chunks returned from the pipeline of samplers.
1666    #[prost(string, repeated, tag = "12")]
1667    pub chunks: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
1668    /// Number of cache reads
1669    #[prost(uint32, tag = "5")]
1670    pub cache_read_count: u32,
1671    /// Size of cache read
1672    #[prost(uint64, tag = "6")]
1673    pub cache_read_input_bytes: u64,
1674    /// Number of cache writes
1675    #[prost(uint32, tag = "7")]
1676    pub cache_write_count: u32,
1677    /// Size of cache write
1678    #[prost(uint64, tag = "8")]
1679    pub cache_write_input_bytes: u64,
1680    /// The load balancer address header.
1681    #[prost(string, tag = "10")]
1682    pub lb_address: ::prost::alloc::string::String,
1683    /// The tag of the sampler that served this request.
1684    #[prost(string, tag = "11")]
1685    pub sampler_tag: ::prost::alloc::string::String,
1686}
1687#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
1688#[repr(i32)]
1689pub enum IncludeOption {
1690    /// Default value / invalid option.
1691    Invalid = 0,
1692    /// Include the encrypted output from the web search tool in the response.
1693    WebSearchCallOutput = 1,
1694    /// Include the encrypted output from the X search tool in the response.
1695    XSearchCallOutput = 2,
1696    /// Include the plaintext output from the code execution tool in the response.
1697    CodeExecutionCallOutput = 3,
1698    /// Include the plaintext output from the collections search tool in the response.
1699    CollectionsSearchCallOutput = 4,
1700    /// Include the plaintext output from the document search tool in the response.
1701    DocumentSearchCallOutput = 5,
1702    /// Include the plaintext output from the MCP tool in the response.
1703    McpCallOutput = 6,
1704    /// Include the inline citations in the final response.
1705    InlineCitations = 7,
1706}
1707impl IncludeOption {
1708    /// String value of the enum field names used in the ProtoBuf definition.
1709    ///
1710    /// The values are not transformed in any way and thus are considered stable
1711    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
1712    pub fn as_str_name(&self) -> &'static str {
1713        match self {
1714            Self::Invalid => "INCLUDE_OPTION_INVALID",
1715            Self::WebSearchCallOutput => "INCLUDE_OPTION_WEB_SEARCH_CALL_OUTPUT",
1716            Self::XSearchCallOutput => "INCLUDE_OPTION_X_SEARCH_CALL_OUTPUT",
1717            Self::CodeExecutionCallOutput => "INCLUDE_OPTION_CODE_EXECUTION_CALL_OUTPUT",
1718            Self::CollectionsSearchCallOutput => {
1719                "INCLUDE_OPTION_COLLECTIONS_SEARCH_CALL_OUTPUT"
1720            }
1721            Self::DocumentSearchCallOutput => {
1722                "INCLUDE_OPTION_DOCUMENT_SEARCH_CALL_OUTPUT"
1723            }
1724            Self::McpCallOutput => "INCLUDE_OPTION_MCP_CALL_OUTPUT",
1725            Self::InlineCitations => "INCLUDE_OPTION_INLINE_CITATIONS",
1726        }
1727    }
1728    /// Creates an enum from field names used in the ProtoBuf definition.
1729    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
1730        match value {
1731            "INCLUDE_OPTION_INVALID" => Some(Self::Invalid),
1732            "INCLUDE_OPTION_WEB_SEARCH_CALL_OUTPUT" => Some(Self::WebSearchCallOutput),
1733            "INCLUDE_OPTION_X_SEARCH_CALL_OUTPUT" => Some(Self::XSearchCallOutput),
1734            "INCLUDE_OPTION_CODE_EXECUTION_CALL_OUTPUT" => {
1735                Some(Self::CodeExecutionCallOutput)
1736            }
1737            "INCLUDE_OPTION_COLLECTIONS_SEARCH_CALL_OUTPUT" => {
1738                Some(Self::CollectionsSearchCallOutput)
1739            }
1740            "INCLUDE_OPTION_DOCUMENT_SEARCH_CALL_OUTPUT" => {
1741                Some(Self::DocumentSearchCallOutput)
1742            }
1743            "INCLUDE_OPTION_MCP_CALL_OUTPUT" => Some(Self::McpCallOutput),
1744            "INCLUDE_OPTION_INLINE_CITATIONS" => Some(Self::InlineCitations),
1745            _ => None,
1746        }
1747    }
1748}
1749#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
1750#[repr(i32)]
1751pub enum MessageRole {
1752    /// Default value / invalid role.
1753    InvalidRole = 0,
1754    /// User role.
1755    RoleUser = 1,
1756    /// Assistant role, normally the response from the model.
1757    RoleAssistant = 2,
1758    /// System role, typically for system instructions.
1759    RoleSystem = 3,
1760    /// Indicates a return from a tool call. Deprecated in favor of ROLE_TOOL.
1761    RoleFunction = 4,
1762    /// Indicates a return from a tool call.
1763    RoleTool = 5,
1764}
1765impl MessageRole {
1766    /// String value of the enum field names used in the ProtoBuf definition.
1767    ///
1768    /// The values are not transformed in any way and thus are considered stable
1769    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
1770    pub fn as_str_name(&self) -> &'static str {
1771        match self {
1772            Self::InvalidRole => "INVALID_ROLE",
1773            Self::RoleUser => "ROLE_USER",
1774            Self::RoleAssistant => "ROLE_ASSISTANT",
1775            Self::RoleSystem => "ROLE_SYSTEM",
1776            Self::RoleFunction => "ROLE_FUNCTION",
1777            Self::RoleTool => "ROLE_TOOL",
1778        }
1779    }
1780    /// Creates an enum from field names used in the ProtoBuf definition.
1781    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
1782        match value {
1783            "INVALID_ROLE" => Some(Self::InvalidRole),
1784            "ROLE_USER" => Some(Self::RoleUser),
1785            "ROLE_ASSISTANT" => Some(Self::RoleAssistant),
1786            "ROLE_SYSTEM" => Some(Self::RoleSystem),
1787            "ROLE_FUNCTION" => Some(Self::RoleFunction),
1788            "ROLE_TOOL" => Some(Self::RoleTool),
1789            _ => None,
1790        }
1791    }
1792}
1793#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
1794#[repr(i32)]
1795pub enum ReasoningEffort {
1796    InvalidEffort = 0,
1797    EffortLow = 1,
1798    EffortMedium = 2,
1799    EffortHigh = 3,
1800}
1801impl ReasoningEffort {
1802    /// String value of the enum field names used in the ProtoBuf definition.
1803    ///
1804    /// The values are not transformed in any way and thus are considered stable
1805    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
1806    pub fn as_str_name(&self) -> &'static str {
1807        match self {
1808            Self::InvalidEffort => "INVALID_EFFORT",
1809            Self::EffortLow => "EFFORT_LOW",
1810            Self::EffortMedium => "EFFORT_MEDIUM",
1811            Self::EffortHigh => "EFFORT_HIGH",
1812        }
1813    }
1814    /// Creates an enum from field names used in the ProtoBuf definition.
1815    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
1816        match value {
1817            "INVALID_EFFORT" => Some(Self::InvalidEffort),
1818            "EFFORT_LOW" => Some(Self::EffortLow),
1819            "EFFORT_MEDIUM" => Some(Self::EffortMedium),
1820            "EFFORT_HIGH" => Some(Self::EffortHigh),
1821            _ => None,
1822        }
1823    }
1824}
1825#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
1826#[repr(i32)]
1827pub enum ToolMode {
1828    /// Invalid tool mode.
1829    Invalid = 0,
1830    /// Let the model decide if a tool shall be used.
1831    Auto = 1,
1832    /// Force the model to not use tools.
1833    None = 2,
1834    /// Force the model to use tools.
1835    Required = 3,
1836}
1837impl ToolMode {
1838    /// String value of the enum field names used in the ProtoBuf definition.
1839    ///
1840    /// The values are not transformed in any way and thus are considered stable
1841    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
1842    pub fn as_str_name(&self) -> &'static str {
1843        match self {
1844            Self::Invalid => "TOOL_MODE_INVALID",
1845            Self::Auto => "TOOL_MODE_AUTO",
1846            Self::None => "TOOL_MODE_NONE",
1847            Self::Required => "TOOL_MODE_REQUIRED",
1848        }
1849    }
1850    /// Creates an enum from field names used in the ProtoBuf definition.
1851    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
1852        match value {
1853            "TOOL_MODE_INVALID" => Some(Self::Invalid),
1854            "TOOL_MODE_AUTO" => Some(Self::Auto),
1855            "TOOL_MODE_NONE" => Some(Self::None),
1856            "TOOL_MODE_REQUIRED" => Some(Self::Required),
1857            _ => None,
1858        }
1859    }
1860}
1861#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
1862#[repr(i32)]
1863pub enum FormatType {
1864    /// Invalid format type.
1865    Invalid = 0,
1866    /// Raw text.
1867    Text = 1,
1868    /// Any JSON object.
1869    JsonObject = 2,
1870    /// Follow a JSON schema.
1871    JsonSchema = 3,
1872}
1873impl FormatType {
1874    /// String value of the enum field names used in the ProtoBuf definition.
1875    ///
1876    /// The values are not transformed in any way and thus are considered stable
1877    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
1878    pub fn as_str_name(&self) -> &'static str {
1879        match self {
1880            Self::Invalid => "FORMAT_TYPE_INVALID",
1881            Self::Text => "FORMAT_TYPE_TEXT",
1882            Self::JsonObject => "FORMAT_TYPE_JSON_OBJECT",
1883            Self::JsonSchema => "FORMAT_TYPE_JSON_SCHEMA",
1884        }
1885    }
1886    /// Creates an enum from field names used in the ProtoBuf definition.
1887    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
1888        match value {
1889            "FORMAT_TYPE_INVALID" => Some(Self::Invalid),
1890            "FORMAT_TYPE_TEXT" => Some(Self::Text),
1891            "FORMAT_TYPE_JSON_OBJECT" => Some(Self::JsonObject),
1892            "FORMAT_TYPE_JSON_SCHEMA" => Some(Self::JsonSchema),
1893            _ => None,
1894        }
1895    }
1896}
1897#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
1898#[repr(i32)]
1899pub enum ToolCallType {
1900    Invalid = 0,
1901    /// Indicates the tool is a client-side tool and should be executed on the client side.
1902    /// Maps to `function_call` type in OAI Responses API.
1903    ClientSideTool = 1,
1904    /// Indicates the tool is a server-side web_search tool; the client does not need to execute it.
1905    /// Maps to `web_search_call` type in OAI Responses API.
1906    WebSearchTool = 2,
1907    /// Indicates the tool is a server-side x_search tool; the client does not need to execute it.
1908    /// Maps to `x_search_call` type in OAI Responses API.
1909    XSearchTool = 3,
1910    /// Indicates the tool is a server-side code_execution tool; the client does not need to execute it.
1911    /// Maps to `code_interpreter_call` type in OAI Responses API.
1912    CodeExecutionTool = 4,
1913    /// Indicates the tool is a server-side collections_search tool; the client does not need to execute it.
1914    /// Maps to `file_search_call` type in OAI Responses API.
1915    CollectionsSearchTool = 5,
1916    /// Indicates the tool is a server-side mcp_tool; the client does not need to execute it.
1917    /// Maps to `mcp_call` type in OAI Responses API.
1918    McpTool = 6,
1919    /// Indicates the tool is a server-side document_search tool; the client does not need to execute it.
1920    DocumentSearchTool = 7,
1921}
1922impl ToolCallType {
1923    /// String value of the enum field names used in the ProtoBuf definition.
1924    ///
1925    /// The values are not transformed in any way and thus are considered stable
1926    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
1927    pub fn as_str_name(&self) -> &'static str {
1928        match self {
1929            Self::Invalid => "TOOL_CALL_TYPE_INVALID",
1930            Self::ClientSideTool => "TOOL_CALL_TYPE_CLIENT_SIDE_TOOL",
1931            Self::WebSearchTool => "TOOL_CALL_TYPE_WEB_SEARCH_TOOL",
1932            Self::XSearchTool => "TOOL_CALL_TYPE_X_SEARCH_TOOL",
1933            Self::CodeExecutionTool => "TOOL_CALL_TYPE_CODE_EXECUTION_TOOL",
1934            Self::CollectionsSearchTool => "TOOL_CALL_TYPE_COLLECTIONS_SEARCH_TOOL",
1935            Self::McpTool => "TOOL_CALL_TYPE_MCP_TOOL",
1936            Self::DocumentSearchTool => "TOOL_CALL_TYPE_DOCUMENT_SEARCH_TOOL",
1937        }
1938    }
1939    /// Creates an enum from field names used in the ProtoBuf definition.
1940    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
1941        match value {
1942            "TOOL_CALL_TYPE_INVALID" => Some(Self::Invalid),
1943            "TOOL_CALL_TYPE_CLIENT_SIDE_TOOL" => Some(Self::ClientSideTool),
1944            "TOOL_CALL_TYPE_WEB_SEARCH_TOOL" => Some(Self::WebSearchTool),
1945            "TOOL_CALL_TYPE_X_SEARCH_TOOL" => Some(Self::XSearchTool),
1946            "TOOL_CALL_TYPE_CODE_EXECUTION_TOOL" => Some(Self::CodeExecutionTool),
1947            "TOOL_CALL_TYPE_COLLECTIONS_SEARCH_TOOL" => Some(Self::CollectionsSearchTool),
1948            "TOOL_CALL_TYPE_MCP_TOOL" => Some(Self::McpTool),
1949            "TOOL_CALL_TYPE_DOCUMENT_SEARCH_TOOL" => Some(Self::DocumentSearchTool),
1950            _ => None,
1951        }
1952    }
1953}
1954#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
1955#[repr(i32)]
1956pub enum ToolCallStatus {
1957    /// The tool call is in progress.
1958    InProgress = 0,
1959    /// The tool call is completed.
1960    Completed = 1,
1961    /// The tool call is incomplete.
1962    Incomplete = 2,
1963    /// The tool call failed.
1964    Failed = 3,
1965}
1966impl ToolCallStatus {
1967    /// String value of the enum field names used in the ProtoBuf definition.
1968    ///
1969    /// The values are not transformed in any way and thus are considered stable
1970    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
1971    pub fn as_str_name(&self) -> &'static str {
1972        match self {
1973            Self::InProgress => "TOOL_CALL_STATUS_IN_PROGRESS",
1974            Self::Completed => "TOOL_CALL_STATUS_COMPLETED",
1975            Self::Incomplete => "TOOL_CALL_STATUS_INCOMPLETE",
1976            Self::Failed => "TOOL_CALL_STATUS_FAILED",
1977        }
1978    }
1979    /// Creates an enum from field names used in the ProtoBuf definition.
1980    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
1981        match value {
1982            "TOOL_CALL_STATUS_IN_PROGRESS" => Some(Self::InProgress),
1983            "TOOL_CALL_STATUS_COMPLETED" => Some(Self::Completed),
1984            "TOOL_CALL_STATUS_INCOMPLETE" => Some(Self::Incomplete),
1985            "TOOL_CALL_STATUS_FAILED" => Some(Self::Failed),
1986            _ => None,
1987        }
1988    }
1989}
1990/// Mode to control the web search.
1991#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
1992#[repr(i32)]
1993pub enum SearchMode {
1994    InvalidSearchMode = 0,
1995    OffSearchMode = 1,
1996    OnSearchMode = 2,
1997    AutoSearchMode = 3,
1998}
1999impl SearchMode {
2000    /// String value of the enum field names used in the ProtoBuf definition.
2001    ///
2002    /// The values are not transformed in any way and thus are considered stable
2003    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
2004    pub fn as_str_name(&self) -> &'static str {
2005        match self {
2006            Self::InvalidSearchMode => "INVALID_SEARCH_MODE",
2007            Self::OffSearchMode => "OFF_SEARCH_MODE",
2008            Self::OnSearchMode => "ON_SEARCH_MODE",
2009            Self::AutoSearchMode => "AUTO_SEARCH_MODE",
2010        }
2011    }
2012    /// Creates an enum from field names used in the ProtoBuf definition.
2013    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
2014        match value {
2015            "INVALID_SEARCH_MODE" => Some(Self::InvalidSearchMode),
2016            "OFF_SEARCH_MODE" => Some(Self::OffSearchMode),
2017            "ON_SEARCH_MODE" => Some(Self::OnSearchMode),
2018            "AUTO_SEARCH_MODE" => Some(Self::AutoSearchMode),
2019            _ => None,
2020        }
2021    }
2022}
2023/// Generated client implementations.
2024pub mod chat_client {
2025    #![allow(
2026        unused_variables,
2027        dead_code,
2028        missing_docs,
2029        clippy::wildcard_imports,
2030        clippy::let_unit_value,
2031    )]
2032    use tonic::codegen::*;
2033    use tonic::codegen::http::Uri;
2034    /// An API that exposes our language models via a Chat interface.
2035    #[derive(Debug, Clone)]
2036    pub struct ChatClient<T> {
2037        inner: tonic::client::Grpc<T>,
2038    }
2039    impl ChatClient<tonic::transport::Channel> {
2040        /// Attempt to create a new client by connecting to a given endpoint.
2041        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
2042        where
2043            D: TryInto<tonic::transport::Endpoint>,
2044            D::Error: Into<StdError>,
2045        {
2046            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
2047            Ok(Self::new(conn))
2048        }
2049    }
2050    impl<T> ChatClient<T>
2051    where
2052        T: tonic::client::GrpcService<tonic::body::Body>,
2053        T::Error: Into<StdError>,
2054        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
2055        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
2056    {
2057        pub fn new(inner: T) -> Self {
2058            let inner = tonic::client::Grpc::new(inner);
2059            Self { inner }
2060        }
2061        pub fn with_origin(inner: T, origin: Uri) -> Self {
2062            let inner = tonic::client::Grpc::with_origin(inner, origin);
2063            Self { inner }
2064        }
2065        pub fn with_interceptor<F>(
2066            inner: T,
2067            interceptor: F,
2068        ) -> ChatClient<InterceptedService<T, F>>
2069        where
2070            F: tonic::service::Interceptor,
2071            T::ResponseBody: Default,
2072            T: tonic::codegen::Service<
2073                http::Request<tonic::body::Body>,
2074                Response = http::Response<
2075                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
2076                >,
2077            >,
2078            <T as tonic::codegen::Service<
2079                http::Request<tonic::body::Body>,
2080            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
2081        {
2082            ChatClient::new(InterceptedService::new(inner, interceptor))
2083        }
2084        /// Compress requests with the given encoding.
2085        ///
2086        /// This requires the server to support it otherwise it might respond with an
2087        /// error.
2088        #[must_use]
2089        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
2090            self.inner = self.inner.send_compressed(encoding);
2091            self
2092        }
2093        /// Enable decompressing responses.
2094        #[must_use]
2095        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
2096            self.inner = self.inner.accept_compressed(encoding);
2097            self
2098        }
2099        /// Limits the maximum size of a decoded message.
2100        ///
2101        /// Default: `4MB`
2102        #[must_use]
2103        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
2104            self.inner = self.inner.max_decoding_message_size(limit);
2105            self
2106        }
2107        /// Limits the maximum size of an encoded message.
2108        ///
2109        /// Default: `usize::MAX`
2110        #[must_use]
2111        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
2112            self.inner = self.inner.max_encoding_message_size(limit);
2113            self
2114        }
2115        /// Samples a response from the model and blocks until the response has been
2116        /// fully generated.
2117        pub async fn get_completion(
2118            &mut self,
2119            request: impl tonic::IntoRequest<super::GetCompletionsRequest>,
2120        ) -> std::result::Result<
2121            tonic::Response<super::GetChatCompletionResponse>,
2122            tonic::Status,
2123        > {
2124            self.inner
2125                .ready()
2126                .await
2127                .map_err(|e| {
2128                    tonic::Status::unknown(
2129                        format!("Service was not ready: {}", e.into()),
2130                    )
2131                })?;
2132            let codec = tonic_prost::ProstCodec::default();
2133            let path = http::uri::PathAndQuery::from_static(
2134                "/xai_api.Chat/GetCompletion",
2135            );
2136            let mut req = request.into_request();
2137            req.extensions_mut()
2138                .insert(GrpcMethod::new("xai_api.Chat", "GetCompletion"));
2139            self.inner.unary(req, path, codec).await
2140        }
2141        /// Samples a response from the model and streams out the model tokens as they
2142        /// are being generated.
2143        pub async fn get_completion_chunk(
2144            &mut self,
2145            request: impl tonic::IntoRequest<super::GetCompletionsRequest>,
2146        ) -> std::result::Result<
2147            tonic::Response<tonic::codec::Streaming<super::GetChatCompletionChunk>>,
2148            tonic::Status,
2149        > {
2150            self.inner
2151                .ready()
2152                .await
2153                .map_err(|e| {
2154                    tonic::Status::unknown(
2155                        format!("Service was not ready: {}", e.into()),
2156                    )
2157                })?;
2158            let codec = tonic_prost::ProstCodec::default();
2159            let path = http::uri::PathAndQuery::from_static(
2160                "/xai_api.Chat/GetCompletionChunk",
2161            );
2162            let mut req = request.into_request();
2163            req.extensions_mut()
2164                .insert(GrpcMethod::new("xai_api.Chat", "GetCompletionChunk"));
2165            self.inner.server_streaming(req, path, codec).await
2166        }
2167        /// Starts sampling of the model and immediately returns a response containing
2168        /// a request id. The request id may be used to poll
2169        /// the `GetDeferredCompletion` RPC.
2170        pub async fn start_deferred_completion(
2171            &mut self,
2172            request: impl tonic::IntoRequest<super::GetCompletionsRequest>,
2173        ) -> std::result::Result<
2174            tonic::Response<super::StartDeferredResponse>,
2175            tonic::Status,
2176        > {
2177            self.inner
2178                .ready()
2179                .await
2180                .map_err(|e| {
2181                    tonic::Status::unknown(
2182                        format!("Service was not ready: {}", e.into()),
2183                    )
2184                })?;
2185            let codec = tonic_prost::ProstCodec::default();
2186            let path = http::uri::PathAndQuery::from_static(
2187                "/xai_api.Chat/StartDeferredCompletion",
2188            );
2189            let mut req = request.into_request();
2190            req.extensions_mut()
2191                .insert(GrpcMethod::new("xai_api.Chat", "StartDeferredCompletion"));
2192            self.inner.unary(req, path, codec).await
2193        }
2194        /// Gets the result of a deferred completion started by calling `StartDeferredCompletion`.
2195        pub async fn get_deferred_completion(
2196            &mut self,
2197            request: impl tonic::IntoRequest<super::GetDeferredRequest>,
2198        ) -> std::result::Result<
2199            tonic::Response<super::GetDeferredCompletionResponse>,
2200            tonic::Status,
2201        > {
2202            self.inner
2203                .ready()
2204                .await
2205                .map_err(|e| {
2206                    tonic::Status::unknown(
2207                        format!("Service was not ready: {}", e.into()),
2208                    )
2209                })?;
2210            let codec = tonic_prost::ProstCodec::default();
2211            let path = http::uri::PathAndQuery::from_static(
2212                "/xai_api.Chat/GetDeferredCompletion",
2213            );
2214            let mut req = request.into_request();
2215            req.extensions_mut()
2216                .insert(GrpcMethod::new("xai_api.Chat", "GetDeferredCompletion"));
2217            self.inner.unary(req, path, codec).await
2218        }
2219        /// Retrieve a stored response using the response ID.
2220        pub async fn get_stored_completion(
2221            &mut self,
2222            request: impl tonic::IntoRequest<super::GetStoredCompletionRequest>,
2223        ) -> std::result::Result<
2224            tonic::Response<super::GetChatCompletionResponse>,
2225            tonic::Status,
2226        > {
2227            self.inner
2228                .ready()
2229                .await
2230                .map_err(|e| {
2231                    tonic::Status::unknown(
2232                        format!("Service was not ready: {}", e.into()),
2233                    )
2234                })?;
2235            let codec = tonic_prost::ProstCodec::default();
2236            let path = http::uri::PathAndQuery::from_static(
2237                "/xai_api.Chat/GetStoredCompletion",
2238            );
2239            let mut req = request.into_request();
2240            req.extensions_mut()
2241                .insert(GrpcMethod::new("xai_api.Chat", "GetStoredCompletion"));
2242            self.inner.unary(req, path, codec).await
2243        }
2244        /// Delete a stored response using the response ID.
2245        pub async fn delete_stored_completion(
2246            &mut self,
2247            request: impl tonic::IntoRequest<super::DeleteStoredCompletionRequest>,
2248        ) -> std::result::Result<
2249            tonic::Response<super::DeleteStoredCompletionResponse>,
2250            tonic::Status,
2251        > {
2252            self.inner
2253                .ready()
2254                .await
2255                .map_err(|e| {
2256                    tonic::Status::unknown(
2257                        format!("Service was not ready: {}", e.into()),
2258                    )
2259                })?;
2260            let codec = tonic_prost::ProstCodec::default();
2261            let path = http::uri::PathAndQuery::from_static(
2262                "/xai_api.Chat/DeleteStoredCompletion",
2263            );
2264            let mut req = request.into_request();
2265            req.extensions_mut()
2266                .insert(GrpcMethod::new("xai_api.Chat", "DeleteStoredCompletion"));
2267            self.inner.unary(req, path, codec).await
2268        }
2269    }
2270}
2271/// Request to get details of a specific model by name.
2272#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
2273pub struct GetModelRequest {
2274    /// The name of the model to retrieve details about.
2275    #[prost(string, tag = "1")]
2276    pub name: ::prost::alloc::string::String,
2277}
2278/// Describes a language model available on the platform.
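///
/// # Pricing example (sketch)
///
/// Token prices in this message are expressed in 1/100 of a US cent per one
/// million tokens, so a small helper like the hypothetical one below converts
/// a field such as `prompt_text_token_price` into a dollar amount for a given
/// token count.
///
/// ```
/// // Convert a price in 1/100 USD cents per one million tokens into US dollars.
/// fn token_cost_usd(price_per_million_tokens: i64, tokens: u64) -> f64 {
///     (price_per_million_tokens as f64) * (tokens as f64) / 1_000_000.0 / 10_000.0
/// }
///
/// // 20_000 (i.e. 200 US cents per million tokens) for one million tokens is $2.00.
/// assert_eq!(token_cost_usd(20_000, 1_000_000), 2.0);
/// ```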
2279#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
2280pub struct LanguageModel {
2281    /// The model name used in API requests/responses.
2282    #[prost(string, tag = "1")]
2283    pub name: ::prost::alloc::string::String,
2284    /// The aliases of the name, which can also be used in lieu of the name in the API
2285    /// requests.
2286    #[prost(string, repeated, tag = "11")]
2287    pub aliases: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
2288    /// The version number of this model. Used to identify minor updates when
2289    /// the model name is not changed.
2290    #[prost(string, tag = "2")]
2291    pub version: ::prost::alloc::string::String,
2292    /// The supported input modalities of the model.
2293    #[prost(enumeration = "Modality", repeated, tag = "3")]
2294    pub input_modalities: ::prost::alloc::vec::Vec<i32>,
2295    /// The supported output modalities of the model.
2296    #[prost(enumeration = "Modality", repeated, tag = "4")]
2297    pub output_modalities: ::prost::alloc::vec::Vec<i32>,
2298    /// The price (in 1/100 USD cents) per one million text prompt tokens.
2299    #[prost(int64, tag = "5")]
2300    pub prompt_text_token_price: i64,
2301    /// The price (in 1/100 USD cents) per one million image prompt tokens.
2302    #[prost(int64, tag = "6")]
2303    pub prompt_image_token_price: i64,
2304    /// The price (in 1/100 USD cents) per one million cached text prompt tokens.
2305    #[prost(int64, tag = "12")]
2306    pub cached_prompt_token_price: i64,
2307    /// The price (in 1/100 USD cents) per one million text completion tokens.
2308    #[prost(int64, tag = "7")]
2309    pub completion_text_token_price: i64,
2310    /// The price (in 1/100 USD cents) per one million searches.
2311    #[prost(int64, tag = "13")]
2312    pub search_price: i64,
2313    /// The creation time of the model.
2314    #[prost(message, optional, tag = "8")]
2315    pub created: ::core::option::Option<::prost_types::Timestamp>,
2316    /// Maximum length of the prompt/input (this includes tokens of all kinds).
2317    /// This is typically known as the context length of the model.
2318    #[prost(int32, tag = "9")]
2319    pub max_prompt_length: i32,
2320    /// Fingerprint of the unique configuration of the model.
2321    #[prost(string, tag = "10")]
2322    pub system_fingerprint: ::prost::alloc::string::String,
2323}
2324/// Response from ListLanguageModels including a list of language models.
2325#[derive(Clone, PartialEq, ::prost::Message)]
2326pub struct ListLanguageModelsResponse {
2327    /// A list of language models.
2328    #[prost(message, repeated, tag = "1")]
2329    pub models: ::prost::alloc::vec::Vec<LanguageModel>,
2330}
2331/// Describes an embedding model available on the platform.
2332#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
2333pub struct EmbeddingModel {
2334    /// The name under which the model is available in the API.
2335    #[prost(string, tag = "1")]
2336    pub name: ::prost::alloc::string::String,
2337    /// The aliases of the name, which can also be used in lieu of name in the API
2338    /// requests.
2339    #[prost(string, repeated, tag = "11")]
2340    pub aliases: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
2341    /// The version number of this model. Used to identify minor updates when
2342    /// the model name is not changed.
2343    #[prost(string, tag = "2")]
2344    pub version: ::prost::alloc::string::String,
2345    /// The supported input modalities of the model.
2346    #[prost(enumeration = "Modality", repeated, tag = "3")]
2347    pub input_modalities: ::prost::alloc::vec::Vec<i32>,
2348    /// The supported output modalities of the model.
2349    #[prost(enumeration = "Modality", repeated, tag = "4")]
2350    pub output_modalities: ::prost::alloc::vec::Vec<i32>,
2351    /// The price (in 1/100 USD cents) per one million text prompt tokens.
2352    #[prost(int64, tag = "5")]
2353    pub prompt_text_token_price: i64,
2354    /// The price (in 1/100 USD cents) per one million image prompt tokens.
2355    #[prost(int64, tag = "6")]
2356    pub prompt_image_token_price: i64,
2357    /// The creation time of the model.
2358    #[prost(message, optional, tag = "7")]
2359    pub created: ::core::option::Option<::prost_types::Timestamp>,
2360    /// Fingerprint of the unique configuration of the model.
2361    #[prost(string, tag = "8")]
2362    pub system_fingerprint: ::prost::alloc::string::String,
2363}
2364/// Response from ListEmbeddingModels including a list of embedding models.
2365#[derive(Clone, PartialEq, ::prost::Message)]
2366pub struct ListEmbeddingModelsResponse {
2367    /// A list of embedding models.
2368    #[prost(message, repeated, tag = "1")]
2369    pub models: ::prost::alloc::vec::Vec<EmbeddingModel>,
2370}
2371/// Describes an image generation model available on the platform.
2372#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
2373pub struct ImageGenerationModel {
2374    /// The model name used in API requests/responses.
2375    #[prost(string, tag = "1")]
2376    pub name: ::prost::alloc::string::String,
2377    /// The aliases of the name, which can also be used in lieu of name in the API
2378    /// requests.
2379    #[prost(string, repeated, tag = "11")]
2380    pub aliases: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
2381    /// The version number of this model. Used to identify minor updates when
2382    /// the model name is not changed.
2383    #[prost(string, tag = "2")]
2384    pub version: ::prost::alloc::string::String,
2385    /// The supported input modalities of the model.
2386    #[prost(enumeration = "Modality", repeated, tag = "3")]
2387    pub input_modalities: ::prost::alloc::vec::Vec<i32>,
2388    /// The supported output modalities of the model.
2389    #[prost(enumeration = "Modality", repeated, tag = "6")]
2390    pub output_modalities: ::prost::alloc::vec::Vec<i32>,
2391    /// The price (in USD cents) per image.
2392    #[prost(int64, tag = "12")]
2393    pub image_price: i64,
2394    /// The creation time of the model.
2395    #[prost(message, optional, tag = "8")]
2396    pub created: ::core::option::Option<::prost_types::Timestamp>,
2397    /// Maximum length of the prompt/input (this includes tokens of all kinds).
2398    /// This is typically known as the context length of the model.
2399    #[prost(int32, tag = "9")]
2400    pub max_prompt_length: i32,
2401    /// Fingerprint of the unique configuration of the model.
2402    #[prost(string, tag = "10")]
2403    pub system_fingerprint: ::prost::alloc::string::String,
2404}
2405/// Response from ListImageGenerationModels including a list of image generation
2406/// models.
2407#[derive(Clone, PartialEq, ::prost::Message)]
2408pub struct ListImageGenerationModelsResponse {
2409    /// A list of image generation models.
2410    #[prost(message, repeated, tag = "1")]
2411    pub models: ::prost::alloc::vec::Vec<ImageGenerationModel>,
2412}
2413/// Modalities supported by a model input/output.
2414#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
2415#[repr(i32)]
2416pub enum Modality {
2417    /// Invalid modality.
2418    InvalidModality = 0,
2419    /// Text input/output.
2420    Text = 1,
2421    /// Image input/output.
2422    Image = 2,
2423    /// Embedding input/output.
2424    Embedding = 3,
2425}
2426impl Modality {
2427    /// String value of the enum field names used in the ProtoBuf definition.
2428    ///
2429    /// The values are not transformed in any way and thus are considered stable
2430    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
2431    pub fn as_str_name(&self) -> &'static str {
2432        match self {
2433            Self::InvalidModality => "INVALID_MODALITY",
2434            Self::Text => "TEXT",
2435            Self::Image => "IMAGE",
2436            Self::Embedding => "EMBEDDING",
2437        }
2438    }
2439    /// Creates an enum from field names used in the ProtoBuf definition.
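    ///
    /// A small round-trip sketch (assuming this generated module is importable
    /// as `xai_api`): `from_str_name` accepts exactly the strings produced by
    /// `as_str_name` and returns `None` for anything else.
    ///
    /// ```
    /// # use xai_api::Modality;
    /// assert_eq!(Modality::from_str_name("IMAGE"), Some(Modality::Image));
    /// assert_eq!(
    ///     Modality::from_str_name(Modality::Text.as_str_name()),
    ///     Some(Modality::Text),
    /// );
    /// assert_eq!(Modality::from_str_name("AUDIO"), None);
    /// ```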
2440    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
2441        match value {
2442            "INVALID_MODALITY" => Some(Self::InvalidModality),
2443            "TEXT" => Some(Self::Text),
2444            "IMAGE" => Some(Self::Image),
2445            "EMBEDDING" => Some(Self::Embedding),
2446            _ => None,
2447        }
2448    }
2449}
2450/// Generated client implementations.
2451pub mod models_client {
2452    #![allow(
2453        unused_variables,
2454        dead_code,
2455        missing_docs,
2456        clippy::wildcard_imports,
2457        clippy::let_unit_value,
2458    )]
2459    use tonic::codegen::*;
2460    use tonic::codegen::http::Uri;
2461    /// An API service that lets users get details of available models on the
2462    /// platform.
2463    #[derive(Debug, Clone)]
2464    pub struct ModelsClient<T> {
2465        inner: tonic::client::Grpc<T>,
2466    }
2467    impl ModelsClient<tonic::transport::Channel> {
2468        /// Attempt to create a new client by connecting to a given endpoint.
2469        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
2470        where
2471            D: TryInto<tonic::transport::Endpoint>,
2472            D::Error: Into<StdError>,
2473        {
2474            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
2475            Ok(Self::new(conn))
2476        }
2477    }
2478    impl<T> ModelsClient<T>
2479    where
2480        T: tonic::client::GrpcService<tonic::body::Body>,
2481        T::Error: Into<StdError>,
2482        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
2483        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
2484    {
2485        pub fn new(inner: T) -> Self {
2486            let inner = tonic::client::Grpc::new(inner);
2487            Self { inner }
2488        }
2489        pub fn with_origin(inner: T, origin: Uri) -> Self {
2490            let inner = tonic::client::Grpc::with_origin(inner, origin);
2491            Self { inner }
2492        }
2493        pub fn with_interceptor<F>(
2494            inner: T,
2495            interceptor: F,
2496        ) -> ModelsClient<InterceptedService<T, F>>
2497        where
2498            F: tonic::service::Interceptor,
2499            T::ResponseBody: Default,
2500            T: tonic::codegen::Service<
2501                http::Request<tonic::body::Body>,
2502                Response = http::Response<
2503                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
2504                >,
2505            >,
2506            <T as tonic::codegen::Service<
2507                http::Request<tonic::body::Body>,
2508            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
2509        {
2510            ModelsClient::new(InterceptedService::new(inner, interceptor))
2511        }
2512        /// Compress requests with the given encoding.
2513        ///
2514        /// This requires the server to support it; otherwise, it might respond with an
2515        /// error.
2516        #[must_use]
2517        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
2518            self.inner = self.inner.send_compressed(encoding);
2519            self
2520        }
2521        /// Enable decompressing responses.
2522        #[must_use]
2523        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
2524            self.inner = self.inner.accept_compressed(encoding);
2525            self
2526        }
2527        /// Limits the maximum size of a decoded message.
2528        ///
2529        /// Default: `4MB`
2530        #[must_use]
2531        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
2532            self.inner = self.inner.max_decoding_message_size(limit);
2533            self
2534        }
2535        /// Limits the maximum size of an encoded message.
2536        ///
2537        /// Default: `usize::MAX`
2538        #[must_use]
2539        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
2540            self.inner = self.inner.max_encoding_message_size(limit);
2541            self
2542        }
2543        /// Lists all language models available to your team (based on the API key).
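        ///
        /// A minimal usage sketch (module path `xai_api`, endpoint, and runtime are
        /// assumptions; authentication is omitted): the RPC takes an empty request,
        /// so `()` can be passed directly.
        ///
        /// ```no_run
        /// use xai_api::models_client::ModelsClient;
        ///
        /// # async fn run() -> Result<(), Box<dyn std::error::Error>> {
        /// let mut client = ModelsClient::connect("https://api.x.ai").await?;
        /// let models = client.list_language_models(()).await?.into_inner();
        /// for m in &models.models {
        ///     println!("{} (context length {})", m.name, m.max_prompt_length);
        /// }
        /// # Ok(())
        /// # }
        /// ```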
2544        pub async fn list_language_models(
2545            &mut self,
2546            request: impl tonic::IntoRequest<()>,
2547        ) -> std::result::Result<
2548            tonic::Response<super::ListLanguageModelsResponse>,
2549            tonic::Status,
2550        > {
2551            self.inner
2552                .ready()
2553                .await
2554                .map_err(|e| {
2555                    tonic::Status::unknown(
2556                        format!("Service was not ready: {}", e.into()),
2557                    )
2558                })?;
2559            let codec = tonic_prost::ProstCodec::default();
2560            let path = http::uri::PathAndQuery::from_static(
2561                "/xai_api.Models/ListLanguageModels",
2562            );
2563            let mut req = request.into_request();
2564            req.extensions_mut()
2565                .insert(GrpcMethod::new("xai_api.Models", "ListLanguageModels"));
2566            self.inner.unary(req, path, codec).await
2567        }
2568        /// Lists all embedding models available to your team (based on the API key).
2569        pub async fn list_embedding_models(
2570            &mut self,
2571            request: impl tonic::IntoRequest<()>,
2572        ) -> std::result::Result<
2573            tonic::Response<super::ListEmbeddingModelsResponse>,
2574            tonic::Status,
2575        > {
2576            self.inner
2577                .ready()
2578                .await
2579                .map_err(|e| {
2580                    tonic::Status::unknown(
2581                        format!("Service was not ready: {}", e.into()),
2582                    )
2583                })?;
2584            let codec = tonic_prost::ProstCodec::default();
2585            let path = http::uri::PathAndQuery::from_static(
2586                "/xai_api.Models/ListEmbeddingModels",
2587            );
2588            let mut req = request.into_request();
2589            req.extensions_mut()
2590                .insert(GrpcMethod::new("xai_api.Models", "ListEmbeddingModels"));
2591            self.inner.unary(req, path, codec).await
2592        }
2593        /// Lists all image generation models available to your team (based on the API key).
2594        pub async fn list_image_generation_models(
2595            &mut self,
2596            request: impl tonic::IntoRequest<()>,
2597        ) -> std::result::Result<
2598            tonic::Response<super::ListImageGenerationModelsResponse>,
2599            tonic::Status,
2600        > {
2601            self.inner
2602                .ready()
2603                .await
2604                .map_err(|e| {
2605                    tonic::Status::unknown(
2606                        format!("Service was not ready: {}", e.into()),
2607                    )
2608                })?;
2609            let codec = tonic_prost::ProstCodec::default();
2610            let path = http::uri::PathAndQuery::from_static(
2611                "/xai_api.Models/ListImageGenerationModels",
2612            );
2613            let mut req = request.into_request();
2614            req.extensions_mut()
2615                .insert(GrpcMethod::new("xai_api.Models", "ListImageGenerationModels"));
2616            self.inner.unary(req, path, codec).await
2617        }
2618        /// Get details of a specific language model by model name.
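        ///
        /// A short sketch (module path, endpoint, and model name are illustrative
        /// assumptions; authentication is omitted):
        ///
        /// ```no_run
        /// use xai_api::models_client::ModelsClient;
        /// use xai_api::GetModelRequest;
        ///
        /// # async fn run() -> Result<(), Box<dyn std::error::Error>> {
        /// let mut client = ModelsClient::connect("https://api.x.ai").await?;
        /// let model = client
        ///     .get_language_model(GetModelRequest { name: "grok-3".to_string() })
        ///     .await?
        ///     .into_inner();
        /// println!("{} v{}", model.name, model.version);
        /// # Ok(())
        /// # }
        /// ```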
2619        pub async fn get_language_model(
2620            &mut self,
2621            request: impl tonic::IntoRequest<super::GetModelRequest>,
2622        ) -> std::result::Result<tonic::Response<super::LanguageModel>, tonic::Status> {
2623            self.inner
2624                .ready()
2625                .await
2626                .map_err(|e| {
2627                    tonic::Status::unknown(
2628                        format!("Service was not ready: {}", e.into()),
2629                    )
2630                })?;
2631            let codec = tonic_prost::ProstCodec::default();
2632            let path = http::uri::PathAndQuery::from_static(
2633                "/xai_api.Models/GetLanguageModel",
2634            );
2635            let mut req = request.into_request();
2636            req.extensions_mut()
2637                .insert(GrpcMethod::new("xai_api.Models", "GetLanguageModel"));
2638            self.inner.unary(req, path, codec).await
2639        }
2640        /// Get details of a specific embedding model by model name.
2641        pub async fn get_embedding_model(
2642            &mut self,
2643            request: impl tonic::IntoRequest<super::GetModelRequest>,
2644        ) -> std::result::Result<tonic::Response<super::EmbeddingModel>, tonic::Status> {
2645            self.inner
2646                .ready()
2647                .await
2648                .map_err(|e| {
2649                    tonic::Status::unknown(
2650                        format!("Service was not ready: {}", e.into()),
2651                    )
2652                })?;
2653            let codec = tonic_prost::ProstCodec::default();
2654            let path = http::uri::PathAndQuery::from_static(
2655                "/xai_api.Models/GetEmbeddingModel",
2656            );
2657            let mut req = request.into_request();
2658            req.extensions_mut()
2659                .insert(GrpcMethod::new("xai_api.Models", "GetEmbeddingModel"));
2660            self.inner.unary(req, path, codec).await
2661        }
2662        /// Get details of a specific image generation model by model name.
2663        pub async fn get_image_generation_model(
2664            &mut self,
2665            request: impl tonic::IntoRequest<super::GetModelRequest>,
2666        ) -> std::result::Result<
2667            tonic::Response<super::ImageGenerationModel>,
2668            tonic::Status,
2669        > {
2670            self.inner
2671                .ready()
2672                .await
2673                .map_err(|e| {
2674                    tonic::Status::unknown(
2675                        format!("Service was not ready: {}", e.into()),
2676                    )
2677                })?;
2678            let codec = tonic_prost::ProstCodec::default();
2679            let path = http::uri::PathAndQuery::from_static(
2680                "/xai_api.Models/GetImageGenerationModel",
2681            );
2682            let mut req = request.into_request();
2683            req.extensions_mut()
2684                .insert(GrpcMethod::new("xai_api.Models", "GetImageGenerationModel"));
2685            self.inner.unary(req, path, codec).await
2686        }
2687    }
2688}
2689/// Request message for generating embeddings.
2690#[derive(Clone, PartialEq, ::prost::Message)]
2691pub struct EmbedRequest {
2692    /// The entities to embed. Note that not every model supports images and text.
2693    /// Some models are text-only and some are image-only. You can embed at most
2694    /// 128 inputs in a single request.
2695    #[prost(message, repeated, tag = "1")]
2696    pub input: ::prost::alloc::vec::Vec<EmbedInput>,
2697    /// Name or alias of the embedding model to use.
2698    #[prost(string, tag = "2")]
2699    pub model: ::prost::alloc::string::String,
2700    /// Format of the returned embeddings.
2701    #[prost(enumeration = "EmbedEncodingFormat", tag = "3")]
2702    pub encoding_format: i32,
2703    /// An opaque string supplied by the API client (customer) to identify a user.
2704    /// The string will be stored in the logs and can be used in customer service
2705    /// requests to identify certain requests.
2706    #[prost(string, tag = "4")]
2707    pub user: ::prost::alloc::string::String,
2708}
2709/// Input content to be embedded.
2710#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
2711pub struct EmbedInput {
2712    #[prost(oneof = "embed_input::Input", tags = "1, 2")]
2713    pub input: ::core::option::Option<embed_input::Input>,
2714}
2715/// Nested message and enum types in `EmbedInput`.
2716pub mod embed_input {
2717    #[derive(Clone, PartialEq, Eq, Hash, ::prost::Oneof)]
2718    pub enum Input {
2719        /// A string to be embedded.
2720        #[prost(string, tag = "1")]
2721        String(::prost::alloc::string::String),
2722        /// An image to be embedded.
2723        #[prost(message, tag = "2")]
2724        ImageUrl(super::ImageUrlContent),
2725    }
2726}
2727/// Response object for the `Embed` RPC.
2728#[derive(Clone, PartialEq, ::prost::Message)]
2729pub struct EmbedResponse {
2730    /// An identifier of this request. The same ID will be used in your billing
2731    /// records.
2732    #[prost(string, tag = "1")]
2733    pub id: ::prost::alloc::string::String,
2734    /// The embeddings generated from the inputs.
2735    #[prost(message, repeated, tag = "2")]
2736    pub embeddings: ::prost::alloc::vec::Vec<Embedding>,
2737    /// The usage associated with this request.
2738    #[prost(message, optional, tag = "3")]
2739    pub usage: ::core::option::Option<EmbeddingUsage>,
2740    /// The name of the model used for the request. This model name contains
2741    /// the actual model name used rather than any aliases.
2742    /// This means it can be `embed-0205` even when the request specified
2743    /// `embed-latest`.
2744    #[prost(string, tag = "4")]
2745    pub model: ::prost::alloc::string::String,
2746    /// This fingerprint represents the backend configuration that the model runs
2747    /// with.
2748    #[prost(string, tag = "5")]
2749    pub system_fingerprint: ::prost::alloc::string::String,
2750}
2751/// Holds the embedding vector for a single embedding input.
2752#[derive(Clone, PartialEq, ::prost::Message)]
2753pub struct Embedding {
2754    /// The index of the input this embedding was produced from.
2755    #[prost(int32, tag = "1")]
2756    pub index: i32,
2757    /// The feature vectors derived from the inputs. Note that some inputs such as
2758    /// images may produce multiple feature vectors.
2759    #[prost(message, repeated, tag = "2")]
2760    pub embeddings: ::prost::alloc::vec::Vec<FeatureVector>,
2761}
2762/// A single feature vector.
2763#[derive(Clone, PartialEq, ::prost::Message)]
2764pub struct FeatureVector {
2765    /// The feature vector encoded as an array of floats. Only populated if
2766    /// the encoding format is FORMAT_FLOAT.
2767    #[prost(float, repeated, tag = "1")]
2768    pub float_array: ::prost::alloc::vec::Vec<f32>,
2769    /// The feature vector encoded as a base64 string. Only populated if
2770    /// the encoding format is FORMAT_BASE64.
2771    #[prost(string, tag = "2")]
2772    pub base64_array: ::prost::alloc::string::String,
2773}
2774#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
2775#[repr(i32)]
2776pub enum EmbedEncodingFormat {
2777    /// Invalid format.
2778    FormatInvalid = 0,
2779    /// Returns the embeddings as an array of floats.
2780    FormatFloat = 1,
2781    /// Returns the embeddings as a base64-encoded string.
2782    FormatBase64 = 2,
2783}
2784impl EmbedEncodingFormat {
2785    /// String value of the enum field names used in the ProtoBuf definition.
2786    ///
2787    /// The values are not transformed in any way and thus are considered stable
2788    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
2789    pub fn as_str_name(&self) -> &'static str {
2790        match self {
2791            Self::FormatInvalid => "FORMAT_INVALID",
2792            Self::FormatFloat => "FORMAT_FLOAT",
2793            Self::FormatBase64 => "FORMAT_BASE64",
2794        }
2795    }
2796    /// Creates an enum from field names used in the ProtoBuf definition.
2797    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
2798        match value {
2799            "FORMAT_INVALID" => Some(Self::FormatInvalid),
2800            "FORMAT_FLOAT" => Some(Self::FormatFloat),
2801            "FORMAT_BASE64" => Some(Self::FormatBase64),
2802            _ => None,
2803        }
2804    }
2805}
2806/// Generated client implementations.
2807pub mod embedder_client {
2808    #![allow(
2809        unused_variables,
2810        dead_code,
2811        missing_docs,
2812        clippy::wildcard_imports,
2813        clippy::let_unit_value,
2814    )]
2815    use tonic::codegen::*;
2816    use tonic::codegen::http::Uri;
2817    /// An API service for interaction with available embedding models.
2818    #[derive(Debug, Clone)]
2819    pub struct EmbedderClient<T> {
2820        inner: tonic::client::Grpc<T>,
2821    }
2822    impl EmbedderClient<tonic::transport::Channel> {
2823        /// Attempt to create a new client by connecting to a given endpoint.
2824        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
2825        where
2826            D: TryInto<tonic::transport::Endpoint>,
2827            D::Error: Into<StdError>,
2828        {
2829            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
2830            Ok(Self::new(conn))
2831        }
2832    }
2833    impl<T> EmbedderClient<T>
2834    where
2835        T: tonic::client::GrpcService<tonic::body::Body>,
2836        T::Error: Into<StdError>,
2837        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
2838        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
2839    {
2840        pub fn new(inner: T) -> Self {
2841            let inner = tonic::client::Grpc::new(inner);
2842            Self { inner }
2843        }
2844        pub fn with_origin(inner: T, origin: Uri) -> Self {
2845            let inner = tonic::client::Grpc::with_origin(inner, origin);
2846            Self { inner }
2847        }
2848        pub fn with_interceptor<F>(
2849            inner: T,
2850            interceptor: F,
2851        ) -> EmbedderClient<InterceptedService<T, F>>
2852        where
2853            F: tonic::service::Interceptor,
2854            T::ResponseBody: Default,
2855            T: tonic::codegen::Service<
2856                http::Request<tonic::body::Body>,
2857                Response = http::Response<
2858                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
2859                >,
2860            >,
2861            <T as tonic::codegen::Service<
2862                http::Request<tonic::body::Body>,
2863            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
2864        {
2865            EmbedderClient::new(InterceptedService::new(inner, interceptor))
2866        }
2867        /// Compress requests with the given encoding.
2868        ///
2869        /// This requires the server to support it; otherwise, it might respond with an
2870        /// error.
2871        #[must_use]
2872        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
2873            self.inner = self.inner.send_compressed(encoding);
2874            self
2875        }
2876        /// Enable decompressing responses.
2877        #[must_use]
2878        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
2879            self.inner = self.inner.accept_compressed(encoding);
2880            self
2881        }
2882        /// Limits the maximum size of a decoded message.
2883        ///
2884        /// Default: `4MB`
2885        #[must_use]
2886        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
2887            self.inner = self.inner.max_decoding_message_size(limit);
2888            self
2889        }
2890        /// Limits the maximum size of an encoded message.
2891        ///
2892        /// Default: `usize::MAX`
2893        #[must_use]
2894        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
2895            self.inner = self.inner.max_encoding_message_size(limit);
2896            self
2897        }
2898        /// Produces one embedding for each input object. The size of the produced
2899        /// feature vectors depends on the chosen model.
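        ///
        /// A minimal sketch building an `EmbedRequest` with a single text input and
        /// reading the float feature vectors back (module path, endpoint, and model
        /// name are assumptions; authentication is omitted):
        ///
        /// ```no_run
        /// use xai_api::embedder_client::EmbedderClient;
        /// use xai_api::{embed_input, EmbedEncodingFormat, EmbedInput, EmbedRequest};
        ///
        /// # async fn run() -> Result<(), Box<dyn std::error::Error>> {
        /// let mut client = EmbedderClient::connect("https://api.x.ai").await?;
        /// let request = EmbedRequest {
        ///     input: vec![EmbedInput {
        ///         input: Some(embed_input::Input::String("hello world".to_string())),
        ///     }],
        ///     model: "embed-latest".to_string(),
        ///     encoding_format: EmbedEncodingFormat::FormatFloat as i32,
        ///     user: String::new(),
        /// };
        /// let response = client.embed(request).await?.into_inner();
        /// for embedding in &response.embeddings {
        ///     for vector in &embedding.embeddings {
        ///         println!("input {}: {} floats", embedding.index, vector.float_array.len());
        ///     }
        /// }
        /// # Ok(())
        /// # }
        /// ```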
2900        pub async fn embed(
2901            &mut self,
2902            request: impl tonic::IntoRequest<super::EmbedRequest>,
2903        ) -> std::result::Result<tonic::Response<super::EmbedResponse>, tonic::Status> {
2904            self.inner
2905                .ready()
2906                .await
2907                .map_err(|e| {
2908                    tonic::Status::unknown(
2909                        format!("Service was not ready: {}", e.into()),
2910                    )
2911                })?;
2912            let codec = tonic_prost::ProstCodec::default();
2913            let path = http::uri::PathAndQuery::from_static("/xai_api.Embedder/Embed");
2914            let mut req = request.into_request();
2915            req.extensions_mut().insert(GrpcMethod::new("xai_api.Embedder", "Embed"));
2916            self.inner.unary(req, path, codec).await
2917        }
2918    }
2919}
2920/// Request to convert text to a sequence of tokens.
2921#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
2922pub struct TokenizeTextRequest {
2923    /// Text to tokenize.
2924    #[prost(string, tag = "1")]
2925    pub text: ::prost::alloc::string::String,
2926    /// Name or alias of the model used for tokenization.
2927    #[prost(string, tag = "2")]
2928    pub model: ::prost::alloc::string::String,
2929    /// An opaque string supplied by the API client (customer) to identify a user.
2930    /// The string will be stored in the logs and can be used in customer service
2931    /// requests to identify certain requests.
2932    #[prost(string, tag = "3")]
2933    pub user: ::prost::alloc::string::String,
2934}
2935/// Information on a token.
2936#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
2937pub struct Token {
2938    /// ID of the token.
2939    #[prost(uint32, tag = "1")]
2940    pub token_id: u32,
2941    /// String snippet of the token.
2942    #[prost(string, tag = "2")]
2943    pub string_token: ::prost::alloc::string::String,
2944    /// Bytes representing the token.
2945    #[prost(bytes = "vec", tag = "4")]
2946    pub token_bytes: ::prost::alloc::vec::Vec<u8>,
2947}
2948/// Response including the tokenization result.
2949#[derive(Clone, PartialEq, ::prost::Message)]
2950pub struct TokenizeTextResponse {
2951    /// The sequence of tokens. This is the output of the tokenization process.
2952    #[prost(message, repeated, tag = "1")]
2953    pub tokens: ::prost::alloc::vec::Vec<Token>,
2954    /// The name of the model used for the request. This model name contains
2955    /// the actual model name used rather than any aliases.
2956    /// This means this can be `grok-2-1212` even when the request specified
2957    /// `grok-2-latest`.
2958    #[prost(string, tag = "2")]
2959    pub model: ::prost::alloc::string::String,
2960}
2961/// Generated client implementations.
2962pub mod tokenize_client {
2963    #![allow(
2964        unused_variables,
2965        dead_code,
2966        missing_docs,
2967        clippy::wildcard_imports,
2968        clippy::let_unit_value,
2969    )]
2970    use tonic::codegen::*;
2971    use tonic::codegen::http::Uri;
2972    /// An API service to tokenize input prompts.
2973    #[derive(Debug, Clone)]
2974    pub struct TokenizeClient<T> {
2975        inner: tonic::client::Grpc<T>,
2976    }
2977    impl TokenizeClient<tonic::transport::Channel> {
2978        /// Attempt to create a new client by connecting to a given endpoint.
2979        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
2980        where
2981            D: TryInto<tonic::transport::Endpoint>,
2982            D::Error: Into<StdError>,
2983        {
2984            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
2985            Ok(Self::new(conn))
2986        }
2987    }
2988    impl<T> TokenizeClient<T>
2989    where
2990        T: tonic::client::GrpcService<tonic::body::Body>,
2991        T::Error: Into<StdError>,
2992        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
2993        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
2994    {
2995        pub fn new(inner: T) -> Self {
2996            let inner = tonic::client::Grpc::new(inner);
2997            Self { inner }
2998        }
2999        pub fn with_origin(inner: T, origin: Uri) -> Self {
3000            let inner = tonic::client::Grpc::with_origin(inner, origin);
3001            Self { inner }
3002        }
3003        pub fn with_interceptor<F>(
3004            inner: T,
3005            interceptor: F,
3006        ) -> TokenizeClient<InterceptedService<T, F>>
3007        where
3008            F: tonic::service::Interceptor,
3009            T::ResponseBody: Default,
3010            T: tonic::codegen::Service<
3011                http::Request<tonic::body::Body>,
3012                Response = http::Response<
3013                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
3014                >,
3015            >,
3016            <T as tonic::codegen::Service<
3017                http::Request<tonic::body::Body>,
3018            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
3019        {
3020            TokenizeClient::new(InterceptedService::new(inner, interceptor))
3021        }
3022        /// Compress requests with the given encoding.
3023        ///
3024        /// This requires the server to support it; otherwise, it might respond with an
3025        /// error.
3026        #[must_use]
3027        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
3028            self.inner = self.inner.send_compressed(encoding);
3029            self
3030        }
3031        /// Enable decompressing responses.
3032        #[must_use]
3033        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
3034            self.inner = self.inner.accept_compressed(encoding);
3035            self
3036        }
3037        /// Limits the maximum size of a decoded message.
3038        ///
3039        /// Default: `4MB`
3040        #[must_use]
3041        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
3042            self.inner = self.inner.max_decoding_message_size(limit);
3043            self
3044        }
3045        /// Limits the maximum size of an encoded message.
3046        ///
3047        /// Default: `usize::MAX`
3048        #[must_use]
3049        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
3050            self.inner = self.inner.max_encoding_message_size(limit);
3051            self
3052        }
3053        /// Convert text to a sequence of tokens.
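        ///
        /// A minimal sketch (module path, endpoint, and model name are assumptions;
        /// authentication is omitted):
        ///
        /// ```no_run
        /// use xai_api::tokenize_client::TokenizeClient;
        /// use xai_api::TokenizeTextRequest;
        ///
        /// # async fn run() -> Result<(), Box<dyn std::error::Error>> {
        /// let mut client = TokenizeClient::connect("https://api.x.ai").await?;
        /// let response = client
        ///     .tokenize_text(TokenizeTextRequest {
        ///         text: "Hello, world!".to_string(),
        ///         model: "grok-3".to_string(),
        ///         user: String::new(),
        ///     })
        ///     .await?
        ///     .into_inner();
        /// println!("{} tokens from model {}", response.tokens.len(), response.model);
        /// # Ok(())
        /// # }
        /// ```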
3054        pub async fn tokenize_text(
3055            &mut self,
3056            request: impl tonic::IntoRequest<super::TokenizeTextRequest>,
3057        ) -> std::result::Result<
3058            tonic::Response<super::TokenizeTextResponse>,
3059            tonic::Status,
3060        > {
3061            self.inner
3062                .ready()
3063                .await
3064                .map_err(|e| {
3065                    tonic::Status::unknown(
3066                        format!("Service was not ready: {}", e.into()),
3067                    )
3068                })?;
3069            let codec = tonic_prost::ProstCodec::default();
3070            let path = http::uri::PathAndQuery::from_static(
3071                "/xai_api.Tokenize/TokenizeText",
3072            );
3073            let mut req = request.into_request();
3074            req.extensions_mut()
3075                .insert(GrpcMethod::new("xai_api.Tokenize", "TokenizeText"));
3076            self.inner.unary(req, path, codec).await
3077        }
3078    }
3079}
3080/// API key information.
3081#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
3082pub struct ApiKey {
3083    /// A redacted API key. The full API key will not be displayed after it has
3084    /// been created.
3085    #[prost(string, tag = "1")]
3086    pub redacted_api_key: ::prost::alloc::string::String,
3087    /// ID of the user who created this API key.
3088    #[prost(string, tag = "3")]
3089    pub user_id: ::prost::alloc::string::String,
3090    /// Human-readable name for the API key.
3091    #[prost(string, tag = "4")]
3092    pub name: ::prost::alloc::string::String,
3093    /// Unix timestamp when the API key was created.
3094    #[prost(message, optional, tag = "5")]
3095    pub create_time: ::core::option::Option<::prost_types::Timestamp>,
3096    /// Unix timestamp when the API key was last modified.
3097    #[prost(message, optional, tag = "9")]
3098    pub modify_time: ::core::option::Option<::prost_types::Timestamp>,
3099    /// ID of the last user who modified the API key.
3100    #[prost(string, tag = "11")]
3101    pub modified_by: ::prost::alloc::string::String,
3102    /// ID of the team this API key belongs to.
3103    #[prost(string, tag = "6")]
3104    pub team_id: ::prost::alloc::string::String,
3105    /// Access Control Lists (ACLs) associated with this key.
3106    /// These indicate the resources that the API key has access to.
3107    #[prost(string, repeated, tag = "7")]
3108    pub acls: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
3109    /// The ID of the API key. This is different from the API key itself.
3110    #[prost(string, tag = "8")]
3111    pub api_key_id: ::prost::alloc::string::String,
3112    /// Whether the API key is currently blocked from making API requests.
3113    #[prost(bool, tag = "10")]
3114    pub api_key_blocked: bool,
3115    /// Whether the team is currently blocked from making API requests.
3116    #[prost(bool, tag = "13")]
3117    pub team_blocked: bool,
3118    /// Whether the API key is currently disabled.
3119    #[prost(bool, tag = "12")]
3120    pub disabled: bool,
3121}
3122/// Generated client implementations.
3123pub mod auth_client {
3124    #![allow(
3125        unused_variables,
3126        dead_code,
3127        missing_docs,
3128        clippy::wildcard_imports,
3129        clippy::let_unit_value,
3130    )]
3131    use tonic::codegen::*;
3132    use tonic::codegen::http::Uri;
3133    /// An API service to check the status of an API key.
3134    #[derive(Debug, Clone)]
3135    pub struct AuthClient<T> {
3136        inner: tonic::client::Grpc<T>,
3137    }
3138    impl AuthClient<tonic::transport::Channel> {
3139        /// Attempt to create a new client by connecting to a given endpoint.
3140        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
3141        where
3142            D: TryInto<tonic::transport::Endpoint>,
3143            D::Error: Into<StdError>,
3144        {
3145            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
3146            Ok(Self::new(conn))
3147        }
3148    }
3149    impl<T> AuthClient<T>
3150    where
3151        T: tonic::client::GrpcService<tonic::body::Body>,
3152        T::Error: Into<StdError>,
3153        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
3154        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
3155    {
3156        pub fn new(inner: T) -> Self {
3157            let inner = tonic::client::Grpc::new(inner);
3158            Self { inner }
3159        }
3160        pub fn with_origin(inner: T, origin: Uri) -> Self {
3161            let inner = tonic::client::Grpc::with_origin(inner, origin);
3162            Self { inner }
3163        }
3164        pub fn with_interceptor<F>(
3165            inner: T,
3166            interceptor: F,
3167        ) -> AuthClient<InterceptedService<T, F>>
3168        where
3169            F: tonic::service::Interceptor,
3170            T::ResponseBody: Default,
3171            T: tonic::codegen::Service<
3172                http::Request<tonic::body::Body>,
3173                Response = http::Response<
3174                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
3175                >,
3176            >,
3177            <T as tonic::codegen::Service<
3178                http::Request<tonic::body::Body>,
3179            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
3180        {
3181            AuthClient::new(InterceptedService::new(inner, interceptor))
3182        }
3183        /// Compress requests with the given encoding.
3184        ///
3185        /// This requires the server to support it; otherwise, it might respond with an
3186        /// error.
3187        #[must_use]
3188        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
3189            self.inner = self.inner.send_compressed(encoding);
3190            self
3191        }
3192        /// Enable decompressing responses.
3193        #[must_use]
3194        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
3195            self.inner = self.inner.accept_compressed(encoding);
3196            self
3197        }
3198        /// Limits the maximum size of a decoded message.
3199        ///
3200        /// Default: `4MB`
3201        #[must_use]
3202        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
3203            self.inner = self.inner.max_decoding_message_size(limit);
3204            self
3205        }
3206        /// Limits the maximum size of an encoded message.
3207        ///
3208        /// Default: `usize::MAX`
3209        #[must_use]
3210        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
3211            self.inner = self.inner.max_encoding_message_size(limit);
3212            self
3213        }
3214        /// Returns some information about an API key.
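        ///
        /// A sketch of calling this RPC with an API key attached as request metadata.
        /// The module path and endpoint are assumptions, and the
        /// `authorization: Bearer …` header format is an assumption about how the
        /// service expects credentials:
        ///
        /// ```no_run
        /// use tonic::metadata::MetadataValue;
        /// use tonic::transport::Channel;
        /// use tonic::Request;
        /// use xai_api::auth_client::AuthClient;
        ///
        /// # async fn run(api_key: &str) -> Result<(), Box<dyn std::error::Error>> {
        /// let token: MetadataValue<_> = format!("Bearer {api_key}").parse()?;
        /// let channel = Channel::from_static("https://api.x.ai").connect().await?;
        /// let mut client = AuthClient::with_interceptor(channel, move |mut req: Request<()>| {
        ///     // Attach the API key to every outgoing request.
        ///     req.metadata_mut().insert("authorization", token.clone());
        ///     Ok(req)
        /// });
        /// let key_info = client.get_api_key_info(()).await?.into_inner();
        /// println!("key id: {}, blocked: {}", key_info.api_key_id, key_info.api_key_blocked);
        /// # Ok(())
        /// # }
        /// ```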
3215        pub async fn get_api_key_info(
3216            &mut self,
3217            request: impl tonic::IntoRequest<()>,
3218        ) -> std::result::Result<tonic::Response<super::ApiKey>, tonic::Status> {
3219            self.inner
3220                .ready()
3221                .await
3222                .map_err(|e| {
3223                    tonic::Status::unknown(
3224                        format!("Service was not ready: {}", e.into()),
3225                    )
3226                })?;
3227            let codec = tonic_prost::ProstCodec::default();
3228            let path = http::uri::PathAndQuery::from_static(
3229                "/xai_api.Auth/get_api_key_info",
3230            );
3231            let mut req = request.into_request();
3232            req.extensions_mut()
3233                .insert(GrpcMethod::new("xai_api.Auth", "get_api_key_info"));
3234            self.inner.unary(req, path, codec).await
3235        }
3236    }
3237}
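/// Request to search a documents source for chunks matching a query.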
3238#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
3239pub struct SearchRequest {
3240    /// The query to search for which will be embedded using the
3241    /// same embedding model as the one used for the source to query.
3242    #[prost(string, tag = "1")]
3243    pub query: ::prost::alloc::string::String,
3244    /// The source to query.
3245    #[prost(message, optional, tag = "2")]
3246    pub source: ::core::option::Option<DocumentsSource>,
3247    /// The number of chunks to return.
3248    /// Will always return the top matching chunks.
3249    /// Optional; defaults to 10.
3250    #[prost(int32, optional, tag = "3")]
3251    pub limit: ::core::option::Option<i32>,
3252    /// The ranking metric to use for the search. Defaults to RANKING_METRIC_L2_DISTANCE.
3253    #[prost(enumeration = "RankingMetric", optional, tag = "4")]
3254    pub ranking_metric: ::core::option::Option<i32>,
3255    /// User-defined instructions to be included in the search query. Defaults to generic search instructions.
3256    #[prost(string, optional, tag = "5")]
3257    pub instructions: ::core::option::Option<::prost::alloc::string::String>,
3258}
3259/// SearchResponse message contains the results of a document search operation.
3260/// It returns a collection of matching document chunks sorted by relevance score.
3261#[derive(Clone, PartialEq, ::prost::Message)]
3262pub struct SearchResponse {
3263    /// Collection of document chunks that match the search query, ordered by relevance score
3264    /// from highest to lowest.
3265    #[prost(message, repeated, tag = "1")]
3266    pub matches: ::prost::alloc::vec::Vec<SearchMatch>,
3267}
3268/// SearchMatch message represents a single document chunk that matches the search query.
3269/// It contains the document ID, chunk ID, content text, and a relevance score indicating
3270/// how well it matches the query.
3271#[derive(Clone, PartialEq, ::prost::Message)]
3272pub struct SearchMatch {
3273    /// Unique identifier of the document that contains the matching chunk.
3274    #[prost(string, tag = "1")]
3275    pub file_id: ::prost::alloc::string::String,
3276    /// Unique identifier of the specific chunk within the document that matched the search query.
3277    #[prost(string, tag = "2")]
3278    pub chunk_id: ::prost::alloc::string::String,
3279    /// The actual text content of the matching chunk that can be presented to the user.
3280    #[prost(string, tag = "3")]
3281    pub chunk_content: ::prost::alloc::string::String,
3282    /// The relevance score of the chunk, as determined by the ranking metric.
3283    /// For L2 distance, lower scores indicate better matches. Range is \[0, inf).
3284    /// For cosine similarity, higher scores indicate better matches. Range is \[0, 1\].
3285    #[prost(float, tag = "4")]
3286    pub score: f32,
3287    /// The ID(s) of the collection(s) to which this document belongs.
3288    #[prost(string, repeated, tag = "5")]
3289    pub collection_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
3290}
3291/// Configuration for a documents source in search requests.
3292///
3293/// This message configures a source for search content within documents or collections of documents.
3294/// Those documents must be uploaded through the management API or directly on the xAI console:
3295/// <https://console.x.ai>.
3296#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
3297pub struct DocumentsSource {
3298    /// IDs of collections to use.
3299    #[prost(string, repeated, tag = "1")]
3300    pub collection_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
3301}
3302/// RankingMetric is the metric to use for the search.
3303#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
3304#[repr(i32)]
3305pub enum RankingMetric {
3306    Unknown = 0,
3307    L2Distance = 1,
3308    CosineSimilarity = 2,
3309}
3310impl RankingMetric {
3311    /// String value of the enum field names used in the ProtoBuf definition.
3312    ///
3313    /// The values are not transformed in any way and thus are considered stable
3314    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
3315    pub fn as_str_name(&self) -> &'static str {
3316        match self {
3317            Self::Unknown => "RANKING_METRIC_UNKNOWN",
3318            Self::L2Distance => "RANKING_METRIC_L2_DISTANCE",
3319            Self::CosineSimilarity => "RANKING_METRIC_COSINE_SIMILARITY",
3320        }
3321    }
3322    /// Creates an enum from field names used in the ProtoBuf definition.
3323    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
3324        match value {
3325            "RANKING_METRIC_UNKNOWN" => Some(Self::Unknown),
3326            "RANKING_METRIC_L2_DISTANCE" => Some(Self::L2Distance),
3327            "RANKING_METRIC_COSINE_SIMILARITY" => Some(Self::CosineSimilarity),
3328            _ => None,
3329        }
3330    }
3331}
3332/// Generated client implementations.
3333pub mod documents_client {
3334    #![allow(
3335        unused_variables,
3336        dead_code,
3337        missing_docs,
3338        clippy::wildcard_imports,
3339        clippy::let_unit_value,
3340    )]
3341    use tonic::codegen::*;
3342    use tonic::codegen::http::Uri;
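    /// An API service to search the content of uploaded documents and
    /// collections.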
3343    #[derive(Debug, Clone)]
3344    pub struct DocumentsClient<T> {
3345        inner: tonic::client::Grpc<T>,
3346    }
3347    impl DocumentsClient<tonic::transport::Channel> {
3348        /// Attempt to create a new client by connecting to a given endpoint.
3349        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
3350        where
3351            D: TryInto<tonic::transport::Endpoint>,
3352            D::Error: Into<StdError>,
3353        {
3354            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
3355            Ok(Self::new(conn))
3356        }
3357    }
3358    impl<T> DocumentsClient<T>
3359    where
3360        T: tonic::client::GrpcService<tonic::body::Body>,
3361        T::Error: Into<StdError>,
3362        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
3363        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
3364    {
3365        pub fn new(inner: T) -> Self {
3366            let inner = tonic::client::Grpc::new(inner);
3367            Self { inner }
3368        }
3369        pub fn with_origin(inner: T, origin: Uri) -> Self {
3370            let inner = tonic::client::Grpc::with_origin(inner, origin);
3371            Self { inner }
3372        }
3373        pub fn with_interceptor<F>(
3374            inner: T,
3375            interceptor: F,
3376        ) -> DocumentsClient<InterceptedService<T, F>>
3377        where
3378            F: tonic::service::Interceptor,
3379            T::ResponseBody: Default,
3380            T: tonic::codegen::Service<
3381                http::Request<tonic::body::Body>,
3382                Response = http::Response<
3383                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
3384                >,
3385            >,
3386            <T as tonic::codegen::Service<
3387                http::Request<tonic::body::Body>,
3388            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
3389        {
3390            DocumentsClient::new(InterceptedService::new(inner, interceptor))
3391        }
3392        /// Compress requests with the given encoding.
3393        ///
3394        /// This requires the server to support it; otherwise, it might respond with an
3395        /// error.
3396        #[must_use]
3397        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
3398            self.inner = self.inner.send_compressed(encoding);
3399            self
3400        }
3401        /// Enable decompressing responses.
3402        #[must_use]
3403        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
3404            self.inner = self.inner.accept_compressed(encoding);
3405            self
3406        }
3407        /// Limits the maximum size of a decoded message.
3408        ///
3409        /// Default: `4MB`
3410        #[must_use]
3411        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
3412            self.inner = self.inner.max_decoding_message_size(limit);
3413            self
3414        }
3415        /// Limits the maximum size of an encoded message.
3416        ///
3417        /// Default: `usize::MAX`
3418        #[must_use]
3419        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
3420            self.inner = self.inner.max_encoding_message_size(limit);
3421            self
3422        }
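        /// Searches the given documents source and returns the top matching chunks.
        ///
        /// A minimal sketch (module path, endpoint, and collection ID are
        /// assumptions; authentication is omitted):
        ///
        /// ```no_run
        /// use xai_api::documents_client::DocumentsClient;
        /// use xai_api::{DocumentsSource, RankingMetric, SearchRequest};
        ///
        /// # async fn run() -> Result<(), Box<dyn std::error::Error>> {
        /// let mut client = DocumentsClient::connect("https://api.x.ai").await?;
        /// let request = SearchRequest {
        ///     query: "quarterly revenue".to_string(),
        ///     source: Some(DocumentsSource {
        ///         collection_ids: vec!["collection-123".to_string()],
        ///     }),
        ///     limit: Some(5),
        ///     ranking_metric: Some(RankingMetric::CosineSimilarity as i32),
        ///     instructions: None,
        /// };
        /// let response = client.search(request).await?.into_inner();
        /// for m in &response.matches {
        ///     println!("{} ({}): {:.3}", m.file_id, m.chunk_id, m.score);
        /// }
        /// # Ok(())
        /// # }
        /// ```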
3423        pub async fn search(
3424            &mut self,
3425            request: impl tonic::IntoRequest<super::SearchRequest>,
3426        ) -> std::result::Result<tonic::Response<super::SearchResponse>, tonic::Status> {
3427            self.inner
3428                .ready()
3429                .await
3430                .map_err(|e| {
3431                    tonic::Status::unknown(
3432                        format!("Service was not ready: {}", e.into()),
3433                    )
3434                })?;
3435            let codec = tonic_prost::ProstCodec::default();
3436            let path = http::uri::PathAndQuery::from_static("/xai_api.Documents/Search");
3437            let mut req = request.into_request();
3438            req.extensions_mut().insert(GrpcMethod::new("xai_api.Documents", "Search"));
3439            self.inner.unary(req, path, codec).await
3440        }
3441    }
3442}