//! gemini_rust/models.rs — request/response data models for the Gemini API.

1use serde::{Deserialize, Serialize};
2
3/// Role of a message in a conversation
4#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
5#[serde(rename_all = "lowercase")]
6pub enum Role {
7    /// Message from the user
8    User,
9    /// Message from the model
10    Model,
11}
12
/// Content part that can be included in a message.
///
/// Serialized with `#[serde(untagged)]`: there is no discriminator field, so
/// the JSON key present (`text`, `inlineData`, `functionCall`,
/// `functionResponse`) determines the variant. Variant order matters for
/// deserialization — keep `Text` first only if its shape cannot be confused
/// with the others.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(untagged)]
pub enum Part {
    /// Text content
    Text {
        /// The text content
        text: String,
        /// Whether this is a thought summary (Gemini 2.5 series only)
        #[serde(skip_serializing_if = "Option::is_none")]
        thought: Option<bool>,
    },
    /// Inline binary payload (e.g. an image), base64-encoded inside `Blob`
    InlineData {
        /// The blob data
        #[serde(rename = "inlineData")]
        inline_data: Blob,
    },
    /// Function call from the model
    FunctionCall {
        /// The function call details
        #[serde(rename = "functionCall")]
        function_call: super::tools::FunctionCall,
    },
    /// Function response (results from executing a function call)
    FunctionResponse {
        /// The function response details
        #[serde(rename = "functionResponse")]
        function_response: super::tools::FunctionResponse,
    },
}
43
/// Blob for a message part.
///
/// Carries media bytes inline with a request or response.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Blob {
    /// IANA MIME type of the payload (e.g. `"image/png"`)
    pub mime_type: String,
    pub data: String, // Base64 encoded data; callers must encode before constructing
}
51
52impl Blob {
53    /// Create a new blob with mime type and data
54    pub fn new(mime_type: impl Into<String>, data: impl Into<String>) -> Self {
55        Self {
56            mime_type: mime_type.into(),
57            data: data.into(),
58        }
59    }
60}
61
/// Content of a message.
///
/// Both fields are `Option` and omitted from JSON when `None`, matching the
/// wire format where either may be absent.
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Content {
    /// Parts of the content
    #[serde(skip_serializing_if = "Option::is_none")]
    pub parts: Option<Vec<Part>>,
    /// Role of the content
    #[serde(skip_serializing_if = "Option::is_none")]
    pub role: Option<Role>,
}
73
74impl Content {
75    /// Create a new text content
76    pub fn text(text: impl Into<String>) -> Self {
77        Self {
78            parts: Some(vec![Part::Text {
79                text: text.into(),
80                thought: None,
81            }]),
82            role: None,
83        }
84    }
85
86    /// Create a new content with a function call
87    pub fn function_call(function_call: super::tools::FunctionCall) -> Self {
88        Self {
89            parts: Some(vec![Part::FunctionCall { function_call }]),
90            role: None,
91        }
92    }
93
94    /// Create a new content with a function response
95    pub fn function_response(function_response: super::tools::FunctionResponse) -> Self {
96        Self {
97            parts: Some(vec![Part::FunctionResponse { function_response }]),
98            role: None,
99        }
100    }
101
102    /// Create a new content with a function response from name and JSON value
103    pub fn function_response_json(name: impl Into<String>, response: serde_json::Value) -> Self {
104        Self {
105            parts: Some(vec![Part::FunctionResponse {
106                function_response: super::tools::FunctionResponse::new(name, response),
107            }]),
108            role: None,
109        }
110    }
111
112    /// Create a new content with inline data (blob data)
113    pub fn inline_data(mime_type: impl Into<String>, data: impl Into<String>) -> Self {
114        Self {
115            parts: Some(vec![Part::InlineData {
116                inline_data: Blob::new(mime_type, data),
117            }]),
118            role: None,
119        }
120    }
121
122    /// Add a role to this content
123    pub fn with_role(mut self, role: Role) -> Self {
124        self.role = Some(role);
125        self
126    }
127}
128
/// Message in a conversation.
///
/// Client-side pairing of a `Content` with the `Role` that produced it.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Message {
    /// Content of the message
    pub content: Content,
    /// Role of the message
    pub role: Role,
}
137
138impl Message {
139    /// Create a new user message with text content
140    pub fn user(text: impl Into<String>) -> Self {
141        Self {
142            content: Content::text(text).with_role(Role::User),
143            role: Role::User,
144        }
145    }
146
147    /// Create a new model message with text content
148    pub fn model(text: impl Into<String>) -> Self {
149        Self {
150            content: Content::text(text).with_role(Role::Model),
151            role: Role::Model,
152        }
153    }
154
155    pub fn embed(text: impl Into<String>) -> Self {
156        Self {
157            content: Content::text(text),
158            role: Role::Model,
159        }
160    }
161
162    /// Create a new function message with function response content from JSON
163    pub fn function(name: impl Into<String>, response: serde_json::Value) -> Self {
164        Self {
165            content: Content::function_response_json(name, response).with_role(Role::Model),
166            role: Role::Model,
167        }
168    }
169
170    /// Create a new function message with function response from a JSON string
171    pub fn function_str(
172        name: impl Into<String>,
173        response: impl Into<String>,
174    ) -> Result<Self, serde_json::Error> {
175        let response_str = response.into();
176        let json = serde_json::from_str(&response_str)?;
177        Ok(Self {
178            content: Content::function_response_json(name, json).with_role(Role::Model),
179            role: Role::Model,
180        })
181    }
182}
183
/// Safety rating for content.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SafetyRating {
    /// The category of the safety rating
    // Kept as a free-form string rather than an enum so unknown categories
    // never break deserialization.
    pub category: String,
    /// The probability that the content is harmful
    pub probability: String,
}
192
/// Citation metadata for content.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CitationMetadata {
    /// The citation sources backing the generated content
    pub citation_sources: Vec<CitationSource>,
}
200
/// Citation source.
///
/// All fields are optional; the API populates only what it knows.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CitationSource {
    /// The URI of the citation source
    pub uri: Option<String>,
    /// The title of the citation source
    pub title: Option<String>,
    /// The start index of the citation in the response
    pub start_index: Option<i32>,
    /// The end index of the citation in the response
    pub end_index: Option<i32>,
    /// The license of the citation source
    pub license: Option<String>,
    /// The publication date of the citation source
    pub publication_date: Option<String>,
}
218
219/// A candidate response
220#[derive(Debug, Clone, Serialize, Deserialize)]
221#[serde(rename_all = "camelCase")]
222pub struct Candidate {
223    /// The content of the candidate
224    pub content: Content,
225    /// The safety ratings for the candidate
226    #[serde(skip_serializing_if = "Option::is_none")]
227    pub safety_ratings: Option<Vec<SafetyRating>>,
228    /// The citation metadata for the candidate
229    #[serde(skip_serializing_if = "Option::is_none")]
230    pub citation_metadata: Option<CitationMetadata>,
231    /// The finish reason for the candidate
232    #[serde(skip_serializing_if = "Option::is_none")]
233    pub finish_reason: Option<String>,
234    /// The index of the candidate
235    #[serde(skip_serializing_if = "Option::is_none")]
236    pub index: Option<i32>,
237}
238
/// Metadata about token usage.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct UsageMetadata {
    /// The number of prompt tokens
    pub prompt_token_count: i32,
    /// The number of response tokens
    #[serde(skip_serializing_if = "Option::is_none")]
    pub candidates_token_count: Option<i32>,
    /// The total number of tokens
    pub total_token_count: i32,
    /// The number of thinking tokens (Gemini 2.5 series only)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub thoughts_token_count: Option<i32>,
    /// Per-modality breakdown of the prompt tokens
    #[serde(skip_serializing_if = "Option::is_none")]
    pub prompt_tokens_details: Option<Vec<PromptTokenDetails>>,
}
257
/// Details about prompt tokens by modality.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct PromptTokenDetails {
    /// The modality (e.g., "TEXT")
    pub modality: String,
    /// Token count for this modality
    pub token_count: i32,
}
267
268/// Response from the Gemini API for content generation
269#[derive(Debug, Clone, Serialize, Deserialize)]
270#[serde(rename_all = "camelCase")]
271pub struct GenerationResponse {
272    /// The candidates generated
273    pub candidates: Vec<Candidate>,
274    /// The prompt feedback
275    #[serde(skip_serializing_if = "Option::is_none")]
276    pub prompt_feedback: Option<PromptFeedback>,
277    /// Usage metadata
278    #[serde(skip_serializing_if = "Option::is_none")]
279    pub usage_metadata: Option<UsageMetadata>,
280    /// Model version used
281    #[serde(skip_serializing_if = "Option::is_none")]
282    pub model_version: Option<String>,
283    /// Response ID
284    #[serde(skip_serializing_if = "Option::is_none")]
285    pub response_id: Option<String>,
286}
287
/// Content of the embedding.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ContentEmbedding {
    /// The embedding vector values
    pub values: Vec<f32>, //Maybe Quantize this
}
294
/// Response from the Gemini API for content embedding.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ContentEmbeddingResponse {
    /// The single embedding generated for the request content
    pub embedding: ContentEmbedding,
}
301
/// Response from the Gemini API for batch content embedding.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BatchContentEmbeddingResponse {
    /// One embedding per request, in request order
    pub embeddings: Vec<ContentEmbedding>,
}
308
/// Feedback about the prompt.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct PromptFeedback {
    /// The safety ratings for the prompt
    // NOTE(review): non-optional — verify the API always sends
    // `safetyRatings` when `promptFeedback` is present.
    pub safety_ratings: Vec<SafetyRating>,
    /// The block reason if the prompt was blocked
    #[serde(skip_serializing_if = "Option::is_none")]
    pub block_reason: Option<String>,
}
319
320impl GenerationResponse {
321    /// Get the text of the first candidate
322    pub fn text(&self) -> String {
323        self.candidates
324            .first()
325            .and_then(|c| {
326                c.content.parts.as_ref().and_then(|parts| {
327                    parts.first().and_then(|p| match p {
328                        Part::Text { text, thought: _ } => Some(text.clone()),
329                        _ => None,
330                    })
331                })
332            })
333            .unwrap_or_default()
334    }
335
336    /// Get function calls from the response
337    pub fn function_calls(&self) -> Vec<&super::tools::FunctionCall> {
338        self.candidates
339            .iter()
340            .flat_map(|c| {
341                c.content.parts.as_ref().map(|parts| {
342                    parts.iter().filter_map(|p| match p {
343                        Part::FunctionCall { function_call } => Some(function_call),
344                        _ => None,
345                    }).collect::<Vec<_>>()
346                }).unwrap_or_default()
347            })
348            .collect()
349    }
350
351    /// Get thought summaries from the response
352    pub fn thoughts(&self) -> Vec<String> {
353        self.candidates
354            .iter()
355            .flat_map(|c| {
356                c.content.parts.as_ref().map(|parts| {
357                    parts.iter().filter_map(|p| match p {
358                        Part::Text {
359                            text,
360                            thought: Some(true),
361                        } => Some(text.clone()),
362                        _ => None,
363                    }).collect::<Vec<_>>()
364                }).unwrap_or_default()
365            })
366            .collect()
367    }
368
369    /// Get all text parts (both regular text and thoughts)
370    pub fn all_text(&self) -> Vec<(String, bool)> {
371        self.candidates
372            .iter()
373            .flat_map(|c| {
374                c.content.parts.as_ref().map(|parts| {
375                    parts.iter().filter_map(|p| match p {
376                        Part::Text { text, thought } => Some((text.clone(), thought.unwrap_or(false))),
377                        _ => None,
378                    }).collect::<Vec<_>>()
379                }).unwrap_or_default()
380            })
381            .collect()
382    }
383}
384
/// Request to generate content.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct GenerateContentRequest {
    /// The conversation turns to generate content from
    pub contents: Vec<Content>,
    /// The generation config
    #[serde(skip_serializing_if = "Option::is_none")]
    pub generation_config: Option<GenerationConfig>,
    /// The safety settings
    #[serde(skip_serializing_if = "Option::is_none")]
    pub safety_settings: Option<Vec<SafetySetting>>,
    /// The tools that the model can use
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tools: Option<Vec<super::tools::Tool>>,
    /// The tool config
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tool_config: Option<ToolConfig>,
    /// The system instruction (sent separately from `contents`)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub system_instruction: Option<Content>,
}
407
408/// Request to embed words
409#[derive(Debug, Clone, Serialize, Deserialize)]
410pub struct EmbedContentRequest {
411    /// The specified embedding model
412    pub model: String,
413    /// The chunks content to generate embeddings
414    pub content: Content,
415    /// The embedding task type (optional)
416    #[serde(skip_serializing_if = "Option::is_none")]
417    pub task_type: Option<TaskType>,
418    /// The title of the document (optional)
419    #[serde(skip_serializing_if = "Option::is_none")]
420    pub title: Option<String>,
421    /// The output_dimensionality (optional)
422    #[serde(skip_serializing_if = "Option::is_none")]
423    pub output_dimensionality: Option<i32>,
424}
425
/// Request to batch embed requests.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BatchEmbedContentsRequest {
    /// The list of embed requests
    pub requests: Vec<EmbedContentRequest>,
}
432
/// Request for batch content generation (corrected format).
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct BatchGenerateContentRequest {
    /// The batch configuration
    pub batch: BatchConfig,
}
440
/// Batch configuration.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct BatchConfig {
    /// Human-readable display name for the batch
    pub display_name: String,
    /// Input configuration
    pub input_config: InputConfig,
}
450
/// Input configuration for batch requests.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct InputConfig {
    /// The requests container (nesting mirrors the wire format)
    pub requests: RequestsContainer,
}
458
/// Container for requests.
///
/// Exists purely to mirror the API's `requests.requests` JSON nesting.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RequestsContainer {
    /// List of requests
    pub requests: Vec<BatchRequestItem>,
}
466
467/// Individual batch request item
468#[derive(Debug, Clone, Serialize, Deserialize)]
469#[serde(rename_all = "camelCase")]
470pub struct BatchRequestItem {
471    /// The actual request
472    pub request: GenerateContentRequest,
473    /// Metadata for the request
474    pub metadata: Option<RequestMetadata>,
475}
476
/// Metadata for batch request.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RequestMetadata {
    /// Caller-supplied key used to correlate results with requests
    pub key: String,
}
484
/// Response from the Gemini API for batch content generation (async batch creation).
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct BatchGenerateContentResponse {
    /// The name/ID of the created batch operation
    pub name: String,
    /// Metadata about the batch
    pub metadata: BatchMetadata,
}
494
/// Metadata for the batch operation.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct BatchMetadata {
    /// Type annotation (the JSON `@type` field; presumably a protobuf
    /// `Any` type URL — confirm against a live response)
    #[serde(rename = "@type")]
    pub type_annotation: String,
    /// Model used for the batch
    pub model: String,
    /// Display name of the batch
    pub display_name: String,
    /// Creation time, as a timestamp string
    pub create_time: String,
    /// Update time, as a timestamp string
    pub update_time: String,
    /// Batch statistics
    pub batch_stats: BatchStats,
    /// Current state of the batch
    pub state: String,
    /// Name of the batch (duplicates the top-level `name` in the response)
    pub name: String,
}
517
/// Statistics for the batch.
///
/// Counts are strings because the API serializes 64-bit integers as JSON
/// strings.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct BatchStats {
    /// Total number of requests in the batch
    pub request_count: String,
    /// Number of pending requests
    pub pending_request_count: Option<String>,
    /// Number of completed requests
    pub completed_request_count: Option<String>,
    /// Number of failed requests
    pub failed_request_count: Option<String>,
}
531
/// Configuration for thinking (Gemini 2.5 series only).
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ThinkingConfig {
    /// The thinking budget (number of thinking tokens)
    ///
    /// - Set to 0 to disable thinking
    /// - Set to -1 for dynamic thinking (model decides)
    /// - Set to a positive number for a specific token budget
    ///
    /// Model-specific ranges:
    /// - 2.5 Pro: 128 to 32768 (cannot disable thinking)
    /// - 2.5 Flash: 0 to 24576
    /// - 2.5 Flash Lite: 512 to 24576
    #[serde(skip_serializing_if = "Option::is_none")]
    pub thinking_budget: Option<i32>,

    /// Whether to include thought summaries in the response
    ///
    /// When enabled, the response will include synthesized versions of the model's
    /// raw thoughts, providing insights into the reasoning process.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub include_thoughts: Option<bool>,
}
556
557impl ThinkingConfig {
558    /// Create a new thinking config with default settings
559    pub fn new() -> Self {
560        Self {
561            thinking_budget: None,
562            include_thoughts: None,
563        }
564    }
565
566    /// Set the thinking budget
567    pub fn with_thinking_budget(mut self, budget: i32) -> Self {
568        self.thinking_budget = Some(budget);
569        self
570    }
571
572    /// Enable dynamic thinking (model decides the budget)
573    pub fn with_dynamic_thinking(mut self) -> Self {
574        self.thinking_budget = Some(-1);
575        self
576    }
577
578    /// Include thought summaries in the response
579    pub fn with_thoughts_included(mut self, include: bool) -> Self {
580        self.include_thoughts = Some(include);
581        self
582    }
583}
584
585impl Default for ThinkingConfig {
586    fn default() -> Self {
587        Self::new()
588    }
589}
590
/// Configuration for generation.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct GenerationConfig {
    /// The temperature for the model (0.0 to 1.0)
    ///
    /// Controls the randomness of the output. Higher values (e.g., 0.9) make output
    /// more random, lower values (e.g., 0.1) make output more deterministic.
    /// NOTE(review): newer Gemini models accept temperatures up to 2.0 —
    /// confirm the range against the target model's docs.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub temperature: Option<f32>,

    /// The top-p value for the model (0.0 to 1.0)
    ///
    /// For each token generation step, the model considers the top_p percentage of
    /// probability mass for potential token choices. Lower values are more selective,
    /// higher values allow more variety.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub top_p: Option<f32>,

    /// The top-k value for the model
    ///
    /// For each token generation step, the model considers the top_k most likely tokens.
    /// Lower values are more selective, higher values allow more variety.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub top_k: Option<i32>,

    /// The maximum number of tokens to generate
    ///
    /// Limits the length of the generated content. One token is roughly 4 characters.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_output_tokens: Option<i32>,

    /// The candidate count
    ///
    /// Number of alternative responses to generate.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub candidate_count: Option<i32>,

    /// Whether to stop on specific sequences
    ///
    /// The model will stop generating content when it encounters any of these sequences.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stop_sequences: Option<Vec<String>>,

    /// The response mime type
    ///
    /// Specifies the format of the model's response.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub response_mime_type: Option<String>,

    /// The response schema
    ///
    /// Specifies the JSON schema for structured responses.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub response_schema: Option<serde_json::Value>,

    /// The thinking configuration
    ///
    /// Configuration for the model's thinking process (Gemini 2.5 series only).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub thinking_config: Option<ThinkingConfig>,
}
653
654impl Default for GenerationConfig {
655    fn default() -> Self {
656        Self {
657            temperature: Some(0.7),
658            top_p: Some(0.95),
659            top_k: Some(40),
660            max_output_tokens: Some(1024),
661            candidate_count: Some(1),
662            stop_sequences: None,
663            response_mime_type: None,
664            response_schema: None,
665            thinking_config: None,
666        }
667    }
668}
669
670/// Configuration for tools
671#[derive(Debug, Clone, Serialize, Deserialize)]
672pub struct ToolConfig {
673    /// The function calling config
674    #[serde(skip_serializing_if = "Option::is_none")]
675    pub function_calling_config: Option<FunctionCallingConfig>,
676}
677
/// Configuration for function calling.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FunctionCallingConfig {
    /// The mode for function calling
    pub mode: FunctionCallingMode,
}
684
/// Mode for function calling.
///
/// Serialized as `"AUTO"` / `"ANY"` / `"NONE"` per the `rename_all` attribute.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "SCREAMING_SNAKE_CASE")]
pub enum FunctionCallingMode {
    /// The model may use function calling
    Auto,
    /// The model must use function calling
    Any,
    /// The model must not use function calling
    None,
}
696
/// Setting for safety.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SafetySetting {
    /// The category of content to filter
    pub category: HarmCategory,
    /// The threshold for filtering
    pub threshold: HarmBlockThreshold,
}
705
706/// Category of harmful content
707#[derive(Debug, Clone, Serialize, Deserialize)]
708#[serde(rename_all = "SCREAMING_SNAKE_CASE")]
709pub enum HarmCategory {
710    /// Dangerous content
711    Dangerous,
712    /// Harassment content
713    Harassment,
714    /// Hate speech
715    HateSpeech,
716    /// Sexually explicit content
717    SexuallyExplicit,
718}
719
/// Threshold for blocking harmful content.
///
/// Serialized as `BLOCK_LOW_AND_ABOVE`, `BLOCK_MEDIUM_AND_ABOVE`, etc.
#[allow(clippy::enum_variant_names)]
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "SCREAMING_SNAKE_CASE")]
pub enum HarmBlockThreshold {
    /// Block when harm probability is low or higher (strictest)
    BlockLowAndAbove,
    /// Block when harm probability is medium or higher
    BlockMediumAndAbove,
    /// Block when harm probability is high or higher
    // NOTE(review): serializes to BLOCK_HIGH_AND_ABOVE, which is not among
    // the documented API values — confirm, or prefer BlockOnlyHigh.
    BlockHighAndAbove,
    /// Block only when harm probability is high
    BlockOnlyHigh,
    /// Never block content
    BlockNone,
}
736
/// Embedding Task types.
///
/// Serialized as `SEMANTIC_SIMILARITY`, `RETRIEVAL_DOCUMENT`, etc., matching
/// the API's `taskType` enum values.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "SCREAMING_SNAKE_CASE")]
pub enum TaskType {
    ///Used to generate embeddings that are optimized to assess text similarity
    SemanticSimilarity,
    ///Used to generate embeddings that are optimized to classify texts according to preset labels
    Classification,
    ///Used to generate embeddings that are optimized to cluster texts based on their similarities
    Clustering,

    ///Used to generate embeddings that are optimized for document search or information retrieval.
    RetrievalDocument,
    /// Used to embed a search query that will be matched against documents
    /// embedded with `RetrievalDocument`
    RetrievalQuery,
    /// Used to embed questions for question-answering workloads
    QuestionAnswering,
    /// Used to embed statements whose veracity will be checked against sources
    FactVerification,

    /// Used to retrieve a code block based on a natural language query, such as sort an array or reverse a linked list.
    /// Embeddings of the code blocks are computed using RETRIEVAL_DOCUMENT.
    CodeRetrievalQuery,
}