google_cloud_ai/google/
google.ai.generativelanguage.v1.rs

1// This file is @generated by prost-build.
/// A collection of source attributions for a piece of content.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CitationMetadata {
    /// Citations to sources for a specific response.
    /// Empty when no sources were attributed (repeated proto3 field defaults
    /// to an empty `Vec`).
    #[prost(message, repeated, tag = "1")]
    pub citation_sources: ::prost::alloc::vec::Vec<CitationSource>,
}
/// A citation to a source for a portion of a specific response.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CitationSource {
    /// Optional. Start of segment of the response that is attributed to this
    /// source.
    ///
    /// Index indicates the start of the segment, measured in bytes.
    #[prost(int32, optional, tag = "1")]
    pub start_index: ::core::option::Option<i32>,
    /// Optional. End of the attributed segment, exclusive.
    /// Measured in bytes, like `start_index`.
    #[prost(int32, optional, tag = "2")]
    pub end_index: ::core::option::Option<i32>,
    /// Optional. URI that is attributed as a source for a portion of the text.
    #[prost(string, optional, tag = "3")]
    pub uri: ::core::option::Option<::prost::alloc::string::String>,
    /// Optional. License for the GitHub project that is attributed as a source for
    /// segment.
    ///
    /// License info is required for code citations.
    #[prost(string, optional, tag = "4")]
    pub license: ::core::option::Option<::prost::alloc::string::String>,
}
/// The base structured datatype containing multi-part content of a message.
///
/// A `Content` includes a `role` field designating the producer of the `Content`
/// and a `parts` field containing multi-part data that contains the content of
/// the message turn.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Content {
    /// Ordered `Parts` that constitute a single message. Parts may have different
    /// MIME types.
    #[prost(message, repeated, tag = "1")]
    pub parts: ::prost::alloc::vec::Vec<Part>,
    /// Optional. The producer of the content. Must be either 'user' or 'model'.
    ///
    /// Useful to set for multi-turn conversations, otherwise can be left blank
    /// or unset.
    ///
    /// An empty `String` means unset: this is a plain (non-`optional`) proto3
    /// string field, so it has no explicit presence.
    #[prost(string, tag = "2")]
    pub role: ::prost::alloc::string::String,
}
/// A datatype containing media that is part of a multi-part `Content` message.
///
/// A `Part` consists of data which has an associated datatype. A `Part` can only
/// contain one of the accepted types in `Part.data`.
///
/// A `Part` must have a fixed IANA MIME type identifying the type and subtype
/// of the media if the `inline_data` field is filled with raw bytes.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Part {
    /// The payload of this part. `None` when no oneof variant was set on the
    /// wire; otherwise exactly one `part::Data` variant (tags 2 and 3 mirror
    /// the proto field numbers).
    #[prost(oneof = "part::Data", tags = "2, 3")]
    pub data: ::core::option::Option<part::Data>,
}
/// Nested message and enum types in `Part`.
pub mod part {
    /// The data payload of a `Part`. A `Part` holds at most one of these
    /// variants (protobuf `oneof`).
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Data {
        /// Inline text.
        #[prost(string, tag = "2")]
        Text(::prost::alloc::string::String),
        /// Inline media bytes.
        #[prost(message, tag = "3")]
        InlineData(super::Blob),
    }
}
/// Raw media bytes.
///
/// Text should not be sent as raw bytes, use the 'text' field.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Blob {
    /// The IANA standard MIME type of the source data.
    /// Examples:
    ///    - image/png
    ///    - image/jpeg
    /// If an unsupported MIME type is provided, an error will be returned. For a
    /// complete list of supported types, see [Supported file
    /// formats](<https://ai.google.dev/gemini-api/docs/prompting_with_media#supported_file_formats>).
    #[prost(string, tag = "1")]
    pub mime_type: ::prost::alloc::string::String,
    /// Raw bytes for media formats.
    #[prost(bytes = "vec", tag = "2")]
    pub data: ::prost::alloc::vec::Vec<u8>,
}
/// Safety rating for a piece of content.
///
/// The safety rating contains the category of harm and the
/// harm probability level in that category for a piece of content.
/// Content is classified for safety across a number of
/// harm categories and the probability of the harm classification is included
/// here.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct SafetyRating {
    /// Required. The category for this rating.
    ///
    /// Stored as a raw `i32` (prost convention for enum fields); convert with
    /// `HarmCategory::try_from` to get the typed value.
    #[prost(enumeration = "HarmCategory", tag = "3")]
    pub category: i32,
    /// Required. The probability of harm for this content.
    ///
    /// Raw `i32` for `safety_rating::HarmProbability`.
    #[prost(enumeration = "safety_rating::HarmProbability", tag = "4")]
    pub probability: i32,
    /// Was this content blocked because of this rating?
    #[prost(bool, tag = "5")]
    pub blocked: bool,
}
/// Nested message and enum types in `SafetyRating`.
pub mod safety_rating {
    /// The probability that a piece of content is harmful.
    ///
    /// The classification system gives the probability of the content being
    /// unsafe. This does not indicate the severity of harm for a piece of content.
    // `#[repr(i32)]`: messages carry this enum as a raw `i32` field
    // (see `SafetyRating::probability`).
    #[derive(
        Clone,
        Copy,
        Debug,
        PartialEq,
        Eq,
        Hash,
        PartialOrd,
        Ord,
        ::prost::Enumeration
    )]
    #[repr(i32)]
    pub enum HarmProbability {
        /// Probability is unspecified.
        Unspecified = 0,
        /// Content has a negligible chance of being unsafe.
        Negligible = 1,
        /// Content has a low chance of being unsafe.
        Low = 2,
        /// Content has a medium chance of being unsafe.
        Medium = 3,
        /// Content has a high chance of being unsafe.
        High = 4,
    }
    impl HarmProbability {
        /// String value of the enum field names used in the ProtoBuf definition.
        ///
        /// The values are not transformed in any way and thus are considered stable
        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
        pub fn as_str_name(&self) -> &'static str {
            match self {
                Self::Unspecified => "HARM_PROBABILITY_UNSPECIFIED",
                Self::Negligible => "NEGLIGIBLE",
                Self::Low => "LOW",
                Self::Medium => "MEDIUM",
                Self::High => "HIGH",
            }
        }
        /// Creates an enum from field names used in the ProtoBuf definition.
        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
            match value {
                "HARM_PROBABILITY_UNSPECIFIED" => Some(Self::Unspecified),
                "NEGLIGIBLE" => Some(Self::Negligible),
                "LOW" => Some(Self::Low),
                "MEDIUM" => Some(Self::Medium),
                "HIGH" => Some(Self::High),
                _ => None,
            }
        }
    }
}
/// Safety setting, affecting the safety-blocking behavior.
///
/// Passing a safety setting for a category changes the allowed probability that
/// content is blocked.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct SafetySetting {
    /// Required. The category for this setting.
    ///
    /// Raw `i32` for `HarmCategory` (prost convention for enum fields).
    #[prost(enumeration = "HarmCategory", tag = "3")]
    pub category: i32,
    /// Required. Controls the probability threshold at which harm is blocked.
    ///
    /// Raw `i32` for `safety_setting::HarmBlockThreshold`.
    #[prost(enumeration = "safety_setting::HarmBlockThreshold", tag = "4")]
    pub threshold: i32,
}
/// Nested message and enum types in `SafetySetting`.
pub mod safety_setting {
    /// Block at and beyond a specified harm probability.
    ///
    /// Each variant's doc states which `HarmProbability` levels remain allowed
    /// when that threshold is selected.
    #[derive(
        Clone,
        Copy,
        Debug,
        PartialEq,
        Eq,
        Hash,
        PartialOrd,
        Ord,
        ::prost::Enumeration
    )]
    #[repr(i32)]
    pub enum HarmBlockThreshold {
        /// Threshold is unspecified.
        Unspecified = 0,
        /// Content with NEGLIGIBLE will be allowed.
        BlockLowAndAbove = 1,
        /// Content with NEGLIGIBLE and LOW will be allowed.
        BlockMediumAndAbove = 2,
        /// Content with NEGLIGIBLE, LOW, and MEDIUM will be allowed.
        BlockOnlyHigh = 3,
        /// All content will be allowed.
        BlockNone = 4,
        /// Turn off the safety filter.
        Off = 5,
    }
    impl HarmBlockThreshold {
        /// String value of the enum field names used in the ProtoBuf definition.
        ///
        /// The values are not transformed in any way and thus are considered stable
        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
        pub fn as_str_name(&self) -> &'static str {
            match self {
                Self::Unspecified => "HARM_BLOCK_THRESHOLD_UNSPECIFIED",
                Self::BlockLowAndAbove => "BLOCK_LOW_AND_ABOVE",
                Self::BlockMediumAndAbove => "BLOCK_MEDIUM_AND_ABOVE",
                Self::BlockOnlyHigh => "BLOCK_ONLY_HIGH",
                Self::BlockNone => "BLOCK_NONE",
                Self::Off => "OFF",
            }
        }
        /// Creates an enum from field names used in the ProtoBuf definition.
        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
            match value {
                "HARM_BLOCK_THRESHOLD_UNSPECIFIED" => Some(Self::Unspecified),
                "BLOCK_LOW_AND_ABOVE" => Some(Self::BlockLowAndAbove),
                "BLOCK_MEDIUM_AND_ABOVE" => Some(Self::BlockMediumAndAbove),
                "BLOCK_ONLY_HIGH" => Some(Self::BlockOnlyHigh),
                "BLOCK_NONE" => Some(Self::BlockNone),
                "OFF" => Some(Self::Off),
                _ => None,
            }
        }
    }
}
/// The category of a rating.
///
/// These categories cover various kinds of harms that developers
/// may wish to adjust.
///
/// Variants marked **PaLM** (1-6) and **Gemini** (7-11) apply to the
/// respective model families, per the upstream proto comments.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum HarmCategory {
    /// Category is unspecified.
    Unspecified = 0,
    /// **PaLM** - Negative or harmful comments targeting identity and/or protected
    /// attribute.
    Derogatory = 1,
    /// **PaLM** - Content that is rude, disrespectful, or profane.
    Toxicity = 2,
    /// **PaLM** - Describes scenarios depicting violence against an individual or
    /// group, or general descriptions of gore.
    Violence = 3,
    /// **PaLM** - Contains references to sexual acts or other lewd content.
    Sexual = 4,
    /// **PaLM** - Promotes unchecked medical advice.
    Medical = 5,
    /// **PaLM** - Dangerous content that promotes, facilitates, or encourages
    /// harmful acts.
    Dangerous = 6,
    /// **Gemini** - Harassment content.
    Harassment = 7,
    /// **Gemini** - Hate speech and content.
    HateSpeech = 8,
    /// **Gemini** - Sexually explicit content.
    SexuallyExplicit = 9,
    /// **Gemini** - Dangerous content.
    DangerousContent = 10,
    /// **Gemini** - Content that may be used to harm civic integrity.
    CivicIntegrity = 11,
}
impl HarmCategory {
    /// String value of the enum field names used in the ProtoBuf definition.
    ///
    /// The values are not transformed in any way and thus are considered stable
    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
    pub fn as_str_name(&self) -> &'static str {
        match self {
            Self::Unspecified => "HARM_CATEGORY_UNSPECIFIED",
            Self::Derogatory => "HARM_CATEGORY_DEROGATORY",
            Self::Toxicity => "HARM_CATEGORY_TOXICITY",
            Self::Violence => "HARM_CATEGORY_VIOLENCE",
            Self::Sexual => "HARM_CATEGORY_SEXUAL",
            Self::Medical => "HARM_CATEGORY_MEDICAL",
            Self::Dangerous => "HARM_CATEGORY_DANGEROUS",
            Self::Harassment => "HARM_CATEGORY_HARASSMENT",
            Self::HateSpeech => "HARM_CATEGORY_HATE_SPEECH",
            Self::SexuallyExplicit => "HARM_CATEGORY_SEXUALLY_EXPLICIT",
            Self::DangerousContent => "HARM_CATEGORY_DANGEROUS_CONTENT",
            Self::CivicIntegrity => "HARM_CATEGORY_CIVIC_INTEGRITY",
        }
    }
    /// Creates an enum from field names used in the ProtoBuf definition.
    ///
    /// Returns `None` for any string that is not an exact (case-sensitive)
    /// proto enum value name.
    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
        match value {
            "HARM_CATEGORY_UNSPECIFIED" => Some(Self::Unspecified),
            "HARM_CATEGORY_DEROGATORY" => Some(Self::Derogatory),
            "HARM_CATEGORY_TOXICITY" => Some(Self::Toxicity),
            "HARM_CATEGORY_VIOLENCE" => Some(Self::Violence),
            "HARM_CATEGORY_SEXUAL" => Some(Self::Sexual),
            "HARM_CATEGORY_MEDICAL" => Some(Self::Medical),
            "HARM_CATEGORY_DANGEROUS" => Some(Self::Dangerous),
            "HARM_CATEGORY_HARASSMENT" => Some(Self::Harassment),
            "HARM_CATEGORY_HATE_SPEECH" => Some(Self::HateSpeech),
            "HARM_CATEGORY_SEXUALLY_EXPLICIT" => Some(Self::SexuallyExplicit),
            "HARM_CATEGORY_DANGEROUS_CONTENT" => Some(Self::DangerousContent),
            "HARM_CATEGORY_CIVIC_INTEGRITY" => Some(Self::CivicIntegrity),
            _ => None,
        }
    }
}
/// Request to generate a completion from the model.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GenerateContentRequest {
    /// Required. The name of the `Model` to use for generating the completion.
    ///
    /// Format: `name=models/{model}`.
    #[prost(string, tag = "1")]
    pub model: ::prost::alloc::string::String,
    /// Required. The content of the current conversation with the model.
    ///
    /// For single-turn queries, this is a single instance. For multi-turn queries
    /// like [chat](<https://ai.google.dev/gemini-api/docs/text-generation#chat>),
    /// this is a repeated field that contains the conversation history and the
    /// latest request.
    #[prost(message, repeated, tag = "2")]
    pub contents: ::prost::alloc::vec::Vec<Content>,
    /// Optional. A list of unique `SafetySetting` instances for blocking unsafe
    /// content.
    ///
    /// This will be enforced on the `GenerateContentRequest.contents` and
    /// `GenerateContentResponse.candidates`. There should not be more than one
    /// setting for each `SafetyCategory` type. The API will block any contents and
    /// responses that fail to meet the thresholds set by these settings. This list
    /// overrides the default settings for each `SafetyCategory` specified in the
    /// safety_settings. If there is no `SafetySetting` for a given
    /// `SafetyCategory` provided in the list, the API will use the default safety
    /// setting for that category. Harm categories HARM_CATEGORY_HATE_SPEECH,
    /// HARM_CATEGORY_SEXUALLY_EXPLICIT, HARM_CATEGORY_DANGEROUS_CONTENT,
    /// HARM_CATEGORY_HARASSMENT are supported. Refer to the
    /// [guide](<https://ai.google.dev/gemini-api/docs/safety-settings>)
    /// for detailed information on available safety settings. Also refer to the
    /// [Safety guidance](<https://ai.google.dev/gemini-api/docs/safety-guidance>) to
    /// learn how to incorporate safety considerations in your AI applications.
    #[prost(message, repeated, tag = "3")]
    pub safety_settings: ::prost::alloc::vec::Vec<SafetySetting>,
    /// Optional. Configuration options for model generation and outputs.
    #[prost(message, optional, tag = "4")]
    pub generation_config: ::core::option::Option<GenerationConfig>,
}
/// Configuration options for model generation and outputs. Not all parameters
/// are configurable for every model.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GenerationConfig {
    /// Optional. Number of generated responses to return.
    ///
    /// Currently, this value can only be set to 1. If unset, this will default
    /// to 1.
    #[prost(int32, optional, tag = "1")]
    pub candidate_count: ::core::option::Option<i32>,
    /// Optional. The set of character sequences (up to 5) that will stop output
    /// generation. If specified, the API will stop at the first appearance of a
    /// `stop_sequence`. The stop sequence will not be included as part of the
    /// response.
    #[prost(string, repeated, tag = "2")]
    pub stop_sequences: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
    /// Optional. The maximum number of tokens to include in a response candidate.
    ///
    /// Note: The default value varies by model, see the `Model.output_token_limit`
    /// attribute of the `Model` returned from the `getModel` function.
    #[prost(int32, optional, tag = "4")]
    pub max_output_tokens: ::core::option::Option<i32>,
    /// Optional. Controls the randomness of the output.
    ///
    /// Note: The default value varies by model, see the `Model.temperature`
    /// attribute of the `Model` returned from the `getModel` function.
    ///
    /// Values can range from \[0.0, 2.0\].
    #[prost(float, optional, tag = "5")]
    pub temperature: ::core::option::Option<f32>,
    /// Optional. The maximum cumulative probability of tokens to consider when
    /// sampling.
    ///
    /// The model uses combined Top-k and Top-p (nucleus) sampling.
    ///
    /// Tokens are sorted based on their assigned probabilities so that only the
    /// most likely tokens are considered. Top-k sampling directly limits the
    /// maximum number of tokens to consider, while Nucleus sampling limits the
    /// number of tokens based on the cumulative probability.
    ///
    /// Note: The default value varies by `Model` and is specified by
    /// the `Model.top_p` attribute returned from the `getModel` function. An empty
    /// `top_k` attribute indicates that the model doesn't apply top-k sampling
    /// and doesn't allow setting `top_k` on requests.
    #[prost(float, optional, tag = "6")]
    pub top_p: ::core::option::Option<f32>,
    /// Optional. The maximum number of tokens to consider when sampling.
    ///
    /// Gemini models use Top-p (nucleus) sampling or a combination of Top-k and
    /// nucleus sampling. Top-k sampling considers the set of `top_k` most probable
    /// tokens. Models running with nucleus sampling don't allow top_k setting.
    ///
    /// Note: The default value varies by `Model` and is specified by
    /// the `Model.top_p` attribute returned from the `getModel` function. An empty
    /// `top_k` attribute indicates that the model doesn't apply top-k sampling
    /// and doesn't allow setting `top_k` on requests.
    // NOTE(review): the upstream proto comment above says `Model.top_p` in the
    // `top_k` field doc — this looks like a copy-paste from the `top_p` field
    // (presumably `Model.top_k` was intended); confirm against the .proto.
    #[prost(int32, optional, tag = "7")]
    pub top_k: ::core::option::Option<i32>,
    /// Optional. Presence penalty applied to the next token's logprobs if the
    /// token has already been seen in the response.
    ///
    /// This penalty is binary on/off and not dependent on the number of times the
    /// token is used (after the first). Use
    /// [frequency_penalty][google.ai.generativelanguage.v1.GenerationConfig.frequency_penalty]
    /// for a penalty that increases with each use.
    ///
    /// A positive penalty will discourage the use of tokens that have already
    /// been used in the response, increasing the vocabulary.
    ///
    /// A negative penalty will encourage the use of tokens that have already been
    /// used in the response, decreasing the vocabulary.
    #[prost(float, optional, tag = "15")]
    pub presence_penalty: ::core::option::Option<f32>,
    /// Optional. Frequency penalty applied to the next token's logprobs,
    /// multiplied by the number of times each token has been seen in the response
    /// so far.
    ///
    /// A positive penalty will discourage the use of tokens that have already
    /// been used, proportional to the number of times the token has been used:
    /// The more a token is used, the more difficult it is for the model to use
    /// that token again increasing the vocabulary of responses.
    ///
    /// Caution: A _negative_ penalty will encourage the model to reuse tokens
    /// proportional to the number of times the token has been used. Small
    /// negative values will reduce the vocabulary of a response. Larger negative
    /// values will cause the model to start repeating a common token until it
    /// hits the
    /// [max_output_tokens][google.ai.generativelanguage.v1.GenerationConfig.max_output_tokens]
    /// limit: "...the the the the the...".
    #[prost(float, optional, tag = "16")]
    pub frequency_penalty: ::core::option::Option<f32>,
    /// Optional. If true, export the logprobs results in response.
    #[prost(bool, optional, tag = "17")]
    pub response_logprobs: ::core::option::Option<bool>,
    /// Optional. Only valid if
    /// [response_logprobs=True][google.ai.generativelanguage.v1.GenerationConfig.response_logprobs].
    /// This sets the number of top logprobs to return at each decoding step in the
    /// [Candidate.logprobs_result][google.ai.generativelanguage.v1.Candidate.logprobs_result].
    #[prost(int32, optional, tag = "18")]
    pub logprobs: ::core::option::Option<i32>,
}
/// Response from the model supporting multiple candidate responses.
///
/// Safety ratings and content filtering are reported for both
/// prompt in `GenerateContentResponse.prompt_feedback` and for each candidate
/// in `finish_reason` and in `safety_ratings`. The API:
///   - Returns either all requested candidates or none of them
///   - Returns no candidates at all only if there was something wrong with the
///     prompt (check `prompt_feedback`)
///   - Reports feedback on each candidate in `finish_reason` and
///     `safety_ratings`.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GenerateContentResponse {
    /// Candidate responses from the model.
    #[prost(message, repeated, tag = "1")]
    pub candidates: ::prost::alloc::vec::Vec<Candidate>,
    /// Returns the prompt's feedback related to the content filters.
    #[prost(message, optional, tag = "2")]
    pub prompt_feedback: ::core::option::Option<
        generate_content_response::PromptFeedback,
    >,
    /// Output only. Metadata on the generation requests' token usage.
    #[prost(message, optional, tag = "3")]
    pub usage_metadata: ::core::option::Option<generate_content_response::UsageMetadata>,
}
/// Nested message and enum types in `GenerateContentResponse`.
pub mod generate_content_response {
    /// A set of the feedback metadata the prompt specified in
    /// `GenerateContentRequest.content`.
    #[derive(Clone, PartialEq, ::prost::Message)]
    pub struct PromptFeedback {
        /// Optional. If set, the prompt was blocked and no candidates are returned.
        /// Rephrase the prompt.
        ///
        /// Raw `i32` for `prompt_feedback::BlockReason` (prost convention for
        /// enum fields).
        #[prost(enumeration = "prompt_feedback::BlockReason", tag = "1")]
        pub block_reason: i32,
        /// Ratings for safety of the prompt.
        /// There is at most one rating per category.
        #[prost(message, repeated, tag = "2")]
        pub safety_ratings: ::prost::alloc::vec::Vec<super::SafetyRating>,
    }
    /// Nested message and enum types in `PromptFeedback`.
    pub mod prompt_feedback {
        /// Specifies the reason why the prompt was blocked.
        #[derive(
            Clone,
            Copy,
            Debug,
            PartialEq,
            Eq,
            Hash,
            PartialOrd,
            Ord,
            ::prost::Enumeration
        )]
        #[repr(i32)]
        pub enum BlockReason {
            /// Default value. This value is unused.
            Unspecified = 0,
            /// Prompt was blocked due to safety reasons. Inspect `safety_ratings`
            /// to understand which safety category blocked it.
            Safety = 1,
            /// Prompt was blocked due to unknown reasons.
            Other = 2,
            /// Prompt was blocked due to the terms which are included from the
            /// terminology blocklist.
            Blocklist = 3,
            /// Prompt was blocked due to prohibited content.
            ProhibitedContent = 4,
        }
        impl BlockReason {
            /// String value of the enum field names used in the ProtoBuf definition.
            ///
            /// The values are not transformed in any way and thus are considered stable
            /// (if the ProtoBuf definition does not change) and safe for programmatic use.
            pub fn as_str_name(&self) -> &'static str {
                match self {
                    Self::Unspecified => "BLOCK_REASON_UNSPECIFIED",
                    Self::Safety => "SAFETY",
                    Self::Other => "OTHER",
                    Self::Blocklist => "BLOCKLIST",
                    Self::ProhibitedContent => "PROHIBITED_CONTENT",
                }
            }
            /// Creates an enum from field names used in the ProtoBuf definition.
            pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
                match value {
                    "BLOCK_REASON_UNSPECIFIED" => Some(Self::Unspecified),
                    "SAFETY" => Some(Self::Safety),
                    "OTHER" => Some(Self::Other),
                    "BLOCKLIST" => Some(Self::Blocklist),
                    "PROHIBITED_CONTENT" => Some(Self::ProhibitedContent),
                    _ => None,
                }
            }
        }
    }
    /// Metadata on the generation request's token usage.
    #[derive(Clone, Copy, PartialEq, ::prost::Message)]
    pub struct UsageMetadata {
        /// Number of tokens in the prompt. When `cached_content` is set, this is
        /// still the total effective prompt size meaning this includes the number of
        /// tokens in the cached content.
        #[prost(int32, tag = "1")]
        pub prompt_token_count: i32,
        /// Total number of tokens across all the generated response candidates.
        #[prost(int32, tag = "2")]
        pub candidates_token_count: i32,
        /// Total token count for the generation request (prompt + response
        /// candidates).
        #[prost(int32, tag = "3")]
        pub total_token_count: i32,
    }
}
/// A response candidate generated from the model.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Candidate {
    /// Output only. Index of the candidate in the list of response candidates.
    #[prost(int32, optional, tag = "3")]
    pub index: ::core::option::Option<i32>,
    /// Output only. Generated content returned from the model.
    #[prost(message, optional, tag = "1")]
    pub content: ::core::option::Option<Content>,
    /// Optional. Output only. The reason why the model stopped generating tokens.
    ///
    /// If empty, the model has not stopped generating tokens.
    ///
    /// Raw `i32` for `candidate::FinishReason` (prost convention for enum
    /// fields); 0 (`Unspecified`) when unset.
    #[prost(enumeration = "candidate::FinishReason", tag = "2")]
    pub finish_reason: i32,
    /// List of ratings for the safety of a response candidate.
    ///
    /// There is at most one rating per category.
    #[prost(message, repeated, tag = "5")]
    pub safety_ratings: ::prost::alloc::vec::Vec<SafetyRating>,
    /// Output only. Citation information for model-generated candidate.
    ///
    /// This field may be populated with recitation information for any text
    /// included in the `content`. These are passages that are "recited" from
    /// copyrighted material in the foundational LLM's training data.
    #[prost(message, optional, tag = "6")]
    pub citation_metadata: ::core::option::Option<CitationMetadata>,
    /// Output only. Token count for this candidate.
    #[prost(int32, tag = "7")]
    pub token_count: i32,
    /// Output only.
    ///
    /// NOTE(review): the upstream proto provides no description for this field;
    /// the name suggests the average log-probability of the candidate's tokens —
    /// confirm against the .proto before relying on its semantics.
    #[prost(double, tag = "10")]
    pub avg_logprobs: f64,
    /// Output only. Log-likelihood scores for the response tokens and top tokens
    #[prost(message, optional, tag = "11")]
    pub logprobs_result: ::core::option::Option<LogprobsResult>,
}
/// Nested message and enum types in `Candidate`.
pub mod candidate {
    /// Defines the reason why the model stopped generating tokens.
    #[derive(
        Clone,
        Copy,
        Debug,
        PartialEq,
        Eq,
        Hash,
        PartialOrd,
        Ord,
        ::prost::Enumeration
    )]
    #[repr(i32)]
    pub enum FinishReason {
        /// Default value. This value is unused.
        Unspecified = 0,
        /// Natural stop point of the model or provided stop sequence.
        Stop = 1,
        /// The maximum number of tokens as specified in the request was reached.
        MaxTokens = 2,
        /// The response candidate content was flagged for safety reasons.
        Safety = 3,
        /// The response candidate content was flagged for recitation reasons.
        Recitation = 4,
        /// The response candidate content was flagged for using an unsupported
        /// language.
        // Note: declared before `Other` (5) but holds discriminant 6 — the
        // values follow the proto enum numbers, not declaration order.
        Language = 6,
        /// Unknown reason.
        Other = 5,
        /// Token generation stopped because the content contains forbidden terms.
        Blocklist = 7,
        /// Token generation stopped for potentially containing prohibited content.
        ProhibitedContent = 8,
        /// Token generation stopped because the content potentially contains
        /// Sensitive Personally Identifiable Information (SPII).
        Spii = 9,
        /// The function call generated by the model is invalid.
        MalformedFunctionCall = 10,
    }
    impl FinishReason {
        /// String value of the enum field names used in the ProtoBuf definition.
        ///
        /// The values are not transformed in any way and thus are considered stable
        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
        pub fn as_str_name(&self) -> &'static str {
            match self {
                Self::Unspecified => "FINISH_REASON_UNSPECIFIED",
                Self::Stop => "STOP",
                Self::MaxTokens => "MAX_TOKENS",
                Self::Safety => "SAFETY",
                Self::Recitation => "RECITATION",
                Self::Language => "LANGUAGE",
                Self::Other => "OTHER",
                Self::Blocklist => "BLOCKLIST",
                Self::ProhibitedContent => "PROHIBITED_CONTENT",
                Self::Spii => "SPII",
                Self::MalformedFunctionCall => "MALFORMED_FUNCTION_CALL",
            }
        }
        /// Creates an enum from field names used in the ProtoBuf definition.
        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
            match value {
                "FINISH_REASON_UNSPECIFIED" => Some(Self::Unspecified),
                "STOP" => Some(Self::Stop),
                "MAX_TOKENS" => Some(Self::MaxTokens),
                "SAFETY" => Some(Self::Safety),
                "RECITATION" => Some(Self::Recitation),
                "LANGUAGE" => Some(Self::Language),
                "OTHER" => Some(Self::Other),
                "BLOCKLIST" => Some(Self::Blocklist),
                "PROHIBITED_CONTENT" => Some(Self::ProhibitedContent),
                "SPII" => Some(Self::Spii),
                "MALFORMED_FUNCTION_CALL" => Some(Self::MalformedFunctionCall),
                _ => None,
            }
        }
    }
}
/// Logprobs Result
///
/// Per-decoding-step token log-probability data for a generated response:
/// both field comments below state one entry per decoding step.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct LogprobsResult {
    /// Length = total number of decoding steps.
    #[prost(message, repeated, tag = "1")]
    pub top_candidates: ::prost::alloc::vec::Vec<logprobs_result::TopCandidates>,
    /// Length = total number of decoding steps.
    /// The chosen candidates may or may not be in top_candidates.
    #[prost(message, repeated, tag = "2")]
    pub chosen_candidates: ::prost::alloc::vec::Vec<logprobs_result::Candidate>,
}
/// Nested message and enum types in `LogprobsResult`.
pub mod logprobs_result {
    /// Candidate for the logprobs token and score.
    #[derive(Clone, PartialEq, ::prost::Message)]
    pub struct Candidate {
        /// The candidate’s token string value.
        #[prost(string, optional, tag = "1")]
        pub token: ::core::option::Option<::prost::alloc::string::String>,
        /// The candidate’s token id value.
        ///
        /// Note: carries proto field number 3, so the wire tag order differs
        /// from the declaration order of the struct fields (prost encodes by
        /// tag, not by position).
        #[prost(int32, optional, tag = "3")]
        pub token_id: ::core::option::Option<i32>,
        /// The candidate's log probability.
        #[prost(float, optional, tag = "2")]
        pub log_probability: ::core::option::Option<f32>,
    }
    /// Candidates with top log probabilities at each decoding step.
    #[derive(Clone, PartialEq, ::prost::Message)]
    pub struct TopCandidates {
        /// Sorted by log probability in descending order.
        #[prost(message, repeated, tag = "1")]
        pub candidates: ::prost::alloc::vec::Vec<Candidate>,
    }
}
/// Request containing the `Content` for the model to embed.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct EmbedContentRequest {
    /// Required. The model's resource name. This serves as an ID for the Model to
    /// use.
    ///
    /// This name should match a model name returned by the `ListModels` method.
    ///
    /// Format: `models/{model}`
    #[prost(string, tag = "1")]
    pub model: ::prost::alloc::string::String,
    /// Required. The content to embed. Only the `parts.text` fields will be
    /// counted.
    #[prost(message, optional, tag = "2")]
    pub content: ::core::option::Option<Content>,
    /// Optional. Optional task type for which the embeddings will be used. Can
    /// only be set for `models/embedding-001`.
    ///
    /// Stored as the raw `i32` wire value of the `TaskType` enum, per prost
    /// convention; use `TaskType::try_from`/`from_str_name` for typed access.
    #[prost(enumeration = "TaskType", optional, tag = "3")]
    pub task_type: ::core::option::Option<i32>,
    /// Optional. An optional title for the text. Only applicable when TaskType is
    /// `RETRIEVAL_DOCUMENT`.
    ///
    /// Note: Specifying a `title` for `RETRIEVAL_DOCUMENT` provides better quality
    /// embeddings for retrieval.
    #[prost(string, optional, tag = "4")]
    pub title: ::core::option::Option<::prost::alloc::string::String>,
    /// Optional. Optional reduced dimension for the output embedding. If set,
    /// excessive values in the output embedding are truncated from the end.
    /// Supported by newer models since 2024 only. You cannot set this value if
    /// using the earlier model (`models/embedding-001`).
    #[prost(int32, optional, tag = "5")]
    pub output_dimensionality: ::core::option::Option<i32>,
}
/// A list of floats representing an embedding.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ContentEmbedding {
    /// The embedding values.
    ///
    /// NOTE(review): presumably the length equals the producing model's
    /// embedding dimension (cf. `EmbedContentRequest.output_dimensionality`) —
    /// confirm against the service documentation.
    #[prost(float, repeated, tag = "1")]
    pub values: ::prost::alloc::vec::Vec<f32>,
}
/// The response to an `EmbedContentRequest`.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct EmbedContentResponse {
    /// Output only. The embedding generated from the input content.
    ///
    /// `Option` because proto3 message fields are always optional on the wire.
    #[prost(message, optional, tag = "1")]
    pub embedding: ::core::option::Option<ContentEmbedding>,
}
/// Batch request to get embeddings from the model for a list of prompts.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct BatchEmbedContentsRequest {
    /// Required. The model's resource name. This serves as an ID for the Model to
    /// use.
    ///
    /// This name should match a model name returned by the `ListModels` method.
    ///
    /// Format: `models/{model}`
    #[prost(string, tag = "1")]
    pub model: ::prost::alloc::string::String,
    /// Required. Embed requests for the batch. The model in each of these requests
    /// must match the model specified `BatchEmbedContentsRequest.model`.
    #[prost(message, repeated, tag = "2")]
    pub requests: ::prost::alloc::vec::Vec<EmbedContentRequest>,
}
/// The response to a `BatchEmbedContentsRequest`.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct BatchEmbedContentsResponse {
    /// Output only. The embeddings for each request, in the same order as provided
    /// in the batch request.
    ///
    /// One `ContentEmbedding` per `BatchEmbedContentsRequest.requests` entry.
    #[prost(message, repeated, tag = "1")]
    pub embeddings: ::prost::alloc::vec::Vec<ContentEmbedding>,
}
/// Counts the number of tokens in the `prompt` sent to a model.
///
/// Models may tokenize text differently, so each model may return a different
/// `token_count`.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CountTokensRequest {
    /// Required. The model's resource name. This serves as an ID for the Model to
    /// use.
    ///
    /// This name should match a model name returned by the `ListModels` method.
    ///
    /// Format: `models/{model}`
    #[prost(string, tag = "1")]
    pub model: ::prost::alloc::string::String,
    /// Optional. The input given to the model as a prompt. This field is ignored
    /// when `generate_content_request` is set.
    #[prost(message, repeated, tag = "2")]
    pub contents: ::prost::alloc::vec::Vec<Content>,
    /// Optional. The overall input given to the `Model`. This includes the prompt
    /// as well as other model steering information like [system
    /// instructions](<https://ai.google.dev/gemini-api/docs/system-instructions>),
    /// and/or function declarations for [function
    /// calling](<https://ai.google.dev/gemini-api/docs/function-calling>).
    /// `Model`s/`Content`s and `generate_content_request`s are mutually
    /// exclusive. You can either send `Model` + `Content`s or a
    /// `generate_content_request`, but never both.
    #[prost(message, optional, tag = "3")]
    pub generate_content_request: ::core::option::Option<GenerateContentRequest>,
}
/// A response from `CountTokens`.
///
/// It returns the model's `token_count` for the `prompt`.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct CountTokensResponse {
    /// The number of tokens that the `Model` tokenizes the `prompt` into. Always
    /// non-negative.
    #[prost(int32, tag = "1")]
    pub total_tokens: i32,
}
/// Type of task for which the embedding will be used.
///
/// Wire values are fixed by the ProtoBuf definition; `EmbedContentRequest.task_type`
/// stores this enum as a raw `i32`.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum TaskType {
    /// Unset value, which will default to one of the other enum values.
    Unspecified = 0,
    /// Specifies the given text is a query in a search/retrieval setting.
    RetrievalQuery = 1,
    /// Specifies the given text is a document from the corpus being searched.
    RetrievalDocument = 2,
    /// Specifies the given text will be used for STS.
    SemanticSimilarity = 3,
    /// Specifies that the given text will be classified.
    Classification = 4,
    /// Specifies that the embeddings will be used for clustering.
    Clustering = 5,
    /// Specifies that the given text will be used for question answering.
    QuestionAnswering = 6,
    /// Specifies that the given text will be used for fact verification.
    FactVerification = 7,
}
846impl TaskType {
847    /// String value of the enum field names used in the ProtoBuf definition.
848    ///
849    /// The values are not transformed in any way and thus are considered stable
850    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
851    pub fn as_str_name(&self) -> &'static str {
852        match self {
853            Self::Unspecified => "TASK_TYPE_UNSPECIFIED",
854            Self::RetrievalQuery => "RETRIEVAL_QUERY",
855            Self::RetrievalDocument => "RETRIEVAL_DOCUMENT",
856            Self::SemanticSimilarity => "SEMANTIC_SIMILARITY",
857            Self::Classification => "CLASSIFICATION",
858            Self::Clustering => "CLUSTERING",
859            Self::QuestionAnswering => "QUESTION_ANSWERING",
860            Self::FactVerification => "FACT_VERIFICATION",
861        }
862    }
863    /// Creates an enum from field names used in the ProtoBuf definition.
864    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
865        match value {
866            "TASK_TYPE_UNSPECIFIED" => Some(Self::Unspecified),
867            "RETRIEVAL_QUERY" => Some(Self::RetrievalQuery),
868            "RETRIEVAL_DOCUMENT" => Some(Self::RetrievalDocument),
869            "SEMANTIC_SIMILARITY" => Some(Self::SemanticSimilarity),
870            "CLASSIFICATION" => Some(Self::Classification),
871            "CLUSTERING" => Some(Self::Clustering),
872            "QUESTION_ANSWERING" => Some(Self::QuestionAnswering),
873            "FACT_VERIFICATION" => Some(Self::FactVerification),
874            _ => None,
875        }
876    }
877}
/// Generated client implementations.
pub mod generative_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// API for using Large Models that generate multimodal content and have
    /// additional capabilities beyond text generation.
    #[derive(Debug, Clone)]
    pub struct GenerativeServiceClient<T> {
        /// The wrapped tonic gRPC client that performs the actual calls.
        inner: tonic::client::Grpc<T>,
    }
    impl GenerativeServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> GenerativeServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::BoxBody>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        /// Creates a client by wrapping an already-constructed service `inner`.
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        /// Creates a client like `new`, but with an explicit `origin` URI for
        /// the underlying service.
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        /// Creates a client whose requests pass through `interceptor` before
        /// reaching `inner`.
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> GenerativeServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::BoxBody>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::BoxBody>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::BoxBody>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            GenerativeServiceClient::new(InterceptedService::new(inner, interceptor))
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// Generates a model response given an input `GenerateContentRequest`.
        /// Refer to the [text generation
        /// guide](https://ai.google.dev/gemini-api/docs/text-generation) for detailed
        /// usage information. Input capabilities differ between models, including
        /// tuned models. Refer to the [model
        /// guide](https://ai.google.dev/gemini-api/docs/models/gemini) and [tuning
        /// guide](https://ai.google.dev/gemini-api/docs/model-tuning) for details.
        pub async fn generate_content(
            &mut self,
            request: impl tonic::IntoRequest<super::GenerateContentRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GenerateContentResponse>,
            tonic::Status,
        > {
            // Wait until the underlying service reports readiness before
            // dispatching; a not-ready transport surfaces as Status::unknown.
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/google.ai.generativelanguage.v1.GenerativeService/GenerateContent",
            );
            let mut req = request.into_request();
            // Tag the request with the fully-qualified gRPC method so
            // interceptors and telemetry can identify the call.
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "google.ai.generativelanguage.v1.GenerativeService",
                        "GenerateContent",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// Generates a [streamed
        /// response](https://ai.google.dev/gemini-api/docs/text-generation?lang=python#generate-a-text-stream)
        /// from the model given an input `GenerateContentRequest`.
        pub async fn stream_generate_content(
            &mut self,
            request: impl tonic::IntoRequest<super::GenerateContentRequest>,
        ) -> std::result::Result<
            tonic::Response<tonic::codec::Streaming<super::GenerateContentResponse>>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/google.ai.generativelanguage.v1.GenerativeService/StreamGenerateContent",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "google.ai.generativelanguage.v1.GenerativeService",
                        "StreamGenerateContent",
                    ),
                );
            // Unlike the other methods, this is a server-streaming RPC: the
            // response carries a stream of messages rather than a single one.
            self.inner.server_streaming(req, path, codec).await
        }
        /// Generates a text embedding vector from the input `Content` using the
        /// specified [Gemini Embedding
        /// model](https://ai.google.dev/gemini-api/docs/models/gemini#text-embedding).
        pub async fn embed_content(
            &mut self,
            request: impl tonic::IntoRequest<super::EmbedContentRequest>,
        ) -> std::result::Result<
            tonic::Response<super::EmbedContentResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/google.ai.generativelanguage.v1.GenerativeService/EmbedContent",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "google.ai.generativelanguage.v1.GenerativeService",
                        "EmbedContent",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// Generates multiple embedding vectors from the input `Content` which
        /// consists of a batch of strings represented as `EmbedContentRequest`
        /// objects.
        pub async fn batch_embed_contents(
            &mut self,
            request: impl tonic::IntoRequest<super::BatchEmbedContentsRequest>,
        ) -> std::result::Result<
            tonic::Response<super::BatchEmbedContentsResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/google.ai.generativelanguage.v1.GenerativeService/BatchEmbedContents",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "google.ai.generativelanguage.v1.GenerativeService",
                        "BatchEmbedContents",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// Runs a model's tokenizer on input `Content` and returns the token count.
        /// Refer to the [tokens guide](https://ai.google.dev/gemini-api/docs/tokens)
        /// to learn more about tokens.
        pub async fn count_tokens(
            &mut self,
            request: impl tonic::IntoRequest<super::CountTokensRequest>,
        ) -> std::result::Result<
            tonic::Response<super::CountTokensResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/google.ai.generativelanguage.v1.GenerativeService/CountTokens",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "google.ai.generativelanguage.v1.GenerativeService",
                        "CountTokens",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}