// nominal_api/proto/nominal.ai.v1.rs
// This file is @generated by prost-build.
/// Request for `DataIngestionErrorClassifierService.ClassifyError`.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ClassifyErrorRequest {
    /// The error message or description to classify
    #[prost(string, tag = "1")]
    pub error_message: ::prost::alloc::string::String,
}
/// Response carrying the classification outcome for a single error message.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ClassifyErrorResponse {
    /// The classification result
    /// (stored as a raw `i32`, per prost's open-enum representation of `ErrorClassification`)
    #[prost(enumeration = "ErrorClassification", tag = "1")]
    pub classification: i32,
    /// Explanation for why this classification was chosen
    #[prost(string, tag = "2")]
    pub reason: ::prost::alloc::string::String,
}
/// Classification of a data ingestion error, as produced by
/// `DataIngestionErrorClassifierService`.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum ErrorClassification {
    /// No classification was determined (proto default).
    Unspecified = 0,
    /// Customer data issue (e.g. empty files, schema conflicts, timestamp problems).
    Client = 1,
    /// Infrastructure issue (e.g. internal errors, timeouts, capacity limits).
    Server = 2,
}
24impl ErrorClassification {
25    /// String value of the enum field names used in the ProtoBuf definition.
26    ///
27    /// The values are not transformed in any way and thus are considered stable
28    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
29    pub fn as_str_name(&self) -> &'static str {
30        match self {
31            Self::Unspecified => "ERROR_CLASSIFICATION_UNSPECIFIED",
32            Self::Client => "ERROR_CLASSIFICATION_CLIENT",
33            Self::Server => "ERROR_CLASSIFICATION_SERVER",
34        }
35    }
36    /// Creates an enum from field names used in the ProtoBuf definition.
37    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
38        match value {
39            "ERROR_CLASSIFICATION_UNSPECIFIED" => Some(Self::Unspecified),
40            "ERROR_CLASSIFICATION_CLIENT" => Some(Self::Client),
41            "ERROR_CLASSIFICATION_SERVER" => Some(Self::Server),
42            _ => None,
43        }
44    }
45}
/// Generated client implementations.
pub mod data_ingestion_error_classifier_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// DataIngestionErrorClassifierService classifies data ingestion errors as CLIENT or SERVER issues.
    /// CLIENT errors are customer data issues (empty files, schema conflicts, timestamp problems, etc.)
    /// SERVER errors are infrastructure issues (internal errors, timeouts, capacity limits, etc.)
    #[derive(Debug, Clone)]
    pub struct DataIngestionErrorClassifierServiceClient<T> {
        // Underlying tonic gRPC client that performs the actual transport calls.
        inner: tonic::client::Grpc<T>,
    }
    impl DataIngestionErrorClassifierServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> DataIngestionErrorClassifierServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        /// Create a client from an already-constructed transport/service.
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        /// Create a client that sends requests to the given origin URI.
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        /// Wrap the transport with an interceptor that can inspect/modify each request.
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> DataIngestionErrorClassifierServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            DataIngestionErrorClassifierServiceClient::new(
                InterceptedService::new(inner, interceptor),
            )
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// ClassifyError analyzes an error message and classifies it as CLIENT or SERVER.
        pub async fn classify_error(
            &mut self,
            request: impl tonic::IntoRequest<super::ClassifyErrorRequest>,
        ) -> std::result::Result<
            tonic::Response<super::ClassifyErrorResponse>,
            tonic::Status,
        > {
            // Wait until the underlying service is ready to accept a request.
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.DataIngestionErrorClassifierService/ClassifyError",
            );
            let mut req = request.into_request();
            // Attach the service/method name so interceptors and tracing can see it.
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.DataIngestionErrorClassifierService",
                        "ClassifyError",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}
/// Request for `ModelProviderHealthService.GetProviderStatus`; carries no parameters.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct GetProviderStatusRequest {}
/// Snapshot of the model provider's health as observed by health-check probes.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct GetProviderStatusResponse {
    /// Timestamp when the last status was determined
    #[prost(message, optional, tag = "1")]
    pub timestamp: ::core::option::Option<
        super::super::super::google::protobuf::Timestamp,
    >,
    /// Status of the most recent health check probe
    #[prost(message, optional, tag = "2")]
    pub last_status: ::core::option::Option<ProviderStatus>,
    /// Aggregated status over the last 30 minutes (DEGRADED if any check failed or exceeded thresholds)
    /// Deprecated: Use aggregated_status instead. This field is kept for backward compatibility.
    #[deprecated]
    #[prost(message, optional, tag = "3")]
    pub aggregated_status_over_last_30m: ::core::option::Option<ProviderStatus>,
    /// Aggregated status over the last iterations, configurable in the BE (DEGRADED if any check failed or exceeded thresholds)
    #[prost(message, optional, tag = "4")]
    pub aggregated_status: ::core::option::Option<ProviderStatus>,
}
/// Health status of the provider: exactly one of `Healthy` or `Degraded`.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct ProviderStatus {
    /// Oneof payload; `None` if the status was not set on the wire.
    #[prost(oneof = "provider_status::Status", tags = "1, 2")]
    pub status: ::core::option::Option<provider_status::Status>,
}
/// Nested message and enum types in `ProviderStatus`.
pub mod provider_status {
    /// Oneof payload distinguishing a healthy probe from a degraded one.
    #[derive(Clone, Copy, PartialEq, ::prost::Oneof)]
    pub enum Status {
        /// Provider passed the health checks.
        #[prost(message, tag = "1")]
        Healthy(super::Healthy),
        /// Provider is degraded; see `Degraded::reason` for why.
        #[prost(message, tag = "2")]
        Degraded(super::Degraded),
    }
}
/// Marker message: the provider is healthy (no extra data).
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct Healthy {}
/// The provider is degraded, together with the reason for the degradation.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct Degraded {
    /// Why the provider is considered degraded (raw `DegradationReason` value).
    #[prost(enumeration = "DegradationReason", tag = "1")]
    pub reason: i32,
}
/// Latency metrics collected from a single health-check probe.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct ProviderMetrics {
    /// Milliseconds until the first token was received.
    #[prost(int32, tag = "1")]
    pub time_to_first_token_ms: i32,
    /// Total end-to-end probe time in milliseconds.
    #[prost(int32, tag = "2")]
    pub total_time_ms: i32,
}
/// Why the model provider was marked degraded by the health checks.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum DegradationReason {
    /// No reason recorded (proto default).
    Unspecified = 0,
    /// Health checks exceeded latency thresholds.
    HighLatency = 1,
    /// Health checks failed entirely.
    Failures = 2,
    /// Both latency thresholds were exceeded and checks failed.
    HighLatencyAndFailures = 3,
}
232impl DegradationReason {
233    /// String value of the enum field names used in the ProtoBuf definition.
234    ///
235    /// The values are not transformed in any way and thus are considered stable
236    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
237    pub fn as_str_name(&self) -> &'static str {
238        match self {
239            Self::Unspecified => "DEGRADATION_REASON_UNSPECIFIED",
240            Self::HighLatency => "DEGRADATION_REASON_HIGH_LATENCY",
241            Self::Failures => "DEGRADATION_REASON_FAILURES",
242            Self::HighLatencyAndFailures => {
243                "DEGRADATION_REASON_HIGH_LATENCY_AND_FAILURES"
244            }
245        }
246    }
247    /// Creates an enum from field names used in the ProtoBuf definition.
248    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
249        match value {
250            "DEGRADATION_REASON_UNSPECIFIED" => Some(Self::Unspecified),
251            "DEGRADATION_REASON_HIGH_LATENCY" => Some(Self::HighLatency),
252            "DEGRADATION_REASON_FAILURES" => Some(Self::Failures),
253            "DEGRADATION_REASON_HIGH_LATENCY_AND_FAILURES" => {
254                Some(Self::HighLatencyAndFailures)
255            }
256            _ => None,
257        }
258    }
259}
/// Generated client implementations.
pub mod model_provider_health_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// ModelProviderHealthService monitors the health and performance of the backing LLM model provider.
    /// It runs lightweight health checks every 5 minutes to measure provider responsiveness and reliability,
    /// independent of the complexity of user prompts.
    #[derive(Debug, Clone)]
    pub struct ModelProviderHealthServiceClient<T> {
        // Underlying tonic gRPC client that performs the actual transport calls.
        inner: tonic::client::Grpc<T>,
    }
    impl ModelProviderHealthServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> ModelProviderHealthServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        /// Create a client from an already-constructed transport/service.
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        /// Create a client that sends requests to the given origin URI.
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        /// Wrap the transport with an interceptor that can inspect/modify each request.
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> ModelProviderHealthServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            ModelProviderHealthServiceClient::new(
                InterceptedService::new(inner, interceptor),
            )
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// GetProviderStatus returns the current health status of the model provider based on recent health checks.
        /// The status is HEALTHY if all checks in the last 30 minutes passed latency thresholds,
        /// or DEGRADED if any checks exceeded latency thresholds or failed entirely.
        pub async fn get_provider_status(
            &mut self,
            request: impl tonic::IntoRequest<super::GetProviderStatusRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GetProviderStatusResponse>,
            tonic::Status,
        > {
            // Wait until the underlying service is ready to accept a request.
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.ModelProviderHealthService/GetProviderStatus",
            );
            let mut req = request.into_request();
            // Attach the service/method name so interceptors and tracing can see it.
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.ModelProviderHealthService",
                        "GetProviderStatus",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}
/// CreateOrUpdateKnowledgeBaseRequest is a request to create a knowledge from an attachment in the attachment's
/// workspace. Subsequent calls for the same attachment will overwrite the existing knowledge base.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateOrUpdateKnowledgeBaseRequest {
    /// RID of the attachment the knowledge base is built from.
    #[prost(string, tag = "1")]
    pub attachment_rid: ::prost::alloc::string::String,
    /// summary of the knowledge base, will be used by the LLM to decide when to use it
    #[prost(string, tag = "2")]
    pub summary_description: ::prost::alloc::string::String,
    /// Optional knowledge-base type (raw `KnowledgeBaseType` value; defaults to PROMPT per the enum docs).
    #[prost(enumeration = "KnowledgeBaseType", optional, tag = "3")]
    pub r#type: ::core::option::Option<i32>,
}
/// CreateOrUpdateKnowledgeBaseResponse is the response to creating/updating a knowledge base
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateOrUpdateKnowledgeBaseResponse {
    /// RID of the created or updated knowledge base.
    #[prost(string, tag = "1")]
    pub knowledge_base_rid: ::prost::alloc::string::String,
}
/// KnowledgeBase represents a knowledge base entry
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct KnowledgeBase {
    #[prost(string, tag = "1")]
    pub knowledge_base_rid: ::prost::alloc::string::String,
    #[prost(string, tag = "2")]
    pub attachment_rid: ::prost::alloc::string::String,
    #[prost(string, tag = "3")]
    pub workspace_rid: ::prost::alloc::string::String,
    #[prost(string, tag = "4")]
    pub summary_description: ::prost::alloc::string::String,
    /// Knowledge-base type (raw `KnowledgeBaseType` value).
    #[prost(enumeration = "KnowledgeBaseType", tag = "5")]
    pub r#type: i32,
    #[prost(int32, tag = "6")]
    pub version: i32,
}
/// Request to list all knowledge bases in a workspace.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListRequest {
    #[prost(string, tag = "1")]
    pub workspace_rid: ::prost::alloc::string::String,
}
/// Response carrying the knowledge bases found in the requested workspace.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListResponse {
    #[prost(message, repeated, tag = "1")]
    pub knowledge_bases: ::prost::alloc::vec::Vec<KnowledgeBase>,
}
/// Request to delete a knowledge base by its RID.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteRequest {
    #[prost(string, tag = "1")]
    pub knowledge_base_rid: ::prost::alloc::string::String,
}
/// Response indicating whether the deletion succeeded.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct DeleteResponse {
    #[prost(bool, tag = "1")]
    pub success: bool,
}
/// Request to fetch multiple knowledge bases by their RIDs in one call.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetBatchRequest {
    #[prost(string, repeated, tag = "1")]
    pub knowledge_base_rids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
/// Response carrying the knowledge bases resolved from the requested RIDs.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetBatchResponse {
    #[prost(message, repeated, tag = "1")]
    pub knowledge_bases: ::prost::alloc::vec::Vec<KnowledgeBase>,
}
/// generate summary description is intentionally going to return the generated description to the frontend
/// rather than storing it in the knowledge base directly because the description needs to be accepted by the user
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GenerateSummaryDescriptionRequest {
    #[prost(string, tag = "1")]
    pub attachment_rid: ::prost::alloc::string::String,
}
/// Response carrying the generated summary description for user review.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GenerateSummaryDescriptionResponse {
    #[prost(string, tag = "1")]
    pub summary_description: ::prost::alloc::string::String,
}
/// KnowledgeBaseType defines the types of knowledge base
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum KnowledgeBaseType {
    /// defaults to PROMPT
    Unspecified = 0,
    /// knowledge base gets added directly to prompt (needs to be small enough!)
    Prompt = 1,
    /// knowledge base gets used via vector search on embeddings
    Embedding = 2,
}
477impl KnowledgeBaseType {
478    /// String value of the enum field names used in the ProtoBuf definition.
479    ///
480    /// The values are not transformed in any way and thus are considered stable
481    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
482    pub fn as_str_name(&self) -> &'static str {
483        match self {
484            Self::Unspecified => "KNOWLEDGE_BASE_TYPE_UNSPECIFIED",
485            Self::Prompt => "KNOWLEDGE_BASE_TYPE_PROMPT",
486            Self::Embedding => "KNOWLEDGE_BASE_TYPE_EMBEDDING",
487        }
488    }
489    /// Creates an enum from field names used in the ProtoBuf definition.
490    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
491        match value {
492            "KNOWLEDGE_BASE_TYPE_UNSPECIFIED" => Some(Self::Unspecified),
493            "KNOWLEDGE_BASE_TYPE_PROMPT" => Some(Self::Prompt),
494            "KNOWLEDGE_BASE_TYPE_EMBEDDING" => Some(Self::Embedding),
495            _ => None,
496        }
497    }
498}
/// Generated client implementations.
pub mod knowledge_base_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// KnowledgeBaseService provides AI-powered knowledge base management
    #[derive(Debug, Clone)]
    pub struct KnowledgeBaseServiceClient<T> {
        // Underlying tonic gRPC client that performs the actual transport calls.
        inner: tonic::client::Grpc<T>,
    }
    impl KnowledgeBaseServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> KnowledgeBaseServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        /// Create a client from an already-constructed transport/service.
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        /// Create a client that sends requests to the given origin URI.
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        /// Wrap the transport with an interceptor that can inspect/modify each request.
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> KnowledgeBaseServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            KnowledgeBaseServiceClient::new(InterceptedService::new(inner, interceptor))
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// CreateOrUpdateKnowledgeBase creates a knowledge base in the workspace
        pub async fn create_or_update_knowledge_base(
            &mut self,
            request: impl tonic::IntoRequest<super::CreateOrUpdateKnowledgeBaseRequest>,
        ) -> std::result::Result<
            tonic::Response<super::CreateOrUpdateKnowledgeBaseResponse>,
            tonic::Status,
        > {
            // Wait until the underlying service is ready to accept a request.
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/CreateOrUpdateKnowledgeBase",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.KnowledgeBaseService",
                        "CreateOrUpdateKnowledgeBase",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// List returns all knowledge bases in the specified workspace
        pub async fn list(
            &mut self,
            request: impl tonic::IntoRequest<super::ListRequest>,
        ) -> std::result::Result<tonic::Response<super::ListResponse>, tonic::Status> {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/List",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "List"));
            self.inner.unary(req, path, codec).await
        }
        /// Delete removes a knowledge base by its RID
        pub async fn delete(
            &mut self,
            request: impl tonic::IntoRequest<super::DeleteRequest>,
        ) -> std::result::Result<tonic::Response<super::DeleteResponse>, tonic::Status> {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/Delete",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "Delete"));
            self.inner.unary(req, path, codec).await
        }
        /// GetBatch retrieves multiple knowledge bases by their RIDs
        pub async fn get_batch(
            &mut self,
            request: impl tonic::IntoRequest<super::GetBatchRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GetBatchResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/GetBatch",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "GetBatch"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// GenerateSummaryDescription generates a summary description for an attachment rid
        pub async fn generate_summary_description(
            &mut self,
            request: impl tonic::IntoRequest<super::GenerateSummaryDescriptionRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GenerateSummaryDescriptionResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/GenerateSummaryDescription",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.KnowledgeBaseService",
                        "GenerateSummaryDescription",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}
724#[derive(Clone, PartialEq, ::prost::Message)]
725pub struct GetSnapshotRidByUserMessageIdRequest {
726    #[prost(string, tag = "1")]
727    pub conversation_rid: ::prost::alloc::string::String,
728    #[prost(string, tag = "2")]
729    pub message_id: ::prost::alloc::string::String,
730}
/// Will return an empty response body in the case where the message id exists, but there is no associated snapshot
/// This occurs in the instance where a message was sent in a non-workbook context
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetSnapshotRidByUserMessageIdResponse {
    /// Snapshot rid associated with the message; `None` when no snapshot exists.
    #[prost(string, optional, tag = "1")]
    pub snapshot_rid: ::core::option::Option<::prost::alloc::string::String>,
}
/// ReadOnlyMode configures read-only mode where edit tools are shadowed/disabled
///
/// Empty marker message; selected via the `ConversationMode.mode` oneof.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct ReadOnlyMode {}
/// EditMode configures edit mode where all tools are available
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct EditMode {
    /// when set to true, we auto accept edits for any tools typically requiring approval
    /// (proto3 optional: `None` means the field was not set on the wire)
    #[prost(bool, optional, tag = "1")]
    pub auto_accept: ::core::option::Option<bool>,
}
/// ConversationMode specifies the mode of the conversation
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct ConversationMode {
    /// Exactly one of read-only / edit; `None` when the oneof is unset.
    #[prost(oneof = "conversation_mode::Mode", tags = "1, 2")]
    pub mode: ::core::option::Option<conversation_mode::Mode>,
}
/// Nested message and enum types in `ConversationMode`.
pub mod conversation_mode {
    #[derive(Clone, Copy, PartialEq, ::prost::Oneof)]
    pub enum Mode {
        /// Edit tools are shadowed/disabled.
        #[prost(message, tag = "1")]
        ReadOnly(super::ReadOnlyMode),
        /// All tools available; see `EditMode.auto_accept`.
        #[prost(message, tag = "2")]
        Edit(super::EditMode),
    }
}
/// When the agent makes a request to use a tool, the user responds
/// with one of these for every request - mapping a tool id to its approval/denial
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ToolApprovalResult {
    /// identifies the tool call
    #[prost(string, tag = "1")]
    pub tool_call_id: ::prost::alloc::string::String,
    /// Approval or denial for the identified tool call.
    #[prost(oneof = "tool_approval_result::Response", tags = "2, 3")]
    pub response: ::core::option::Option<tool_approval_result::Response>,
}
/// Nested message and enum types in `ToolApprovalResult`.
pub mod tool_approval_result {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Response {
        /// Tool call approved, optionally with overridden arguments.
        #[prost(message, tag = "2")]
        Approved(super::ToolApprovedResponse),
        /// Tool call denied, with a reason.
        #[prost(message, tag = "3")]
        Denied(super::ToolDeniedResponse),
    }
}
/// Approval payload for a tool call.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ToolApprovedResponse {
    /// json string representation of the override argument if the user
    /// needs it to be changed in some way
    #[prost(string, optional, tag = "1")]
    pub override_args: ::core::option::Option<::prost::alloc::string::String>,
}
/// Denial payload for a tool call.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ToolDeniedResponse {
    /// Human-readable reason for the denial.
    /// NOTE(review): this message starts at tag 2; tag 1 appears unused in the proto.
    #[prost(string, tag = "2")]
    pub denial_reason: ::prost::alloc::string::String,
}
/// RetryRequest retries the last request (e.g., if it was interrupted/failed part-way through)
///
/// Empty marker message; selected via `StreamChatRequest.request_type`.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct RetryRequest {}
/// UserPromptRequest contains a new user message
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UserPromptRequest {
    /// The user's message content.
    #[prost(message, optional, tag = "1")]
    pub message: ::core::option::Option<UserModelMessage>,
    /// Optional: image files to provide to the agent
    #[prost(message, repeated, tag = "2")]
    pub images: ::prost::alloc::vec::Vec<ImagePart>,
}
/// ToolApprovalRequest contains tool approval results
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ToolApprovalRequest {
    /// One approval/denial result per outstanding tool call.
    #[prost(message, repeated, tag = "1")]
    pub tool_approvals: ::prost::alloc::vec::Vec<ToolApprovalResult>,
}
/// StreamChatRequest is a request to stream chat messages for AI agent.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StreamChatRequest {
    /// The conversation ID
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    /// DEPRECATED: use request_type.user_prompt.message instead
    #[deprecated]
    #[prost(message, optional, tag = "2")]
    pub message: ::core::option::Option<UserModelMessage>,
    /// DEPRECATED: use request_type.user_prompt.images instead
    #[deprecated]
    #[prost(message, repeated, tag = "3")]
    pub images: ::prost::alloc::vec::Vec<ImagePart>,
    /// DEPRECATED: use request_type.tool_approval instead
    #[deprecated]
    #[prost(message, repeated, tag = "6")]
    pub tool_approvals: ::prost::alloc::vec::Vec<ToolApprovalResult>,
    /// The type of request - exactly one should be set
    #[prost(oneof = "stream_chat_request::RequestType", tags = "7, 8, 9")]
    pub request_type: ::core::option::Option<stream_chat_request::RequestType>,
    /// Context-specific fields based on the oneofKind.
    /// Workbook / global / checklist; see `stream_chat_request::Context`.
    #[prost(oneof = "stream_chat_request::Context", tags = "4, 5, 10")]
    pub context: ::core::option::Option<stream_chat_request::Context>,
}
/// Nested message and enum types in `StreamChatRequest`.
pub mod stream_chat_request {
    /// The type of request - exactly one should be set
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum RequestType {
        /// Retry the last request.
        #[prost(message, tag = "7")]
        Retry(super::RetryRequest),
        /// A new user message (optionally with images).
        #[prost(message, tag = "8")]
        UserPrompt(super::UserPromptRequest),
        /// Approval/denial results for outstanding tool calls.
        #[prost(message, tag = "9")]
        ToolApproval(super::ToolApprovalRequest),
    }
    /// Context-specific fields based on the oneofKind.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Context {
        /// Conversation is attached to a workbook.
        #[prost(message, tag = "4")]
        Workbook(super::WorkbookContext),
        /// No specific resource context.
        #[prost(message, tag = "5")]
        Global(super::GlobalContext),
        /// Conversation is attached to a checklist being edited.
        #[prost(message, tag = "10")]
        Checklist(super::ChecklistContext),
    }
}
/// WorkbookContext contains workbook-specific context fields
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct WorkbookContext {
    /// RID of the workbook to use for context
    #[prost(string, tag = "1")]
    pub workbook_rid: ::prost::alloc::string::String,
    /// The user's presence in the workbook
    /// (which tab and time range the user is viewing at message time).
    #[prost(message, optional, tag = "2")]
    pub user_presence: ::core::option::Option<WorkbookUserPresence>,
}
/// ChecklistContext for use when the agent is being messaged in the context of a
/// checklist currently being edited
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ChecklistContext {
    /// RID of the checklist being edited
    #[prost(string, tag = "1")]
    pub checklist_rid: ::prost::alloc::string::String,
    /// Name of the branch that this edit is being done on
    /// This is equivalent to the 'draft name' for a check being edited in the UI
    #[prost(string, tag = "2")]
    pub branch_name: ::prost::alloc::string::String,
    /// A checklist opened in edit mode will always have some resource by which it
    /// is referencing for viewing. It can either be an asset or a run
    /// (prost models oneofs as `Option`, so `None` is representable even though
    /// the proto comment implies the field is always set).
    #[prost(oneof = "checklist_context::ReferenceRid", tags = "3, 4")]
    pub reference_rid: ::core::option::Option<checklist_context::ReferenceRid>,
}
/// Nested message and enum types in `ChecklistContext`.
pub mod checklist_context {
    /// A checklist opened in edit mode will always have some resource by which it
    /// is referencing for viewing. It can either be an asset or a run
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum ReferenceRid {
        /// RID of the referenced asset.
        #[prost(string, tag = "3")]
        Asset(::prost::alloc::string::String),
        /// RID of the referenced run.
        #[prost(string, tag = "4")]
        Run(::prost::alloc::string::String),
    }
}
/// DefaultContext (no context)
///
/// Empty marker: the conversation is not attached to any specific resource.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct GlobalContext {}
/// WorkbookUserPresence contains the user's presence in the workbook
/// which is used to describe what the user is viewing at the time of the message.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct WorkbookUserPresence {
    /// Index of the workbook tab the user is viewing.
    #[prost(int32, tag = "1")]
    pub tab_index: i32,
    /// Time range currently in view.
    #[prost(message, optional, tag = "2")]
    pub range: ::core::option::Option<TimeRange>,
}
/// CreateConversation request will create a new conversation thread
/// if old conversation id is not set, a brand new, clear chat is created
/// If old conversation id is set without a previous message id, the full conversation thread will be copied
/// if old conversation id is set with a previous message id, the conversation thread up until that message will be copied
/// the above case is useful for branching a conversation into a new thread
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateConversationRequest {
    /// Title for the new conversation.
    #[prost(string, tag = "1")]
    pub title: ::prost::alloc::string::String,
    /// Workspace in which the conversation is created.
    #[prost(string, tag = "2")]
    pub workspace_rid: ::prost::alloc::string::String,
    /// Conversation to copy/branch from (see behavior described above).
    #[prost(string, optional, tag = "3")]
    pub old_conversation_rid: ::core::option::Option<::prost::alloc::string::String>,
    /// Branch point within the old conversation; only meaningful together with
    /// `old_conversation_rid`.
    #[prost(string, optional, tag = "4")]
    pub previous_message_id: ::core::option::Option<::prost::alloc::string::String>,
    /// Initial mode (read-only or edit) for the new conversation.
    #[prost(message, optional, tag = "5")]
    pub conversation_mode: ::core::option::Option<ConversationMode>,
}
/// CreateConversationResponse will return the conversation id for the new conversation
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateConversationResponse {
    /// RID of the newly created conversation.
    #[prost(string, tag = "1")]
    pub new_conversation_rid: ::prost::alloc::string::String,
}
/// Updates the fields if specified (optional means no change for that field)
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UpdateConversationMetadataRequest {
    /// New title; `None` leaves the title unchanged.
    #[prost(string, optional, tag = "1")]
    pub title: ::core::option::Option<::prost::alloc::string::String>,
    /// Conversation to update (required).
    #[prost(string, tag = "2")]
    pub conversation_rid: ::prost::alloc::string::String,
    /// New mode; `None` leaves the mode unchanged.
    #[prost(message, optional, tag = "3")]
    pub conversation_mode: ::core::option::Option<ConversationMode>,
}
/// Empty acknowledgement for UpdateConversationMetadata.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct UpdateConversationMetadataResponse {}
/// Deletes the conversation identified by `conversation_rid`.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteConversationRequest {
    /// Conversation to delete.
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
}
/// Empty acknowledgement for DeleteConversation.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct DeleteConversationResponse {}
/// a GetConversationRequest allows you to retrieve a subset of messages from a conversation thread represented
/// by provided rid. To start from a particular message - you can also provide a message id.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetConversationRequest {
    /// Conversation to read from.
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    /// Message id to start the page at; `None` starts from the beginning.
    #[prost(string, optional, tag = "2")]
    pub page_start_message_id: ::core::option::Option<::prost::alloc::string::String>,
    /// Maximum number of messages to return; `None` means server default.
    #[prost(int32, optional, tag = "3")]
    pub max_message_count: ::core::option::Option<i32>,
}
/// a CompactConversationRequest allows you to reduce the token count in your conversation by up to half
/// will be a no op if current conversation has not yet reached at least half of the max token count
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CompactConversationRequest {
    /// Conversation to compact.
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
}
/// returns the new token count of the now compacted conversation
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct CompactConversationResponse {
    /// Token usage after compaction, relative to the model's context limit.
    #[prost(message, optional, tag = "1")]
    pub context: ::core::option::Option<ContextStatus>,
}
/// Model message with id allows you to identify the message ID of a given message
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ModelMessageWithId {
    /// Identifier of this message within its conversation.
    #[prost(string, tag = "3")]
    pub message_id: ::prost::alloc::string::String,
    /// WB agent user messages can have snapshot rids associated with them
    #[prost(string, optional, tag = "4")]
    pub snapshot_rid: ::core::option::Option<::prost::alloc::string::String>,
    /// Tool calls awaiting (or carrying) approval state for this message.
    #[prost(message, repeated, tag = "5")]
    pub tool_approval_requests: ::prost::alloc::vec::Vec<ToolCallDescription>,
    /// The message payload: a model message, tool action, or its confirmation.
    #[prost(oneof = "model_message_with_id::Content", tags = "1, 2, 6")]
    pub content: ::core::option::Option<model_message_with_id::Content>,
}
/// Nested message and enum types in `ModelMessageWithId`.
pub mod model_message_with_id {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Content {
        /// A user or assistant model message.
        #[prost(message, tag = "1")]
        Message(super::ModelMessage),
        /// A high-level description of a tool call the agent made.
        #[prost(message, tag = "2")]
        ToolAction(super::ToolAction),
        /// Success/failure outcome of a previously emitted tool action.
        #[prost(message, tag = "6")]
        ToolActionConfirmation(super::ToolActionConfirmation),
    }
}
/// A page of conversation messages plus the conversation's metadata.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetConversationResponse {
    /// Messages in conversation order.
    #[prost(message, repeated, tag = "1")]
    pub ordered_messages: ::prost::alloc::vec::Vec<ModelMessageWithId>,
    /// Metadata (title, timestamps, mode, context status) for the conversation.
    #[prost(message, optional, tag = "2")]
    pub conversation_metadata: ::core::option::Option<ConversationMetadata>,
}
/// Requests only the metadata for a single conversation.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetConversationMetadataRequest {
    /// Conversation whose metadata is requested.
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
}
/// Metadata for the requested conversation.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetConversationMetadataResponse {
    /// Title, timestamps, mode, and context status of the conversation.
    #[prost(message, optional, tag = "1")]
    pub conversation_metadata: ::core::option::Option<ConversationMetadata>,
}
/// Requests a page of messages from a conversation (messages only, no metadata).
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetConversationMessagesRequest {
    /// Conversation to read from.
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    /// Message id to start the page at; `None` starts from the beginning.
    #[prost(string, optional, tag = "2")]
    pub page_start_message_id: ::core::option::Option<::prost::alloc::string::String>,
    /// Maximum number of messages to return; `None` means server default.
    #[prost(int32, optional, tag = "3")]
    pub max_message_count: ::core::option::Option<i32>,
}
/// A page of conversation messages with an optional continuation token.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetConversationMessagesResponse {
    /// Messages in conversation order.
    #[prost(message, repeated, tag = "1")]
    pub ordered_messages: ::prost::alloc::vec::Vec<ModelMessageWithId>,
    /// Token for fetching the next page; `None` when there are no more pages.
    #[prost(string, optional, tag = "2")]
    pub next_page_token: ::core::option::Option<::prost::alloc::string::String>,
}
/// Will generate all conversation threads that this user has in this workspace
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListConversationsRequest {
    /// Workspace whose conversations are listed.
    #[prost(string, tag = "1")]
    pub workspace_rid: ::prost::alloc::string::String,
    /// Continuation token from a previous response; `None` starts from the top.
    #[prost(string, optional, tag = "2")]
    pub next_page_token: ::core::option::Option<::prost::alloc::string::String>,
    /// Maximum number of conversations per page; `None` means server default.
    #[prost(int32, optional, tag = "3")]
    pub page_size: ::core::option::Option<i32>,
}
/// Summary information about a conversation thread.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ConversationMetadata {
    /// RID identifying the conversation.
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    /// Human-readable conversation title.
    #[prost(string, tag = "2")]
    pub title: ::prost::alloc::string::String,
    /// When the conversation was created.
    #[prost(message, optional, tag = "3")]
    pub created_at: ::core::option::Option<
        super::super::super::google::protobuf::Timestamp,
    >,
    /// When the conversation was last updated.
    #[prost(message, optional, tag = "4")]
    pub last_updated_at: ::core::option::Option<
        super::super::super::google::protobuf::Timestamp,
    >,
    /// Read-only or edit mode of the conversation.
    #[prost(message, optional, tag = "5")]
    pub mode: ::core::option::Option<ConversationMode>,
    /// Current token usage relative to the model's context limit.
    #[prost(message, optional, tag = "6")]
    pub current_context: ::core::option::Option<ContextStatus>,
}
/// ListConversationsResponse is a list of conversations that can be used in a call to GetConversationRequest
/// to get a full conversation from storage. These are ordered by creation time.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListConversationsResponse {
    /// Conversation summaries, ordered by creation time.
    #[prost(message, repeated, tag = "1")]
    pub conversations: ::prost::alloc::vec::Vec<ConversationMetadata>,
    /// Token for fetching the next page; `None` when there are no more pages.
    #[prost(string, optional, tag = "2")]
    pub next_page_token: ::core::option::Option<::prost::alloc::string::String>,
}
/// A closed time interval described by start and end timestamps.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct TimeRange {
    /// Start of the range.
    #[prost(message, optional, tag = "1")]
    pub range_start: ::core::option::Option<Timestamp>,
    /// End of the range.
    #[prost(message, optional, tag = "2")]
    pub range_end: ::core::option::Option<Timestamp>,
}
/// A second/nanosecond timestamp pair (distinct from `google.protobuf.Timestamp`).
///
/// NOTE(review): `seconds` is int32 — if this encodes a Unix epoch it cannot
/// represent instants after 2038-01-19; confirm the intended epoch and range.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct Timestamp {
    /// Whole seconds component.
    #[prost(int32, tag = "1")]
    pub seconds: i32,
    /// Fractional nanoseconds component.
    #[prost(int32, tag = "2")]
    pub nanoseconds: i32,
}
/// ModelMessage is a discriminated union of system, user, assistant, and tool messages.
/// Each message type has its own structure and content.
///
/// NOTE(review): only user and assistant variants are generated here (tags 1, 2);
/// the comment above mentions system/tool messages that do not exist in this proto.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ModelMessage {
    /// User or assistant message; `None` when the oneof is unset.
    #[prost(oneof = "model_message::Kind", tags = "1, 2")]
    pub kind: ::core::option::Option<model_message::Kind>,
}
/// Nested message and enum types in `ModelMessage`.
pub mod model_message {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Kind {
        /// Message authored by the user.
        #[prost(message, tag = "1")]
        User(super::UserModelMessage),
        /// Message authored by the assistant.
        #[prost(message, tag = "2")]
        Assistant(super::AssistantModelMessage),
    }
}
/// A user message containing text
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UserModelMessage {
    /// Ordered content parts; despite the field name, each element is a
    /// `UserContentPart` (currently text-only).
    #[prost(message, repeated, tag = "1")]
    pub text: ::prost::alloc::vec::Vec<UserContentPart>,
}
/// An assistant message containing text
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AssistantModelMessage {
    /// Ordered content parts (text or reasoning).
    #[prost(message, repeated, tag = "1")]
    pub content_parts: ::prost::alloc::vec::Vec<AssistantContentPart>,
}
/// A single content part of a user message (currently text-only).
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UserContentPart {
    /// The part payload; `None` when the oneof is unset.
    #[prost(oneof = "user_content_part::Part", tags = "1")]
    pub part: ::core::option::Option<user_content_part::Part>,
}
/// Nested message and enum types in `UserContentPart`.
pub mod user_content_part {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Part {
        /// Plain text content.
        #[prost(message, tag = "1")]
        Text(super::TextPart),
    }
}
/// Content part for assistant messages: can be text or reasoning.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AssistantContentPart {
    /// The part payload; `None` when the oneof is unset.
    #[prost(oneof = "assistant_content_part::Part", tags = "1, 2")]
    pub part: ::core::option::Option<assistant_content_part::Part>,
}
/// Nested message and enum types in `AssistantContentPart`.
pub mod assistant_content_part {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Part {
        /// Plain text content.
        #[prost(message, tag = "1")]
        Text(super::TextPart),
        /// Model reasoning content.
        #[prost(message, tag = "2")]
        Reasoning(super::ReasoningPart),
    }
}
/// Text part for user or assistant messages.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextPart {
    /// The text content.
    #[prost(string, tag = "1")]
    pub text: ::prost::alloc::string::String,
}
/// User-supplied image part.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ImagePart {
    /// The base64-encoded image data
    /// NOTE(review): the comment says base64, but a proto `bytes` field carries
    /// raw octets on the wire — confirm whether producers pre-encode this.
    #[prost(bytes = "vec", tag = "1")]
    pub data: ::prost::alloc::vec::Vec<u8>,
    /// The media type of the image (e.g. "image/png", "image/jpeg")
    #[prost(string, optional, tag = "2")]
    pub media_type: ::core::option::Option<::prost::alloc::string::String>,
    /// Optional: the filename of the image
    #[prost(string, optional, tag = "3")]
    pub filename: ::core::option::Option<::prost::alloc::string::String>,
}
/// Reasoning part for assistant messages.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningPart {
    /// The reasoning text.
    #[prost(string, tag = "1")]
    pub reasoning: ::prost::alloc::string::String,
}
/// StreamChatResponse is a discriminated union response to a StreamChatRequest
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StreamChatResponse {
    /// Exactly one stream event; note tag 9 is unused/reserved in the proto.
    #[prost(
        oneof = "stream_chat_response::Response",
        tags = "1, 2, 3, 4, 5, 6, 7, 8, 10, 11"
    )]
    pub response: ::core::option::Option<stream_chat_response::Response>,
}
/// Nested message and enum types in `StreamChatResponse`.
pub mod stream_chat_response {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Response {
        /// Terminal event: the agent run finished.
        #[prost(message, tag = "1")]
        Finish(super::Finish),
        /// An error occurred during the chat session.
        #[prost(message, tag = "2")]
        Error(super::Error),
        /// Start of a streamed text message.
        #[prost(message, tag = "3")]
        TextStart(super::TextStart),
        /// Next chunk of a streamed text message.
        #[prost(message, tag = "4")]
        TextDelta(super::TextDelta),
        /// End of a streamed text message.
        #[prost(message, tag = "5")]
        TextEnd(super::TextEnd),
        /// Start of a streamed reasoning message.
        #[prost(message, tag = "6")]
        ReasoningStart(super::ReasoningStart),
        /// Next chunk of a streamed reasoning message.
        #[prost(message, tag = "7")]
        ReasoningDelta(super::ReasoningDelta),
        /// End of a streamed reasoning message.
        #[prost(message, tag = "8")]
        ReasoningEnd(super::ReasoningEnd),
        /// High-level description of a tool call the agent is making.
        #[prost(message, tag = "10")]
        ToolAction(super::ToolAction),
        /// Outcome (success/failure) of a previously emitted tool action.
        #[prost(message, tag = "11")]
        ToolActionConfirmation(super::ToolActionConfirmation),
    }
}
/// Describes a single tool call and its approval status, for display to the user.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ToolCallDescription {
    /// Identifies the tool call.
    #[prost(string, tag = "1")]
    pub tool_call_id: ::prost::alloc::string::String,
    /// Name of the tool being invoked.
    #[prost(string, tag = "2")]
    pub tool_name: ::prost::alloc::string::String,
    /// string representation of the proposed tool args for display
    #[prost(string, tag = "3")]
    pub tool_args_json_string: ::prost::alloc::string::String,
    /// used to conditionally render an approval button based on the outcome of the tool call
    #[prost(enumeration = "ToolCallStatus", tag = "4")]
    pub status: i32,
}
/// Indicates the end of a chat session
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Finish {
    /// The message ids in order of all generated messages for this agent run
    /// These ids can be used to branch a message from that specific message
    #[prost(string, repeated, tag = "1")]
    pub ordered_message_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
    /// In the case that this is the first agent run in a conversation thread, we also
    /// return the new conversation title generated
    #[prost(string, optional, tag = "2")]
    pub new_title: ::core::option::Option<::prost::alloc::string::String>,
    /// Tool calls still awaiting user approval at the end of this run.
    #[prost(message, repeated, tag = "3")]
    pub tool_approval_requests: ::prost::alloc::vec::Vec<ToolCallDescription>,
    /// Token usage after this run, relative to the model's context limit.
    #[prost(message, optional, tag = "4")]
    pub updated_context: ::core::option::Option<ContextStatus>,
}
/// An error that occurred during the chat session
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Error {
    /// Human-readable error description.
    #[prost(string, tag = "1")]
    pub message: ::prost::alloc::string::String,
}
/// Indicates the start of a text message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextStart {
    /// uniquely identifies the text message (e.g. uuid) so that the client can
    /// merge parallel message streams (if it happens).
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// A delta (continuation) of a text message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextDelta {
    /// Matches the `id` of the corresponding `TextStart`.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// The next chunk of text
    #[prost(string, tag = "2")]
    pub delta: ::prost::alloc::string::String,
}
/// Indicates the end of a text message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextEnd {
    /// Matches the `id` of the corresponding `TextStart`.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// Indicates the start of a reasoning message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningStart {
    /// Uniquely identifies this reasoning message stream.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// A delta (continuation) of a reasoning message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningDelta {
    /// Matches the `id` of the corresponding `ReasoningStart`.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// The next chunk of reasoning
    #[prost(string, tag = "2")]
    pub delta: ::prost::alloc::string::String,
}
/// Indicates the end of a reasoning message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningEnd {
    /// Matches the `id` of the corresponding `ReasoningStart`.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// this is a concise description of a tool call that the agent is making internally
/// without revealing too much detail about the tool call, it informs the user what the agent is doing
/// at a high level. the format is: `{tool_action_verb} {tool_target}` for example:
/// "Search channels for My Datasource"
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ToolAction {
    /// Identifies this action; matched by a later `ToolActionConfirmation.id`.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// "Thought", "Read", "Find", "Look-up", etc.
    #[prost(string, tag = "2")]
    pub tool_action_verb: ::prost::alloc::string::String,
    /// "workbook", "channel", "variable", "panel", etc.
    #[prost(string, optional, tag = "3")]
    pub tool_target: ::core::option::Option<::prost::alloc::string::String>,
}
/// When we see a ToolAction in the stream, we wait for the corresponding ToolActionConfirmation
/// to indicate whether or not the tool call has successfully executed
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ToolActionConfirmation {
    /// Matches the `id` of the corresponding `ToolAction`.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// Success or failure of the tool call.
    #[prost(oneof = "tool_action_confirmation::Outcome", tags = "2, 3")]
    pub outcome: ::core::option::Option<tool_action_confirmation::Outcome>,
}
/// Nested message and enum types in `ToolActionConfirmation`.
pub mod tool_action_confirmation {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Outcome {
        /// The tool call executed successfully.
        #[prost(message, tag = "2")]
        Success(super::ToolActionSuccess),
        /// The tool call failed.
        #[prost(message, tag = "3")]
        Failure(super::ToolActionFailure),
    }
}
/// Successful outcome of a tool action.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ToolActionSuccess {
    /// Human-readable success message.
    #[prost(string, tag = "1")]
    pub tool_success_message: ::prost::alloc::string::String,
}
/// Failed outcome of a tool action.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ToolActionFailure {
    /// Human-readable error message.
    #[prost(string, tag = "1")]
    pub tool_error_message: ::prost::alloc::string::String,
}
/// ContextStatus represents the current token usage of a conversation relative to the selected model's context limit.
/// NOTE: A curr_token_count of zero means the token count is not yet known (e.g., no prompt has been sent yet,
/// or the conversation was just branched). It does not necessarily mean the conversation is empty.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct ContextStatus {
    /// Current token count; 0 means "not yet known" (see note above).
    #[prost(int32, tag = "1")]
    pub curr_token_count: i32,
    /// Maximum token count supported by the selected model.
    #[prost(int32, tag = "2")]
    pub model_context_limit: i32,
}
/// Approval state of a tool call, used by `ToolCallDescription.status`.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum ToolCallStatus {
    /// Default/unset value.
    Unspecified = 0,
    /// The user approved the tool call.
    Approved = 1,
    /// The user denied the tool call.
    Denied = 2,
    /// The tool call is waiting on a user decision.
    AwaitingApproval = 3,
}
1348impl ToolCallStatus {
1349    /// String value of the enum field names used in the ProtoBuf definition.
1350    ///
1351    /// The values are not transformed in any way and thus are considered stable
1352    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
1353    pub fn as_str_name(&self) -> &'static str {
1354        match self {
1355            Self::Unspecified => "TOOL_CALL_STATUS_UNSPECIFIED",
1356            Self::Approved => "TOOL_CALL_STATUS_APPROVED",
1357            Self::Denied => "TOOL_CALL_STATUS_DENIED",
1358            Self::AwaitingApproval => "TOOL_CALL_STATUS_AWAITING_APPROVAL",
1359        }
1360    }
1361    /// Creates an enum from field names used in the ProtoBuf definition.
1362    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
1363        match value {
1364            "TOOL_CALL_STATUS_UNSPECIFIED" => Some(Self::Unspecified),
1365            "TOOL_CALL_STATUS_APPROVED" => Some(Self::Approved),
1366            "TOOL_CALL_STATUS_DENIED" => Some(Self::Denied),
1367            "TOOL_CALL_STATUS_AWAITING_APPROVAL" => Some(Self::AwaitingApproval),
1368            _ => None,
1369        }
1370    }
1371}
1372/// Generated client implementations.
1373pub mod ai_agent_service_client {
1374    #![allow(
1375        unused_variables,
1376        dead_code,
1377        missing_docs,
1378        clippy::wildcard_imports,
1379        clippy::let_unit_value,
1380    )]
1381    use tonic::codegen::*;
1382    use tonic::codegen::http::Uri;
    /// AIAgentService provides AI-powered assistance for general operations
    #[derive(Debug, Clone)]
    pub struct AiAgentServiceClient<T> {
        /// Underlying tonic gRPC client wrapping transport/service `T`.
        inner: tonic::client::Grpc<T>,
    }
1388    impl AiAgentServiceClient<tonic::transport::Channel> {
1389        /// Attempt to create a new client by connecting to a given endpoint.
1390        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
1391        where
1392            D: TryInto<tonic::transport::Endpoint>,
1393            D::Error: Into<StdError>,
1394        {
1395            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
1396            Ok(Self::new(conn))
1397        }
1398    }
1399    impl<T> AiAgentServiceClient<T>
1400    where
1401        T: tonic::client::GrpcService<tonic::body::Body>,
1402        T::Error: Into<StdError>,
1403        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
1404        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
1405    {
1406        pub fn new(inner: T) -> Self {
1407            let inner = tonic::client::Grpc::new(inner);
1408            Self { inner }
1409        }
1410        pub fn with_origin(inner: T, origin: Uri) -> Self {
1411            let inner = tonic::client::Grpc::with_origin(inner, origin);
1412            Self { inner }
1413        }
        /// Wrap the service in an `InterceptedService` so `interceptor` runs on
        /// every outgoing request before it reaches the transport.
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> AiAgentServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            AiAgentServiceClient::new(InterceptedService::new(inner, interceptor))
        }
1433        /// Compress requests with the given encoding.
1434        ///
1435        /// This requires the server to support it otherwise it might respond with an
1436        /// error.
1437        #[must_use]
1438        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
1439            self.inner = self.inner.send_compressed(encoding);
1440            self
1441        }
1442        /// Enable decompressing responses.
1443        #[must_use]
1444        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
1445            self.inner = self.inner.accept_compressed(encoding);
1446            self
1447        }
1448        /// Limits the maximum size of a decoded message.
1449        ///
1450        /// Default: `4MB`
1451        #[must_use]
1452        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
1453            self.inner = self.inner.max_decoding_message_size(limit);
1454            self
1455        }
1456        /// Limits the maximum size of an encoded message.
1457        ///
1458        /// Default: `usize::MAX`
1459        #[must_use]
1460        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
1461            self.inner = self.inner.max_encoding_message_size(limit);
1462            self
1463        }
1464        /// StreamChat handles bidirectional streaming chat for AI agent
1465        pub async fn stream_chat(
1466            &mut self,
1467            request: impl tonic::IntoRequest<super::StreamChatRequest>,
1468        ) -> std::result::Result<
1469            tonic::Response<tonic::codec::Streaming<super::StreamChatResponse>>,
1470            tonic::Status,
1471        > {
1472            self.inner
1473                .ready()
1474                .await
1475                .map_err(|e| {
1476                    tonic::Status::unknown(
1477                        format!("Service was not ready: {}", e.into()),
1478                    )
1479                })?;
1480            let codec = tonic::codec::ProstCodec::default();
1481            let path = http::uri::PathAndQuery::from_static(
1482                "/nominal.ai.v1.AIAgentService/StreamChat",
1483            );
1484            let mut req = request.into_request();
1485            req.extensions_mut()
1486                .insert(GrpcMethod::new("nominal.ai.v1.AIAgentService", "StreamChat"));
1487            self.inner.server_streaming(req, path, codec).await
1488        }
1489        /// GetConversation [DEPRACATED] handles getting a complete conversation list, with an optional limit on number of messages returned
1490        #[deprecated]
1491        pub async fn get_conversation(
1492            &mut self,
1493            request: impl tonic::IntoRequest<super::GetConversationRequest>,
1494        ) -> std::result::Result<
1495            tonic::Response<super::GetConversationResponse>,
1496            tonic::Status,
1497        > {
1498            self.inner
1499                .ready()
1500                .await
1501                .map_err(|e| {
1502                    tonic::Status::unknown(
1503                        format!("Service was not ready: {}", e.into()),
1504                    )
1505                })?;
1506            let codec = tonic::codec::ProstCodec::default();
1507            let path = http::uri::PathAndQuery::from_static(
1508                "/nominal.ai.v1.AIAgentService/GetConversation",
1509            );
1510            let mut req = request.into_request();
1511            req.extensions_mut()
1512                .insert(
1513                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "GetConversation"),
1514                );
1515            self.inner.unary(req, path, codec).await
1516        }
1517        /// GetConversationMetadata handles getting the conversation metadata like title, current token count etc
1518        pub async fn get_conversation_metadata(
1519            &mut self,
1520            request: impl tonic::IntoRequest<super::GetConversationMetadataRequest>,
1521        ) -> std::result::Result<
1522            tonic::Response<super::GetConversationMetadataResponse>,
1523            tonic::Status,
1524        > {
1525            self.inner
1526                .ready()
1527                .await
1528                .map_err(|e| {
1529                    tonic::Status::unknown(
1530                        format!("Service was not ready: {}", e.into()),
1531                    )
1532                })?;
1533            let codec = tonic::codec::ProstCodec::default();
1534            let path = http::uri::PathAndQuery::from_static(
1535                "/nominal.ai.v1.AIAgentService/GetConversationMetadata",
1536            );
1537            let mut req = request.into_request();
1538            req.extensions_mut()
1539                .insert(
1540                    GrpcMethod::new(
1541                        "nominal.ai.v1.AIAgentService",
1542                        "GetConversationMetadata",
1543                    ),
1544                );
1545            self.inner.unary(req, path, codec).await
1546        }
1547        /// GetConversationMessages handles retrieving the set of conversation messages. Supports pagination
1548        pub async fn get_conversation_messages(
1549            &mut self,
1550            request: impl tonic::IntoRequest<super::GetConversationMessagesRequest>,
1551        ) -> std::result::Result<
1552            tonic::Response<super::GetConversationMessagesResponse>,
1553            tonic::Status,
1554        > {
1555            self.inner
1556                .ready()
1557                .await
1558                .map_err(|e| {
1559                    tonic::Status::unknown(
1560                        format!("Service was not ready: {}", e.into()),
1561                    )
1562                })?;
1563            let codec = tonic::codec::ProstCodec::default();
1564            let path = http::uri::PathAndQuery::from_static(
1565                "/nominal.ai.v1.AIAgentService/GetConversationMessages",
1566            );
1567            let mut req = request.into_request();
1568            req.extensions_mut()
1569                .insert(
1570                    GrpcMethod::new(
1571                        "nominal.ai.v1.AIAgentService",
1572                        "GetConversationMessages",
1573                    ),
1574                );
1575            self.inner.unary(req, path, codec).await
1576        }
1577        /// ListConversations handles getting the list of conversation ids ordered by most recently updated
1578        pub async fn list_conversations(
1579            &mut self,
1580            request: impl tonic::IntoRequest<super::ListConversationsRequest>,
1581        ) -> std::result::Result<
1582            tonic::Response<super::ListConversationsResponse>,
1583            tonic::Status,
1584        > {
1585            self.inner
1586                .ready()
1587                .await
1588                .map_err(|e| {
1589                    tonic::Status::unknown(
1590                        format!("Service was not ready: {}", e.into()),
1591                    )
1592                })?;
1593            let codec = tonic::codec::ProstCodec::default();
1594            let path = http::uri::PathAndQuery::from_static(
1595                "/nominal.ai.v1.AIAgentService/ListConversations",
1596            );
1597            let mut req = request.into_request();
1598            req.extensions_mut()
1599                .insert(
1600                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "ListConversations"),
1601                );
1602            self.inner.unary(req, path, codec).await
1603        }
1604        /// CreateConversation handles creating a conversation and assigning it a conversation rid
1605        pub async fn create_conversation(
1606            &mut self,
1607            request: impl tonic::IntoRequest<super::CreateConversationRequest>,
1608        ) -> std::result::Result<
1609            tonic::Response<super::CreateConversationResponse>,
1610            tonic::Status,
1611        > {
1612            self.inner
1613                .ready()
1614                .await
1615                .map_err(|e| {
1616                    tonic::Status::unknown(
1617                        format!("Service was not ready: {}", e.into()),
1618                    )
1619                })?;
1620            let codec = tonic::codec::ProstCodec::default();
1621            let path = http::uri::PathAndQuery::from_static(
1622                "/nominal.ai.v1.AIAgentService/CreateConversation",
1623            );
1624            let mut req = request.into_request();
1625            req.extensions_mut()
1626                .insert(
1627                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "CreateConversation"),
1628                );
1629            self.inner.unary(req, path, codec).await
1630        }
1631        /// UpdateConversationMetadata handles updating any metadata associated with a conversation
1632        pub async fn update_conversation_metadata(
1633            &mut self,
1634            request: impl tonic::IntoRequest<super::UpdateConversationMetadataRequest>,
1635        ) -> std::result::Result<
1636            tonic::Response<super::UpdateConversationMetadataResponse>,
1637            tonic::Status,
1638        > {
1639            self.inner
1640                .ready()
1641                .await
1642                .map_err(|e| {
1643                    tonic::Status::unknown(
1644                        format!("Service was not ready: {}", e.into()),
1645                    )
1646                })?;
1647            let codec = tonic::codec::ProstCodec::default();
1648            let path = http::uri::PathAndQuery::from_static(
1649                "/nominal.ai.v1.AIAgentService/UpdateConversationMetadata",
1650            );
1651            let mut req = request.into_request();
1652            req.extensions_mut()
1653                .insert(
1654                    GrpcMethod::new(
1655                        "nominal.ai.v1.AIAgentService",
1656                        "UpdateConversationMetadata",
1657                    ),
1658                );
1659            self.inner.unary(req, path, codec).await
1660        }
1661        /// DeleteConversation handles deleting a specific conversation by conversation rid
1662        pub async fn delete_conversation(
1663            &mut self,
1664            request: impl tonic::IntoRequest<super::DeleteConversationRequest>,
1665        ) -> std::result::Result<
1666            tonic::Response<super::DeleteConversationResponse>,
1667            tonic::Status,
1668        > {
1669            self.inner
1670                .ready()
1671                .await
1672                .map_err(|e| {
1673                    tonic::Status::unknown(
1674                        format!("Service was not ready: {}", e.into()),
1675                    )
1676                })?;
1677            let codec = tonic::codec::ProstCodec::default();
1678            let path = http::uri::PathAndQuery::from_static(
1679                "/nominal.ai.v1.AIAgentService/DeleteConversation",
1680            );
1681            let mut req = request.into_request();
1682            req.extensions_mut()
1683                .insert(
1684                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "DeleteConversation"),
1685                );
1686            self.inner.unary(req, path, codec).await
1687        }
1688        /// GetSnapshotRidByUserMessageId handles resolving the snapshot rid of the workbook at the time the message is sent
1689        pub async fn get_snapshot_rid_by_user_message_id(
1690            &mut self,
1691            request: impl tonic::IntoRequest<super::GetSnapshotRidByUserMessageIdRequest>,
1692        ) -> std::result::Result<
1693            tonic::Response<super::GetSnapshotRidByUserMessageIdResponse>,
1694            tonic::Status,
1695        > {
1696            self.inner
1697                .ready()
1698                .await
1699                .map_err(|e| {
1700                    tonic::Status::unknown(
1701                        format!("Service was not ready: {}", e.into()),
1702                    )
1703                })?;
1704            let codec = tonic::codec::ProstCodec::default();
1705            let path = http::uri::PathAndQuery::from_static(
1706                "/nominal.ai.v1.AIAgentService/GetSnapshotRidByUserMessageId",
1707            );
1708            let mut req = request.into_request();
1709            req.extensions_mut()
1710                .insert(
1711                    GrpcMethod::new(
1712                        "nominal.ai.v1.AIAgentService",
1713                        "GetSnapshotRidByUserMessageId",
1714                    ),
1715                );
1716            self.inner.unary(req, path, codec).await
1717        }
1718        /// CompactConversation handles compacting the conversation context into approximately half its original size
1719        pub async fn compact_conversation(
1720            &mut self,
1721            request: impl tonic::IntoRequest<super::CompactConversationRequest>,
1722        ) -> std::result::Result<
1723            tonic::Response<super::CompactConversationResponse>,
1724            tonic::Status,
1725        > {
1726            self.inner
1727                .ready()
1728                .await
1729                .map_err(|e| {
1730                    tonic::Status::unknown(
1731                        format!("Service was not ready: {}", e.into()),
1732                    )
1733                })?;
1734            let codec = tonic::codec::ProstCodec::default();
1735            let path = http::uri::PathAndQuery::from_static(
1736                "/nominal.ai.v1.AIAgentService/CompactConversation",
1737            );
1738            let mut req = request.into_request();
1739            req.extensions_mut()
1740                .insert(
1741                    GrpcMethod::new(
1742                        "nominal.ai.v1.AIAgentService",
1743                        "CompactConversation",
1744                    ),
1745                );
1746            self.inner.unary(req, path, codec).await
1747        }
1748    }
1749}