// nominal_api/proto/nominal.ai.v1.rs
// This file is @generated by prost-build.
/// Request payload for `DataIngestionErrorClassifierService.ClassifyError`.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ClassifyErrorRequest {
    /// The error message or description to classify
    #[prost(string, tag = "1")]
    pub error_message: ::prost::alloc::string::String,
}
/// Response payload for `DataIngestionErrorClassifierService.ClassifyError`.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ClassifyErrorResponse {
    /// The classification result
    /// (raw i32 wrapping `ErrorClassification` — prost keeps enum fields open).
    #[prost(enumeration = "ErrorClassification", tag = "1")]
    pub classification: i32,
    /// Explanation for why this classification was chosen
    #[prost(string, tag = "2")]
    pub reason: ::prost::alloc::string::String,
}
/// Classification outcome for a data ingestion error.
/// Per the service docs: CLIENT errors are customer data issues,
/// SERVER errors are infrastructure issues.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum ErrorClassification {
    /// Zero value — classification was not set.
    Unspecified = 0,
    /// Error caused by the customer's data or request.
    Client = 1,
    /// Error caused by infrastructure.
    Server = 2,
}
24impl ErrorClassification {
25    /// String value of the enum field names used in the ProtoBuf definition.
26    ///
27    /// The values are not transformed in any way and thus are considered stable
28    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
29    pub fn as_str_name(&self) -> &'static str {
30        match self {
31            Self::Unspecified => "ERROR_CLASSIFICATION_UNSPECIFIED",
32            Self::Client => "ERROR_CLASSIFICATION_CLIENT",
33            Self::Server => "ERROR_CLASSIFICATION_SERVER",
34        }
35    }
36    /// Creates an enum from field names used in the ProtoBuf definition.
37    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
38        match value {
39            "ERROR_CLASSIFICATION_UNSPECIFIED" => Some(Self::Unspecified),
40            "ERROR_CLASSIFICATION_CLIENT" => Some(Self::Client),
41            "ERROR_CLASSIFICATION_SERVER" => Some(Self::Server),
42            _ => None,
43        }
44    }
45}
/// Generated client implementations.
pub mod data_ingestion_error_classifier_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// DataIngestionErrorClassifierService classifies data ingestion errors as CLIENT or SERVER issues.
    /// CLIENT errors are customer data issues (empty files, schema conflicts, timestamp problems, etc.)
    /// SERVER errors are infrastructure issues (internal errors, timeouts, capacity limits, etc.)
    #[derive(Debug, Clone)]
    pub struct DataIngestionErrorClassifierServiceClient<T> {
        inner: tonic::client::Grpc<T>,
    }
    impl DataIngestionErrorClassifierServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> DataIngestionErrorClassifierServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        /// Wrap an already-constructed transport/service in a client.
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        /// Like [`Self::new`], but routes requests to the given origin `Uri`.
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        /// Build a client whose requests pass through `interceptor` before
        /// reaching the transport (e.g. to attach metadata to each request).
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> DataIngestionErrorClassifierServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            DataIngestionErrorClassifierServiceClient::new(
                InterceptedService::new(inner, interceptor),
            )
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// ClassifyError analyzes an error message and classifies it as CLIENT or SERVER.
        pub async fn classify_error(
            &mut self,
            request: impl tonic::IntoRequest<super::ClassifyErrorRequest>,
        ) -> std::result::Result<
            tonic::Response<super::ClassifyErrorResponse>,
            tonic::Status,
        > {
            // Wait for the underlying service to report readiness before sending.
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            // Fully-qualified gRPC path for this RPC.
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.DataIngestionErrorClassifierService/ClassifyError",
            );
            let mut req = request.into_request();
            // Tag the request with its service/method name via a `GrpcMethod` extension.
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.DataIngestionErrorClassifierService",
                        "ClassifyError",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}
/// Empty request for `ModelProviderHealthService.GetProviderStatus`.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct GetProviderStatusRequest {}
/// Response for `ModelProviderHealthService.GetProviderStatus`.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct GetProviderStatusResponse {
    /// Timestamp when the last status was determined
    #[prost(message, optional, tag = "1")]
    pub timestamp: ::core::option::Option<
        super::super::super::google::protobuf::Timestamp,
    >,
    /// Status of the most recent health check probe
    #[prost(message, optional, tag = "2")]
    pub last_status: ::core::option::Option<ProviderStatus>,
    /// Aggregated status over the last 30 minutes (DEGRADED if any check failed or exceeded thresholds)
    /// Deprecated: Use aggregated_status instead. This field is kept for backward compatibility.
    #[deprecated]
    #[prost(message, optional, tag = "3")]
    pub aggregated_status_over_last_30m: ::core::option::Option<ProviderStatus>,
    /// Aggregated status over the last iterations, configurable in the BE (DEGRADED if any check failed or exceeded thresholds)
    #[prost(message, optional, tag = "4")]
    pub aggregated_status: ::core::option::Option<ProviderStatus>,
}
/// Health verdict for the model provider: at most one of Healthy/Degraded.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct ProviderStatus {
    /// `None` when the oneof was not set on the wire.
    #[prost(oneof = "provider_status::Status", tags = "1, 2")]
    pub status: ::core::option::Option<provider_status::Status>,
}
/// Nested message and enum types in `ProviderStatus`.
pub mod provider_status {
    /// The mutually-exclusive states of `ProviderStatus.status`.
    #[derive(Clone, Copy, PartialEq, ::prost::Oneof)]
    pub enum Status {
        /// Provider passed the check (empty payload).
        #[prost(message, tag = "1")]
        Healthy(super::Healthy),
        /// Provider is degraded; payload carries the reason.
        #[prost(message, tag = "2")]
        Degraded(super::Degraded),
    }
}
/// Empty marker payload for `provider_status::Status::Healthy`.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct Healthy {}
/// Payload for `provider_status::Status::Degraded`.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct Degraded {
    /// Why the provider is degraded (raw i32 wrapping `DegradationReason`).
    #[prost(enumeration = "DegradationReason", tag = "1")]
    pub reason: i32,
}
/// Latency measurements for a provider probe.
/// NOTE(review): not referenced by any message visible in this chunk —
/// presumably consumed elsewhere; confirm before removing from the proto.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct ProviderMetrics {
    /// Time until the first token was produced (milliseconds, per field name).
    #[prost(int32, tag = "1")]
    pub time_to_first_token_ms: i32,
    /// Total elapsed time of the probe (milliseconds, per field name).
    #[prost(int32, tag = "2")]
    pub total_time_ms: i32,
}
/// Why a provider was marked degraded (see `Degraded.reason`).
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum DegradationReason {
    /// Zero value — reason not set.
    Unspecified = 0,
    HighLatency = 1,
    Failures = 2,
    /// Both high latency and failures observed.
    HighLatencyAndFailures = 3,
}
232impl DegradationReason {
233    /// String value of the enum field names used in the ProtoBuf definition.
234    ///
235    /// The values are not transformed in any way and thus are considered stable
236    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
237    pub fn as_str_name(&self) -> &'static str {
238        match self {
239            Self::Unspecified => "DEGRADATION_REASON_UNSPECIFIED",
240            Self::HighLatency => "DEGRADATION_REASON_HIGH_LATENCY",
241            Self::Failures => "DEGRADATION_REASON_FAILURES",
242            Self::HighLatencyAndFailures => {
243                "DEGRADATION_REASON_HIGH_LATENCY_AND_FAILURES"
244            }
245        }
246    }
247    /// Creates an enum from field names used in the ProtoBuf definition.
248    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
249        match value {
250            "DEGRADATION_REASON_UNSPECIFIED" => Some(Self::Unspecified),
251            "DEGRADATION_REASON_HIGH_LATENCY" => Some(Self::HighLatency),
252            "DEGRADATION_REASON_FAILURES" => Some(Self::Failures),
253            "DEGRADATION_REASON_HIGH_LATENCY_AND_FAILURES" => {
254                Some(Self::HighLatencyAndFailures)
255            }
256            _ => None,
257        }
258    }
259}
/// Generated client implementations.
pub mod model_provider_health_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// ModelProviderHealthService monitors the health and performance of the backing LLM model provider.
    /// It runs lightweight health checks every 5 minutes to measure provider responsiveness and reliability,
    /// independent of the complexity of user prompts.
    #[derive(Debug, Clone)]
    pub struct ModelProviderHealthServiceClient<T> {
        inner: tonic::client::Grpc<T>,
    }
    impl ModelProviderHealthServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> ModelProviderHealthServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        /// Wrap an already-constructed transport/service in a client.
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        /// Like [`Self::new`], but routes requests to the given origin `Uri`.
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        /// Build a client whose requests pass through `interceptor` before
        /// reaching the transport (e.g. to attach metadata to each request).
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> ModelProviderHealthServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            ModelProviderHealthServiceClient::new(
                InterceptedService::new(inner, interceptor),
            )
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// GetProviderStatus returns the current health status of the model provider based on recent health checks.
        /// The status is HEALTHY if all checks in the last 30 minutes passed latency thresholds,
        /// or DEGRADED if any checks exceeded latency thresholds or failed entirely.
        pub async fn get_provider_status(
            &mut self,
            request: impl tonic::IntoRequest<super::GetProviderStatusRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GetProviderStatusResponse>,
            tonic::Status,
        > {
            // Wait for the underlying service to report readiness before sending.
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            // Fully-qualified gRPC path for this RPC.
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.ModelProviderHealthService/GetProviderStatus",
            );
            let mut req = request.into_request();
            // Tag the request with its service/method name via a `GrpcMethod` extension.
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.ModelProviderHealthService",
                        "GetProviderStatus",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}
/// CreateOrUpdateKnowledgeBaseRequest is a request to create a knowledge from an attachment in the attachment's
/// workspace. Subsequent calls for the same attachment will overwrite the existing knowledge base.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateOrUpdateKnowledgeBaseRequest {
    /// RID of the source attachment the knowledge base is built from.
    #[prost(string, tag = "1")]
    pub attachment_rid: ::prost::alloc::string::String,
    /// summary of the knowledge base, will be used by the LLM to decide when to use it
    #[prost(string, tag = "2")]
    pub summary_description: ::prost::alloc::string::String,
    /// Optional type; per `KnowledgeBaseType` docs, unset defaults to PROMPT.
    #[prost(enumeration = "KnowledgeBaseType", optional, tag = "3")]
    pub r#type: ::core::option::Option<i32>,
}
/// CreateOrUpdateKnowledgeBaseResponse is the response to creating/updating a knowledge base
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateOrUpdateKnowledgeBaseResponse {
    /// RID of the created or updated knowledge base.
    #[prost(string, tag = "1")]
    pub knowledge_base_rid: ::prost::alloc::string::String,
}
/// KnowledgeBase represents a knowledge base entry
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct KnowledgeBase {
    /// RID identifying this knowledge base.
    #[prost(string, tag = "1")]
    pub knowledge_base_rid: ::prost::alloc::string::String,
    /// RID of the attachment this knowledge base was created from.
    #[prost(string, tag = "2")]
    pub attachment_rid: ::prost::alloc::string::String,
    /// RID of the workspace the knowledge base belongs to.
    #[prost(string, tag = "3")]
    pub workspace_rid: ::prost::alloc::string::String,
    /// Summary used by the LLM to decide when to use this knowledge base
    /// (see `CreateOrUpdateKnowledgeBaseRequest.summary_description`).
    #[prost(string, tag = "4")]
    pub summary_description: ::prost::alloc::string::String,
    /// Raw i32 wrapping `KnowledgeBaseType`.
    #[prost(enumeration = "KnowledgeBaseType", tag = "5")]
    pub r#type: i32,
    /// Version counter — semantics not visible here; presumably bumped on update.
    #[prost(int32, tag = "6")]
    pub version: i32,
}
/// Request for `KnowledgeBaseService.List`.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListRequest {
    /// Workspace whose knowledge bases should be listed.
    #[prost(string, tag = "1")]
    pub workspace_rid: ::prost::alloc::string::String,
}
/// Response for `KnowledgeBaseService.List`.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListResponse {
    /// Knowledge bases in the requested workspace.
    #[prost(message, repeated, tag = "1")]
    pub knowledge_bases: ::prost::alloc::vec::Vec<KnowledgeBase>,
}
/// Request for `KnowledgeBaseService.Delete`.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteRequest {
    /// RID of the knowledge base to remove.
    #[prost(string, tag = "1")]
    pub knowledge_base_rid: ::prost::alloc::string::String,
}
/// Response for `KnowledgeBaseService.Delete`.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct DeleteResponse {
    /// Whether the deletion succeeded — exact failure semantics not visible here.
    #[prost(bool, tag = "1")]
    pub success: bool,
}
/// Request for `KnowledgeBaseService.GetBatch`.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetBatchRequest {
    /// RIDs of the knowledge bases to fetch.
    #[prost(string, repeated, tag = "1")]
    pub knowledge_base_rids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
/// Response for `KnowledgeBaseService.GetBatch`.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetBatchResponse {
    /// Fetched knowledge bases; ordering/missing-RID behavior not specified here.
    #[prost(message, repeated, tag = "1")]
    pub knowledge_bases: ::prost::alloc::vec::Vec<KnowledgeBase>,
}
/// generate summary description is intentionally going to return the generated description to the frontend
/// rather than storing it in the knowledge base directly because the description needs to be accepted by the user
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GenerateSummaryDescriptionRequest {
    /// Attachment to generate a summary description for.
    #[prost(string, tag = "1")]
    pub attachment_rid: ::prost::alloc::string::String,
}
/// Response for `KnowledgeBaseService.GenerateSummaryDescription`.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GenerateSummaryDescriptionResponse {
    /// Generated description, returned for user acceptance (not auto-stored).
    #[prost(string, tag = "1")]
    pub summary_description: ::prost::alloc::string::String,
}
/// KnowledgeBaseType defines the types of knowledge base
/// (see `KnowledgeBase.r#type` and `CreateOrUpdateKnowledgeBaseRequest.r#type`).
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum KnowledgeBaseType {
    /// defaults to PROMPT
    Unspecified = 0,
    /// knowledge base gets added directly to prompt (needs to be small enough!)
    Prompt = 1,
    /// knowledge base gets used via vector search on embeddings
    Embedding = 2,
}
477impl KnowledgeBaseType {
478    /// String value of the enum field names used in the ProtoBuf definition.
479    ///
480    /// The values are not transformed in any way and thus are considered stable
481    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
482    pub fn as_str_name(&self) -> &'static str {
483        match self {
484            Self::Unspecified => "KNOWLEDGE_BASE_TYPE_UNSPECIFIED",
485            Self::Prompt => "KNOWLEDGE_BASE_TYPE_PROMPT",
486            Self::Embedding => "KNOWLEDGE_BASE_TYPE_EMBEDDING",
487        }
488    }
489    /// Creates an enum from field names used in the ProtoBuf definition.
490    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
491        match value {
492            "KNOWLEDGE_BASE_TYPE_UNSPECIFIED" => Some(Self::Unspecified),
493            "KNOWLEDGE_BASE_TYPE_PROMPT" => Some(Self::Prompt),
494            "KNOWLEDGE_BASE_TYPE_EMBEDDING" => Some(Self::Embedding),
495            _ => None,
496        }
497    }
498}
/// Generated client implementations.
pub mod knowledge_base_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// KnowledgeBaseService provides AI-powered knowledge base management
    #[derive(Debug, Clone)]
    pub struct KnowledgeBaseServiceClient<T> {
        inner: tonic::client::Grpc<T>,
    }
    impl KnowledgeBaseServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> KnowledgeBaseServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        /// Wrap an already-constructed transport/service in a client.
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        /// Like [`Self::new`], but routes requests to the given origin `Uri`.
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        /// Build a client whose requests pass through `interceptor` before
        /// reaching the transport (e.g. to attach metadata to each request).
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> KnowledgeBaseServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            KnowledgeBaseServiceClient::new(InterceptedService::new(inner, interceptor))
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// CreateOrUpdateKnowledgeBase creates a knowledge base in the workspace
        pub async fn create_or_update_knowledge_base(
            &mut self,
            request: impl tonic::IntoRequest<super::CreateOrUpdateKnowledgeBaseRequest>,
        ) -> std::result::Result<
            tonic::Response<super::CreateOrUpdateKnowledgeBaseResponse>,
            tonic::Status,
        > {
            // Wait for the underlying service to report readiness before sending.
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            // Fully-qualified gRPC path for this RPC; the `GrpcMethod` extension
            // below tags the request with its service/method name.
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/CreateOrUpdateKnowledgeBase",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.KnowledgeBaseService",
                        "CreateOrUpdateKnowledgeBase",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// List returns all knowledge bases in the specified workspace
        pub async fn list(
            &mut self,
            request: impl tonic::IntoRequest<super::ListRequest>,
        ) -> std::result::Result<tonic::Response<super::ListResponse>, tonic::Status> {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/List",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "List"));
            self.inner.unary(req, path, codec).await
        }
        /// Delete removes a knowledge base by its RID
        pub async fn delete(
            &mut self,
            request: impl tonic::IntoRequest<super::DeleteRequest>,
        ) -> std::result::Result<tonic::Response<super::DeleteResponse>, tonic::Status> {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/Delete",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "Delete"));
            self.inner.unary(req, path, codec).await
        }
        /// GetBatch retrieves multiple knowledge bases by their RIDs
        pub async fn get_batch(
            &mut self,
            request: impl tonic::IntoRequest<super::GetBatchRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GetBatchResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/GetBatch",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "GetBatch"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// GenerateSummaryDescription generates a summary description for an attachment rid
        pub async fn generate_summary_description(
            &mut self,
            request: impl tonic::IntoRequest<super::GenerateSummaryDescriptionRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GenerateSummaryDescriptionResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/GenerateSummaryDescription",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.KnowledgeBaseService",
                        "GenerateSummaryDescription",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}
/// Request to look up the snapshot RID recorded for a user message in a conversation.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetSnapshotRidByUserMessageIdRequest {
    /// Conversation containing the message.
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    /// ID of the user message whose snapshot is requested.
    #[prost(string, tag = "2")]
    pub message_id: ::prost::alloc::string::String,
}
/// Will return an empty response body in the case where the message id exists, but there is no associated snapshot
/// This occurs in the instance where a message was sent in a non-workbook context
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetSnapshotRidByUserMessageIdResponse {
    /// `None` when the message has no associated snapshot (see message docs above).
    #[prost(string, optional, tag = "1")]
    pub snapshot_rid: ::core::option::Option<::prost::alloc::string::String>,
}
/// ReadOnlyMode configures read-only mode where edit tools are shadowed/disabled
///
/// Empty marker message; selecting it in `ConversationMode.mode` is the signal.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct ReadOnlyMode {}
/// EditMode configures edit mode where all tools are available
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct EditMode {
    /// when set to true, we auto accept edits for any tools typically requiring approval
    /// (`None` means the field was not set on the wire)
    #[prost(bool, optional, tag = "1")]
    pub auto_accept: ::core::option::Option<bool>,
}
/// ConversationMode specifies the mode of the conversation
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct ConversationMode {
    /// Exactly one of read-only or edit mode; `None` if unset on the wire.
    #[prost(oneof = "conversation_mode::Mode", tags = "1, 2")]
    pub mode: ::core::option::Option<conversation_mode::Mode>,
}
/// Nested message and enum types in `ConversationMode`.
pub mod conversation_mode {
    #[derive(Clone, Copy, PartialEq, ::prost::Oneof)]
    pub enum Mode {
        /// Edit tools are shadowed/disabled.
        #[prost(message, tag = "1")]
        ReadOnly(super::ReadOnlyMode),
        /// All tools are available.
        #[prost(message, tag = "2")]
        Edit(super::EditMode),
    }
}
/// When the agent makes a request to use a tool, the user responds
/// with one of these for every request - mapping a tool id to its approval/denial
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ToolApprovalResult {
    /// identifies the tool call
    #[prost(string, tag = "1")]
    pub tool_call_id: ::prost::alloc::string::String,
    /// Approval or denial decision for the identified tool call.
    #[prost(oneof = "tool_approval_result::Response", tags = "2, 3")]
    pub response: ::core::option::Option<tool_approval_result::Response>,
}
/// Nested message and enum types in `ToolApprovalResult`.
pub mod tool_approval_result {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Response {
        /// The user approved the tool call (optionally with overridden args).
        #[prost(message, tag = "2")]
        Approved(super::ToolApprovedResponse),
        /// The user denied the tool call, with a reason.
        #[prost(message, tag = "3")]
        Denied(super::ToolDeniedResponse),
    }
}
/// Approval of a requested tool call.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ToolApprovedResponse {
    /// json string representation of the override argument if the user
    /// needs it to be changed in some way
    #[prost(string, optional, tag = "1")]
    pub override_args: ::core::option::Option<::prost::alloc::string::String>,
}
/// Denial of a requested tool call.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ToolDeniedResponse {
    /// Free-form reason for the denial.
    #[prost(string, tag = "2")]
    pub denial_reason: ::prost::alloc::string::String,
}
/// RetryRequest retries the last request (e.g., if it was interrupted/failed part-way through)
///
/// Empty marker message; selecting it in `StreamChatRequest.request_type` is the signal.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct RetryRequest {}
/// UserPromptRequest contains a new user message
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UserPromptRequest {
    /// The user's message content.
    #[prost(message, optional, tag = "1")]
    pub message: ::core::option::Option<UserModelMessage>,
    /// Optional: image files to provide to the agent
    #[prost(message, repeated, tag = "2")]
    pub images: ::prost::alloc::vec::Vec<ImagePart>,
}
/// ToolApprovalRequest contains tool approval results
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ToolApprovalRequest {
    /// One approval/denial result per outstanding tool call.
    #[prost(message, repeated, tag = "1")]
    pub tool_approvals: ::prost::alloc::vec::Vec<ToolApprovalResult>,
}
/// StreamChatRequest is a request to stream chat messages for AI agent.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StreamChatRequest {
    /// The conversation ID
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    /// DEPRECATED: use request_type.user_prompt.message instead
    #[deprecated]
    #[prost(message, optional, tag = "2")]
    pub message: ::core::option::Option<UserModelMessage>,
    /// DEPRECATED: use request_type.user_prompt.images instead
    #[deprecated]
    #[prost(message, repeated, tag = "3")]
    pub images: ::prost::alloc::vec::Vec<ImagePart>,
    /// DEPRECATED: use request_type.tool_approval instead
    #[deprecated]
    #[prost(message, repeated, tag = "6")]
    pub tool_approvals: ::prost::alloc::vec::Vec<ToolApprovalResult>,
    /// The type of request - exactly one should be set
    #[prost(oneof = "stream_chat_request::RequestType", tags = "7, 8, 9")]
    pub request_type: ::core::option::Option<stream_chat_request::RequestType>,
    /// Context-specific fields based on the oneofKind.
    #[prost(oneof = "stream_chat_request::Context", tags = "4, 5, 10, 11")]
    pub context: ::core::option::Option<stream_chat_request::Context>,
}
/// Nested message and enum types in `StreamChatRequest`.
pub mod stream_chat_request {
    /// The type of request - exactly one should be set
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum RequestType {
        /// Retry the last request.
        #[prost(message, tag = "7")]
        Retry(super::RetryRequest),
        /// Send a new user message (and optional images).
        #[prost(message, tag = "8")]
        UserPrompt(super::UserPromptRequest),
        /// Respond to outstanding tool-approval requests.
        #[prost(message, tag = "9")]
        ToolApproval(super::ToolApprovalRequest),
    }
    /// Context-specific fields based on the oneofKind.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Context {
        /// Chat in the context of a workbook.
        #[prost(message, tag = "4")]
        Workbook(super::WorkbookContext),
        /// Chat with no resource-specific context.
        #[prost(message, tag = "5")]
        Global(super::GlobalContext),
        /// Chat while editing a checklist.
        #[prost(message, tag = "10")]
        Checklist(super::ChecklistContext),
        /// Chat while editing a template.
        #[prost(message, tag = "11")]
        Template(super::TemplateContext),
    }
}
/// WorkbookContext contains workbook-specific context fields
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct WorkbookContext {
    /// RID of the workbook to use for context
    #[prost(string, tag = "1")]
    pub workbook_rid: ::prost::alloc::string::String,
    /// The user's presence in the workbook
    /// (tab index and viewed time range; see `WorkbookUserPresence`)
    #[prost(message, optional, tag = "2")]
    pub user_presence: ::core::option::Option<WorkbookUserPresence>,
}
/// ChecklistContext for use when the agent is being messaged in the context of a
/// checklist currently being edited
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ChecklistContext {
    /// RID of the checklist being edited
    #[prost(string, tag = "1")]
    pub checklist_rid: ::prost::alloc::string::String,
    /// Name of the branch that this edit is being done on
    /// This is equivalent to the 'draft name' for a check being edited in the UI
    #[prost(string, tag = "2")]
    pub branch_name: ::prost::alloc::string::String,
    /// A checklist opened in edit mode will always have some resource by which it
    /// is referencing for viewing. It can either be an asset or a run
    #[prost(oneof = "checklist_context::ReferenceRid", tags = "3, 4")]
    pub reference_rid: ::core::option::Option<checklist_context::ReferenceRid>,
}
/// Nested message and enum types in `ChecklistContext`.
pub mod checklist_context {
    /// A checklist opened in edit mode will always have some resource by which it
    /// is referencing for viewing. It can either be an asset or a run
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum ReferenceRid {
        /// RID of the asset being referenced.
        #[prost(string, tag = "3")]
        Asset(::prost::alloc::string::String),
        /// RID of the run being referenced.
        #[prost(string, tag = "4")]
        Run(::prost::alloc::string::String),
    }
}
/// TemplateContext for use when the agent is being messaged in the context of a
/// template currently being edited.
/// This provides the agent with the same toolset as the workbook tools
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TemplateContext {
    /// RID of template being edited
    #[prost(string, tag = "1")]
    pub template_rid: ::prost::alloc::string::String,
    /// Name of the branch this edit is being done on
    #[prost(string, tag = "2")]
    pub branch_name: ::prost::alloc::string::String,
    /// Defines the datascope in view in the editor
    #[prost(oneof = "template_context::ReferenceRid", tags = "3, 4")]
    pub reference_rid: ::core::option::Option<template_context::ReferenceRid>,
}
/// Nested message and enum types in `TemplateContext`.
pub mod template_context {
    /// Defines the datascope in view in the editor
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum ReferenceRid {
        /// RID of the asset in view.
        #[prost(string, tag = "3")]
        Asset(::prost::alloc::string::String),
        /// RID of the run in view.
        #[prost(string, tag = "4")]
        Run(::prost::alloc::string::String),
    }
}
/// DefaultContext (no context)
///
/// Empty marker message used when chatting outside any specific resource.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct GlobalContext {}
/// WorkbookUserPresence contains the user's presence in the workbook
/// which is used to describe what the user is viewing at the time of the message.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct WorkbookUserPresence {
    /// Index of the workbook tab the user is viewing.
    #[prost(int32, tag = "1")]
    pub tab_index: i32,
    /// Time range the user is viewing.
    #[prost(message, optional, tag = "2")]
    pub range: ::core::option::Option<TimeRange>,
}
/// CreateConversation request will create a new conversation thread.
/// If old conversation id is not set, a brand new, clear chat is created.
/// If old conversation id is set without a previous message id, the full conversation thread will be copied.
/// If old conversation id is set together with a previous message id, the conversation thread up until that message will be copied;
/// this case is useful for branching a conversation into a new thread.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateConversationRequest {
    /// Title for the new conversation.
    #[prost(string, tag = "1")]
    pub title: ::prost::alloc::string::String,
    /// Workspace in which to create the conversation.
    #[prost(string, tag = "2")]
    pub workspace_rid: ::prost::alloc::string::String,
    /// Optional: conversation to copy/branch from (see message-level docs).
    #[prost(string, optional, tag = "3")]
    pub old_conversation_rid: ::core::option::Option<::prost::alloc::string::String>,
    /// Optional: message id at which the copied thread is cut (for branching).
    #[prost(string, optional, tag = "4")]
    pub previous_message_id: ::core::option::Option<::prost::alloc::string::String>,
    /// Optional: mode (read-only/edit) for the new conversation.
    #[prost(message, optional, tag = "5")]
    pub conversation_mode: ::core::option::Option<ConversationMode>,
}
/// CreateConversationResponse will return the conversation id for the new conversation
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateConversationResponse {
    /// RID of the newly created conversation.
    #[prost(string, tag = "1")]
    pub new_conversation_rid: ::prost::alloc::string::String,
}
/// Updates the fields if specified (optional means no change for that field)
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UpdateConversationMetadataRequest {
    /// New title; unset means leave the title unchanged.
    #[prost(string, optional, tag = "1")]
    pub title: ::core::option::Option<::prost::alloc::string::String>,
    /// RID of the conversation to update.
    #[prost(string, tag = "2")]
    pub conversation_rid: ::prost::alloc::string::String,
    /// New mode; unset means leave the mode unchanged.
    #[prost(message, optional, tag = "3")]
    pub conversation_mode: ::core::option::Option<ConversationMode>,
}
/// Empty response acknowledging a metadata update.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct UpdateConversationMetadataResponse {}
/// Request to delete a conversation, identified by its RID.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteConversationRequest {
    /// RID of the conversation to delete.
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
}
/// Empty response acknowledging a conversation deletion.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct DeleteConversationResponse {}
/// a GetConversationRequest allows you to retrieve a subset of messages from a conversation thread represented
/// by provided rid. To start from a particular message - you can also provide a message id.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetConversationRequest {
    /// RID of the conversation to read.
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    /// Optional: message id at which the page starts.
    #[prost(string, optional, tag = "2")]
    pub page_start_message_id: ::core::option::Option<::prost::alloc::string::String>,
    /// Optional: maximum number of messages to return.
    #[prost(int32, optional, tag = "3")]
    pub max_message_count: ::core::option::Option<i32>,
}
/// a CompactConversationRequest allows you to reduce the token count in your conversation by up to half
/// will be a no op if current conversation has not yet reached at least half of the max token count
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CompactConversationRequest {
    /// RID of the conversation to compact.
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
}
/// returns the new token count of the now compacted conversation
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct CompactConversationResponse {
    /// Token usage after compaction (see `ContextStatus`).
    #[prost(message, optional, tag = "1")]
    pub context: ::core::option::Option<ContextStatus>,
}
/// Model message with id allows you to identify the message ID of a given message
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ModelMessageWithId {
    /// Unique id of this message within the conversation.
    #[prost(string, tag = "3")]
    pub message_id: ::prost::alloc::string::String,
    /// WB agent user messages can have snapshot rids associated with them
    #[prost(string, optional, tag = "4")]
    pub snapshot_rid: ::core::option::Option<::prost::alloc::string::String>,
    /// Descriptions of tool calls whose approval state is surfaced to the user.
    #[prost(message, repeated, tag = "5")]
    pub tool_approval_requests: ::prost::alloc::vec::Vec<ToolCallDescription>,
    /// The message payload — exactly one of the `Content` variants below.
    #[prost(oneof = "model_message_with_id::Content", tags = "1, 2, 6")]
    pub content: ::core::option::Option<model_message_with_id::Content>,
}
/// Nested message and enum types in `ModelMessageWithId`.
pub mod model_message_with_id {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Content {
        /// A user or assistant chat message.
        #[prost(message, tag = "1")]
        Message(super::ModelMessage),
        /// A high-level description of a tool call the agent made.
        #[prost(message, tag = "2")]
        ToolAction(super::ToolAction),
        /// Success/failure confirmation for a tool action.
        #[prost(message, tag = "6")]
        ToolActionConfirmation(super::ToolActionConfirmation),
    }
}
/// Response for GetConversation: a page of messages plus conversation metadata.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetConversationResponse {
    /// Messages in conversation order.
    #[prost(message, repeated, tag = "1")]
    pub ordered_messages: ::prost::alloc::vec::Vec<ModelMessageWithId>,
    /// Metadata for the conversation the messages belong to.
    #[prost(message, optional, tag = "2")]
    pub conversation_metadata: ::core::option::Option<ConversationMetadata>,
}
/// Request for the metadata of a single conversation, identified by its RID.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetConversationMetadataRequest {
    /// RID of the conversation whose metadata is requested.
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
}
/// Response carrying the requested conversation's metadata.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetConversationMetadataResponse {
    /// Metadata of the requested conversation.
    #[prost(message, optional, tag = "1")]
    pub conversation_metadata: ::core::option::Option<ConversationMetadata>,
}
/// Request for a page of messages from a conversation.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetConversationMessagesRequest {
    /// RID of the conversation to read.
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    /// Optional: message id at which the page starts.
    #[prost(string, optional, tag = "2")]
    pub page_start_message_id: ::core::option::Option<::prost::alloc::string::String>,
    /// Optional: maximum number of messages to return.
    #[prost(int32, optional, tag = "3")]
    pub max_message_count: ::core::option::Option<i32>,
}
/// Response carrying a page of messages and an optional continuation token.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetConversationMessagesResponse {
    /// Messages in conversation order.
    #[prost(message, repeated, tag = "1")]
    pub ordered_messages: ::prost::alloc::vec::Vec<ModelMessageWithId>,
    /// Token for fetching the next page; unset when there are no more pages.
    #[prost(string, optional, tag = "2")]
    pub next_page_token: ::core::option::Option<::prost::alloc::string::String>,
}
/// Will generate all conversation threads that this user has in this workspace
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListConversationsRequest {
    /// Workspace whose conversations are listed.
    #[prost(string, tag = "1")]
    pub workspace_rid: ::prost::alloc::string::String,
    /// Optional: continuation token from a previous response.
    #[prost(string, optional, tag = "2")]
    pub next_page_token: ::core::option::Option<::prost::alloc::string::String>,
    /// Optional: maximum number of conversations per page.
    #[prost(int32, optional, tag = "3")]
    pub page_size: ::core::option::Option<i32>,
}
/// Metadata describing a single conversation thread.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ConversationMetadata {
    /// RID identifying the conversation.
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    /// Human-readable conversation title.
    #[prost(string, tag = "2")]
    pub title: ::prost::alloc::string::String,
    /// When the conversation was created.
    #[prost(message, optional, tag = "3")]
    pub created_at: ::core::option::Option<
        super::super::super::google::protobuf::Timestamp,
    >,
    /// When the conversation was last updated.
    #[prost(message, optional, tag = "4")]
    pub last_updated_at: ::core::option::Option<
        super::super::super::google::protobuf::Timestamp,
    >,
    /// Read-only/edit mode of the conversation.
    #[prost(message, optional, tag = "5")]
    pub mode: ::core::option::Option<ConversationMode>,
    /// Current token usage relative to the model's context limit.
    #[prost(message, optional, tag = "6")]
    pub current_context: ::core::option::Option<ContextStatus>,
}
/// ListConversationsResponse is a list of conversations that can be used in a call to GetConversationRequest
/// to get a full conversation from storage. These are ordered by creation time.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListConversationsResponse {
    /// Conversation metadata entries, ordered by creation time.
    #[prost(message, repeated, tag = "1")]
    pub conversations: ::prost::alloc::vec::Vec<ConversationMetadata>,
    /// Token for fetching the next page; unset when there are no more pages.
    #[prost(string, optional, tag = "2")]
    pub next_page_token: ::core::option::Option<::prost::alloc::string::String>,
}
/// A time interval delimited by optional start and end timestamps.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct TimeRange {
    /// Inclusive start of the range — TODO confirm inclusivity in the .proto.
    #[prost(message, optional, tag = "1")]
    pub range_start: ::core::option::Option<Timestamp>,
    /// End of the range.
    #[prost(message, optional, tag = "2")]
    pub range_end: ::core::option::Option<Timestamp>,
}
/// A seconds + nanoseconds timestamp local to this package (distinct from
/// `google.protobuf.Timestamp`, which is used elsewhere in this file).
///
/// NOTE(review): `seconds` is i32 here, unlike google.protobuf.Timestamp's
/// 64-bit seconds — confirm the intended epoch/range in the .proto.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct Timestamp {
    /// Seconds component.
    #[prost(int32, tag = "1")]
    pub seconds: i32,
    /// Nanoseconds component.
    #[prost(int32, tag = "2")]
    pub nanoseconds: i32,
}
/// ModelMessage is a discriminated union of system, user, assistant, and tool messages.
/// Each message type has its own structure and content.
///
/// NOTE(review): only user and assistant variants are currently modeled in the
/// oneof below, despite the comment above mentioning system/tool messages.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ModelMessage {
    /// The message kind — exactly one variant.
    #[prost(oneof = "model_message::Kind", tags = "1, 2")]
    pub kind: ::core::option::Option<model_message::Kind>,
}
/// Nested message and enum types in `ModelMessage`.
pub mod model_message {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Kind {
        /// A message authored by the user.
        #[prost(message, tag = "1")]
        User(super::UserModelMessage),
        /// A message authored by the assistant/agent.
        #[prost(message, tag = "2")]
        Assistant(super::AssistantModelMessage),
    }
}
/// A user message containing text
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UserModelMessage {
    /// Ordered content parts (despite the name, each element is a
    /// `UserContentPart`, currently text-only).
    #[prost(message, repeated, tag = "1")]
    pub text: ::prost::alloc::vec::Vec<UserContentPart>,
}
/// An assistant message containing text
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AssistantModelMessage {
    /// Ordered content parts (text and/or reasoning).
    #[prost(message, repeated, tag = "1")]
    pub content_parts: ::prost::alloc::vec::Vec<AssistantContentPart>,
}
/// One content part of a user message; currently text is the only variant.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UserContentPart {
    /// The part payload.
    #[prost(oneof = "user_content_part::Part", tags = "1")]
    pub part: ::core::option::Option<user_content_part::Part>,
}
/// Nested message and enum types in `UserContentPart`.
pub mod user_content_part {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Part {
        /// Plain text content.
        #[prost(message, tag = "1")]
        Text(super::TextPart),
    }
}
/// Content part for assistant messages: can be text, reasoning, or mutation.
///
/// NOTE(review): only text and reasoning variants exist in the oneof below;
/// the "mutation" mentioned above is not modeled here.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AssistantContentPart {
    /// The part payload.
    #[prost(oneof = "assistant_content_part::Part", tags = "1, 2")]
    pub part: ::core::option::Option<assistant_content_part::Part>,
}
/// Nested message and enum types in `AssistantContentPart`.
pub mod assistant_content_part {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Part {
        /// Plain text content.
        #[prost(message, tag = "1")]
        Text(super::TextPart),
        /// Model reasoning content.
        #[prost(message, tag = "2")]
        Reasoning(super::ReasoningPart),
    }
}
/// Text part for user or assistant messages.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextPart {
    /// The text content.
    #[prost(string, tag = "1")]
    pub text: ::prost::alloc::string::String,
}
/// User-supplied image part.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ImagePart {
    /// The base64-encoded image data
    /// NOTE(review): the field is proto `bytes`; confirm whether the payload is
    /// actually base64 text or raw image bytes.
    #[prost(bytes = "vec", tag = "1")]
    pub data: ::prost::alloc::vec::Vec<u8>,
    /// The media type of the image (e.g. "image/png", "image/jpeg")
    #[prost(string, optional, tag = "2")]
    pub media_type: ::core::option::Option<::prost::alloc::string::String>,
    /// Optional: the filename of the image
    #[prost(string, optional, tag = "3")]
    pub filename: ::core::option::Option<::prost::alloc::string::String>,
}
/// Reasoning part for assistant messages.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningPart {
    /// The reasoning text.
    #[prost(string, tag = "1")]
    pub reasoning: ::prost::alloc::string::String,
}
/// StreamChatResponse is a discriminated union response to a StreamChatRequest
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StreamChatResponse {
    /// One stream event per message; exactly one variant is set.
    /// NOTE(review): tag 9 is skipped — presumably a removed field; check the .proto.
    #[prost(
        oneof = "stream_chat_response::Response",
        tags = "1, 2, 3, 4, 5, 6, 7, 8, 10, 11"
    )]
    pub response: ::core::option::Option<stream_chat_response::Response>,
}
/// Nested message and enum types in `StreamChatResponse`.
pub mod stream_chat_response {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Response {
        /// The chat session ended.
        #[prost(message, tag = "1")]
        Finish(super::Finish),
        /// An error occurred during the session.
        #[prost(message, tag = "2")]
        Error(super::Error),
        /// Start of a text message stream.
        #[prost(message, tag = "3")]
        TextStart(super::TextStart),
        /// Continuation chunk of a text message.
        #[prost(message, tag = "4")]
        TextDelta(super::TextDelta),
        /// End of a text message stream.
        #[prost(message, tag = "5")]
        TextEnd(super::TextEnd),
        /// Start of a reasoning message stream.
        #[prost(message, tag = "6")]
        ReasoningStart(super::ReasoningStart),
        /// Continuation chunk of a reasoning message.
        #[prost(message, tag = "7")]
        ReasoningDelta(super::ReasoningDelta),
        /// End of a reasoning message stream.
        #[prost(message, tag = "8")]
        ReasoningEnd(super::ReasoningEnd),
        /// High-level description of a tool call being made.
        #[prost(message, tag = "10")]
        ToolAction(super::ToolAction),
        /// Success/failure confirmation for a prior ToolAction.
        #[prost(message, tag = "11")]
        ToolActionConfirmation(super::ToolActionConfirmation),
    }
}
/// Describes a tool call for display and approval purposes.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ToolCallDescription {
    /// Identifies the tool call.
    #[prost(string, tag = "1")]
    pub tool_call_id: ::prost::alloc::string::String,
    /// Name of the tool being invoked.
    #[prost(string, tag = "2")]
    pub tool_name: ::prost::alloc::string::String,
    /// string representation of the proposed tool args for display
    #[prost(string, tag = "3")]
    pub tool_args_json_string: ::prost::alloc::string::String,
    /// used to conditionally render an approval button based the outcome of the tool call
    /// (a `ToolCallStatus` value)
    #[prost(enumeration = "ToolCallStatus", tag = "4")]
    pub status: i32,
}
/// Indicates the end of a chat session
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Finish {
    /// The message ids in order of all generated messages for this agent run
    /// These ids can be used to branch a message from that specific message
    #[prost(string, repeated, tag = "1")]
    pub ordered_message_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
    /// In the case that this is the first agent run in a conversation thread, we also
    /// return the new conversation title generated
    #[prost(string, optional, tag = "2")]
    pub new_title: ::core::option::Option<::prost::alloc::string::String>,
    /// Tool calls whose approval state is surfaced to the user at run end.
    #[prost(message, repeated, tag = "3")]
    pub tool_approval_requests: ::prost::alloc::vec::Vec<ToolCallDescription>,
    /// Token usage after this run (see `ContextStatus`).
    #[prost(message, optional, tag = "4")]
    pub updated_context: ::core::option::Option<ContextStatus>,
}
/// An error that occurred during the chat session
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Error {
    /// Human-readable error description.
    #[prost(string, tag = "1")]
    pub message: ::prost::alloc::string::String,
}
/// Indicates the start of a text message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextStart {
    /// uniquely identifies the text message (e.g. uuid) so that the client can
    /// merge parallel message streams (if it happens).
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// A delta (continuation) of a text message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextDelta {
    /// Matches the `id` from the corresponding `TextStart`.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// The next chunk of text
    #[prost(string, tag = "2")]
    pub delta: ::prost::alloc::string::String,
}
/// Indicates the end of a text message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextEnd {
    /// Matches the `id` from the corresponding `TextStart`.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// Indicates the start of a reasoning message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningStart {
    /// Uniquely identifies this reasoning message stream.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// A delta (continuation) of a reasoning message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningDelta {
    /// Matches the `id` from the corresponding `ReasoningStart`.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// The next chunk of reasoning
    #[prost(string, tag = "2")]
    pub delta: ::prost::alloc::string::String,
}
/// Indicates the end of a reasoning message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningEnd {
    /// Matches the `id` from the corresponding `ReasoningStart`.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// this is a concise description of a tool call that the agent is making internally
/// without revealing too much detail about the tool call, it informs the user what the agent is doing
/// at a high level. the format is: `{tool_action_verb} {tool_target}` for example:
/// "Search channels for My Datasource"
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ToolAction {
    /// Uniquely identifies this tool action; the matching
    /// `ToolActionConfirmation` carries the same id.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// "Thought", "Read", "Find", "Look-up", etc.
    #[prost(string, tag = "2")]
    pub tool_action_verb: ::prost::alloc::string::String,
    /// "workbook", "channel", "variable", "panel", etc.
    #[prost(string, optional, tag = "3")]
    pub tool_target: ::core::option::Option<::prost::alloc::string::String>,
}
/// When we see a ToolAction in the stream, we wait for the corresponding ToolActionConfirmation
/// to indicate whether or not the tool call has successfully executed
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ToolActionConfirmation {
    /// Matches the `id` of the corresponding `ToolAction`.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// Success or failure outcome of the tool call.
    #[prost(oneof = "tool_action_confirmation::Outcome", tags = "2, 3")]
    pub outcome: ::core::option::Option<tool_action_confirmation::Outcome>,
}
/// Nested message and enum types in `ToolActionConfirmation`.
pub mod tool_action_confirmation {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Outcome {
        /// The tool call executed successfully.
        #[prost(message, tag = "2")]
        Success(super::ToolActionSuccess),
        /// The tool call failed.
        #[prost(message, tag = "3")]
        Failure(super::ToolActionFailure),
    }
}
/// Successful outcome of a tool action.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ToolActionSuccess {
    /// Human-readable success message.
    #[prost(string, tag = "1")]
    pub tool_success_message: ::prost::alloc::string::String,
}
/// Failed outcome of a tool action.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ToolActionFailure {
    /// Human-readable error message.
    #[prost(string, tag = "1")]
    pub tool_error_message: ::prost::alloc::string::String,
}
/// ContextStatus represents the current token usage of a conversation relative to the selected model's context limit.
/// NOTE: A curr_token_count of zero means the token count is not yet known (e.g., no prompt has been sent yet,
/// or the conversation was just branched). It does not necessarily mean the conversation is empty.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct ContextStatus {
    /// Current token count (0 = not yet known; see note above).
    #[prost(int32, tag = "1")]
    pub curr_token_count: i32,
    /// Context-window limit of the selected model, in tokens.
    #[prost(int32, tag = "2")]
    pub model_context_limit: i32,
}
/// Approval status of a tool call; used by `ToolCallDescription.status`.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum ToolCallStatus {
    /// Default/unknown status.
    Unspecified = 0,
    /// The user approved the tool call.
    Approved = 1,
    /// The user denied the tool call.
    Denied = 2,
    /// The tool call is awaiting the user's decision.
    AwaitingApproval = 3,
}
impl ToolCallStatus {
    /// String value of the enum field names used in the ProtoBuf definition.
    ///
    /// The values are not transformed in any way and thus are considered stable
    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
    pub fn as_str_name(&self) -> &'static str {
        match self {
            Self::Unspecified => "TOOL_CALL_STATUS_UNSPECIFIED",
            Self::Approved => "TOOL_CALL_STATUS_APPROVED",
            Self::Denied => "TOOL_CALL_STATUS_DENIED",
            Self::AwaitingApproval => "TOOL_CALL_STATUS_AWAITING_APPROVAL",
        }
    }
    /// Creates an enum from field names used in the ProtoBuf definition.
    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
        match value {
            "TOOL_CALL_STATUS_UNSPECIFIED" => Some(Self::Unspecified),
            "TOOL_CALL_STATUS_APPROVED" => Some(Self::Approved),
            "TOOL_CALL_STATUS_DENIED" => Some(Self::Denied),
            "TOOL_CALL_STATUS_AWAITING_APPROVAL" => Some(Self::AwaitingApproval),
            _ => None,
        }
    }
}
1400/// Generated client implementations.
1401pub mod ai_agent_service_client {
1402    #![allow(
1403        unused_variables,
1404        dead_code,
1405        missing_docs,
1406        clippy::wildcard_imports,
1407        clippy::let_unit_value,
1408    )]
1409    use tonic::codegen::*;
1410    use tonic::codegen::http::Uri;
    /// AIAgentService provides AI-powered assistance for general operations
    #[derive(Debug, Clone)]
    pub struct AiAgentServiceClient<T> {
        /// The underlying tonic gRPC client over transport `T`.
        inner: tonic::client::Grpc<T>,
    }
    impl AiAgentServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            // Establish the transport channel eagerly, then wrap it.
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
1427    impl<T> AiAgentServiceClient<T>
1428    where
1429        T: tonic::client::GrpcService<tonic::body::Body>,
1430        T::Error: Into<StdError>,
1431        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
1432        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
1433    {
1434        pub fn new(inner: T) -> Self {
1435            let inner = tonic::client::Grpc::new(inner);
1436            Self { inner }
1437        }
1438        pub fn with_origin(inner: T, origin: Uri) -> Self {
1439            let inner = tonic::client::Grpc::with_origin(inner, origin);
1440            Self { inner }
1441        }
1442        pub fn with_interceptor<F>(
1443            inner: T,
1444            interceptor: F,
1445        ) -> AiAgentServiceClient<InterceptedService<T, F>>
1446        where
1447            F: tonic::service::Interceptor,
1448            T::ResponseBody: Default,
1449            T: tonic::codegen::Service<
1450                http::Request<tonic::body::Body>,
1451                Response = http::Response<
1452                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
1453                >,
1454            >,
1455            <T as tonic::codegen::Service<
1456                http::Request<tonic::body::Body>,
1457            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
1458        {
1459            AiAgentServiceClient::new(InterceptedService::new(inner, interceptor))
1460        }
1461        /// Compress requests with the given encoding.
1462        ///
1463        /// This requires the server to support it otherwise it might respond with an
1464        /// error.
1465        #[must_use]
1466        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
1467            self.inner = self.inner.send_compressed(encoding);
1468            self
1469        }
1470        /// Enable decompressing responses.
1471        #[must_use]
1472        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
1473            self.inner = self.inner.accept_compressed(encoding);
1474            self
1475        }
1476        /// Limits the maximum size of a decoded message.
1477        ///
1478        /// Default: `4MB`
1479        #[must_use]
1480        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
1481            self.inner = self.inner.max_decoding_message_size(limit);
1482            self
1483        }
1484        /// Limits the maximum size of an encoded message.
1485        ///
1486        /// Default: `usize::MAX`
1487        #[must_use]
1488        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
1489            self.inner = self.inner.max_encoding_message_size(limit);
1490            self
1491        }
1492        /// StreamChat handles bidirectional streaming chat for AI agent
1493        pub async fn stream_chat(
1494            &mut self,
1495            request: impl tonic::IntoRequest<super::StreamChatRequest>,
1496        ) -> std::result::Result<
1497            tonic::Response<tonic::codec::Streaming<super::StreamChatResponse>>,
1498            tonic::Status,
1499        > {
1500            self.inner
1501                .ready()
1502                .await
1503                .map_err(|e| {
1504                    tonic::Status::unknown(
1505                        format!("Service was not ready: {}", e.into()),
1506                    )
1507                })?;
1508            let codec = tonic::codec::ProstCodec::default();
1509            let path = http::uri::PathAndQuery::from_static(
1510                "/nominal.ai.v1.AIAgentService/StreamChat",
1511            );
1512            let mut req = request.into_request();
1513            req.extensions_mut()
1514                .insert(GrpcMethod::new("nominal.ai.v1.AIAgentService", "StreamChat"));
1515            self.inner.server_streaming(req, path, codec).await
1516        }
1517        /// GetConversation [DEPRACATED] handles getting a complete conversation list, with an optional limit on number of messages returned
1518        #[deprecated]
1519        pub async fn get_conversation(
1520            &mut self,
1521            request: impl tonic::IntoRequest<super::GetConversationRequest>,
1522        ) -> std::result::Result<
1523            tonic::Response<super::GetConversationResponse>,
1524            tonic::Status,
1525        > {
1526            self.inner
1527                .ready()
1528                .await
1529                .map_err(|e| {
1530                    tonic::Status::unknown(
1531                        format!("Service was not ready: {}", e.into()),
1532                    )
1533                })?;
1534            let codec = tonic::codec::ProstCodec::default();
1535            let path = http::uri::PathAndQuery::from_static(
1536                "/nominal.ai.v1.AIAgentService/GetConversation",
1537            );
1538            let mut req = request.into_request();
1539            req.extensions_mut()
1540                .insert(
1541                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "GetConversation"),
1542                );
1543            self.inner.unary(req, path, codec).await
1544        }
1545        /// GetConversationMetadata handles getting the conversation metadata like title, current token count etc
1546        pub async fn get_conversation_metadata(
1547            &mut self,
1548            request: impl tonic::IntoRequest<super::GetConversationMetadataRequest>,
1549        ) -> std::result::Result<
1550            tonic::Response<super::GetConversationMetadataResponse>,
1551            tonic::Status,
1552        > {
1553            self.inner
1554                .ready()
1555                .await
1556                .map_err(|e| {
1557                    tonic::Status::unknown(
1558                        format!("Service was not ready: {}", e.into()),
1559                    )
1560                })?;
1561            let codec = tonic::codec::ProstCodec::default();
1562            let path = http::uri::PathAndQuery::from_static(
1563                "/nominal.ai.v1.AIAgentService/GetConversationMetadata",
1564            );
1565            let mut req = request.into_request();
1566            req.extensions_mut()
1567                .insert(
1568                    GrpcMethod::new(
1569                        "nominal.ai.v1.AIAgentService",
1570                        "GetConversationMetadata",
1571                    ),
1572                );
1573            self.inner.unary(req, path, codec).await
1574        }
1575        /// GetConversationMessages handles retrieving the set of conversation messages. Supports pagination
1576        pub async fn get_conversation_messages(
1577            &mut self,
1578            request: impl tonic::IntoRequest<super::GetConversationMessagesRequest>,
1579        ) -> std::result::Result<
1580            tonic::Response<super::GetConversationMessagesResponse>,
1581            tonic::Status,
1582        > {
1583            self.inner
1584                .ready()
1585                .await
1586                .map_err(|e| {
1587                    tonic::Status::unknown(
1588                        format!("Service was not ready: {}", e.into()),
1589                    )
1590                })?;
1591            let codec = tonic::codec::ProstCodec::default();
1592            let path = http::uri::PathAndQuery::from_static(
1593                "/nominal.ai.v1.AIAgentService/GetConversationMessages",
1594            );
1595            let mut req = request.into_request();
1596            req.extensions_mut()
1597                .insert(
1598                    GrpcMethod::new(
1599                        "nominal.ai.v1.AIAgentService",
1600                        "GetConversationMessages",
1601                    ),
1602                );
1603            self.inner.unary(req, path, codec).await
1604        }
1605        /// ListConversations handles getting the list of conversation ids ordered by most recently updated
1606        pub async fn list_conversations(
1607            &mut self,
1608            request: impl tonic::IntoRequest<super::ListConversationsRequest>,
1609        ) -> std::result::Result<
1610            tonic::Response<super::ListConversationsResponse>,
1611            tonic::Status,
1612        > {
1613            self.inner
1614                .ready()
1615                .await
1616                .map_err(|e| {
1617                    tonic::Status::unknown(
1618                        format!("Service was not ready: {}", e.into()),
1619                    )
1620                })?;
1621            let codec = tonic::codec::ProstCodec::default();
1622            let path = http::uri::PathAndQuery::from_static(
1623                "/nominal.ai.v1.AIAgentService/ListConversations",
1624            );
1625            let mut req = request.into_request();
1626            req.extensions_mut()
1627                .insert(
1628                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "ListConversations"),
1629                );
1630            self.inner.unary(req, path, codec).await
1631        }
1632        /// CreateConversation handles creating a conversation and assigning it a conversation rid
1633        pub async fn create_conversation(
1634            &mut self,
1635            request: impl tonic::IntoRequest<super::CreateConversationRequest>,
1636        ) -> std::result::Result<
1637            tonic::Response<super::CreateConversationResponse>,
1638            tonic::Status,
1639        > {
1640            self.inner
1641                .ready()
1642                .await
1643                .map_err(|e| {
1644                    tonic::Status::unknown(
1645                        format!("Service was not ready: {}", e.into()),
1646                    )
1647                })?;
1648            let codec = tonic::codec::ProstCodec::default();
1649            let path = http::uri::PathAndQuery::from_static(
1650                "/nominal.ai.v1.AIAgentService/CreateConversation",
1651            );
1652            let mut req = request.into_request();
1653            req.extensions_mut()
1654                .insert(
1655                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "CreateConversation"),
1656                );
1657            self.inner.unary(req, path, codec).await
1658        }
1659        /// UpdateConversationMetadata handles updating any metadata associated with a conversation
1660        pub async fn update_conversation_metadata(
1661            &mut self,
1662            request: impl tonic::IntoRequest<super::UpdateConversationMetadataRequest>,
1663        ) -> std::result::Result<
1664            tonic::Response<super::UpdateConversationMetadataResponse>,
1665            tonic::Status,
1666        > {
1667            self.inner
1668                .ready()
1669                .await
1670                .map_err(|e| {
1671                    tonic::Status::unknown(
1672                        format!("Service was not ready: {}", e.into()),
1673                    )
1674                })?;
1675            let codec = tonic::codec::ProstCodec::default();
1676            let path = http::uri::PathAndQuery::from_static(
1677                "/nominal.ai.v1.AIAgentService/UpdateConversationMetadata",
1678            );
1679            let mut req = request.into_request();
1680            req.extensions_mut()
1681                .insert(
1682                    GrpcMethod::new(
1683                        "nominal.ai.v1.AIAgentService",
1684                        "UpdateConversationMetadata",
1685                    ),
1686                );
1687            self.inner.unary(req, path, codec).await
1688        }
1689        /// DeleteConversation handles deleting a specific conversation by conversation rid
1690        pub async fn delete_conversation(
1691            &mut self,
1692            request: impl tonic::IntoRequest<super::DeleteConversationRequest>,
1693        ) -> std::result::Result<
1694            tonic::Response<super::DeleteConversationResponse>,
1695            tonic::Status,
1696        > {
1697            self.inner
1698                .ready()
1699                .await
1700                .map_err(|e| {
1701                    tonic::Status::unknown(
1702                        format!("Service was not ready: {}", e.into()),
1703                    )
1704                })?;
1705            let codec = tonic::codec::ProstCodec::default();
1706            let path = http::uri::PathAndQuery::from_static(
1707                "/nominal.ai.v1.AIAgentService/DeleteConversation",
1708            );
1709            let mut req = request.into_request();
1710            req.extensions_mut()
1711                .insert(
1712                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "DeleteConversation"),
1713                );
1714            self.inner.unary(req, path, codec).await
1715        }
1716        /// GetSnapshotRidByUserMessageId handles resolving the snapshot rid of the workbook at the time the message is sent
1717        pub async fn get_snapshot_rid_by_user_message_id(
1718            &mut self,
1719            request: impl tonic::IntoRequest<super::GetSnapshotRidByUserMessageIdRequest>,
1720        ) -> std::result::Result<
1721            tonic::Response<super::GetSnapshotRidByUserMessageIdResponse>,
1722            tonic::Status,
1723        > {
1724            self.inner
1725                .ready()
1726                .await
1727                .map_err(|e| {
1728                    tonic::Status::unknown(
1729                        format!("Service was not ready: {}", e.into()),
1730                    )
1731                })?;
1732            let codec = tonic::codec::ProstCodec::default();
1733            let path = http::uri::PathAndQuery::from_static(
1734                "/nominal.ai.v1.AIAgentService/GetSnapshotRidByUserMessageId",
1735            );
1736            let mut req = request.into_request();
1737            req.extensions_mut()
1738                .insert(
1739                    GrpcMethod::new(
1740                        "nominal.ai.v1.AIAgentService",
1741                        "GetSnapshotRidByUserMessageId",
1742                    ),
1743                );
1744            self.inner.unary(req, path, codec).await
1745        }
1746        /// CompactConversation handles compacting the conversation context into approximately half its original size
1747        pub async fn compact_conversation(
1748            &mut self,
1749            request: impl tonic::IntoRequest<super::CompactConversationRequest>,
1750        ) -> std::result::Result<
1751            tonic::Response<super::CompactConversationResponse>,
1752            tonic::Status,
1753        > {
1754            self.inner
1755                .ready()
1756                .await
1757                .map_err(|e| {
1758                    tonic::Status::unknown(
1759                        format!("Service was not ready: {}", e.into()),
1760                    )
1761                })?;
1762            let codec = tonic::codec::ProstCodec::default();
1763            let path = http::uri::PathAndQuery::from_static(
1764                "/nominal.ai.v1.AIAgentService/CompactConversation",
1765            );
1766            let mut req = request.into_request();
1767            req.extensions_mut()
1768                .insert(
1769                    GrpcMethod::new(
1770                        "nominal.ai.v1.AIAgentService",
1771                        "CompactConversation",
1772                    ),
1773                );
1774            self.inner.unary(req, path, codec).await
1775        }
1776    }
1777}