Skip to main content

nominal_api/proto/
nominal.ai.v1.rs

1// This file is @generated by prost-build.
/// Request for `ModelProviderHealthService.GetProviderStatus`; carries no fields.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct GetProviderStatusRequest {}
4#[derive(Clone, Copy, PartialEq, ::prost::Message)]
5pub struct GetProviderStatusResponse {
6    /// Timestamp when the last status was determined
7    #[prost(message, optional, tag = "1")]
8    pub timestamp: ::core::option::Option<
9        super::super::super::google::protobuf::Timestamp,
10    >,
11    /// Status of the most recent health check probe
12    #[prost(message, optional, tag = "2")]
13    pub last_status: ::core::option::Option<ProviderStatus>,
14    /// Aggregated status over the last 30 minutes (DEGRADED if any check failed or exceeded thresholds)
15    /// Deprecated: Use aggregated_status instead. This field is kept for backward compatibility.
16    #[deprecated]
17    #[prost(message, optional, tag = "3")]
18    pub aggregated_status_over_last_30m: ::core::option::Option<ProviderStatus>,
19    /// Aggregated status over the last iterations, configurable in the BE (DEGRADED if any check failed or exceeded thresholds)
20    #[prost(message, optional, tag = "4")]
21    pub aggregated_status: ::core::option::Option<ProviderStatus>,
22}
/// Health status of the model provider; at most one oneof variant is set.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct ProviderStatus {
    /// `None` when the oneof was not set on the wire.
    #[prost(oneof = "provider_status::Status", tags = "1, 2")]
    pub status: ::core::option::Option<provider_status::Status>,
}
/// Nested message and enum types in `ProviderStatus`.
pub mod provider_status {
    /// Protobuf `oneof`: the provider is reported either `Healthy` or `Degraded`.
    #[derive(Clone, Copy, PartialEq, ::prost::Oneof)]
    pub enum Status {
        #[prost(message, tag = "1")]
        Healthy(super::Healthy),
        #[prost(message, tag = "2")]
        Degraded(super::Degraded),
    }
}
/// Empty marker message used as the healthy variant of `ProviderStatus`.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct Healthy {}
/// Degraded variant of `ProviderStatus`, carrying the machine-readable cause.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct Degraded {
    /// Raw `i32` per prost convention; interpret via `DegradationReason`.
    #[prost(enumeration = "DegradationReason", tag = "1")]
    pub reason: i32,
}
/// Timing metrics for a provider probe (all values in milliseconds, per field names).
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct ProviderMetrics {
    /// Time until the first token arrived, in milliseconds.
    #[prost(int32, tag = "1")]
    pub time_to_first_token_ms: i32,
    /// Total round-trip time, in milliseconds.
    #[prost(int32, tag = "2")]
    pub total_time_ms: i32,
}
/// Reason the provider was classified as degraded.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum DegradationReason {
    /// Proto3 default / unknown value.
    Unspecified = 0,
    HighLatency = 1,
    Failures = 2,
    HighLatencyAndFailures = 3,
}
60impl DegradationReason {
61    /// String value of the enum field names used in the ProtoBuf definition.
62    ///
63    /// The values are not transformed in any way and thus are considered stable
64    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
65    pub fn as_str_name(&self) -> &'static str {
66        match self {
67            Self::Unspecified => "DEGRADATION_REASON_UNSPECIFIED",
68            Self::HighLatency => "DEGRADATION_REASON_HIGH_LATENCY",
69            Self::Failures => "DEGRADATION_REASON_FAILURES",
70            Self::HighLatencyAndFailures => {
71                "DEGRADATION_REASON_HIGH_LATENCY_AND_FAILURES"
72            }
73        }
74    }
75    /// Creates an enum from field names used in the ProtoBuf definition.
76    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
77        match value {
78            "DEGRADATION_REASON_UNSPECIFIED" => Some(Self::Unspecified),
79            "DEGRADATION_REASON_HIGH_LATENCY" => Some(Self::HighLatency),
80            "DEGRADATION_REASON_FAILURES" => Some(Self::Failures),
81            "DEGRADATION_REASON_HIGH_LATENCY_AND_FAILURES" => {
82                Some(Self::HighLatencyAndFailures)
83            }
84            _ => None,
85        }
86    }
87}
/// Generated client implementations.
pub mod model_provider_health_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// ModelProviderHealthService monitors the health and performance of the backing LLM model provider.
    /// It runs lightweight health checks every 5 minutes to measure provider responsiveness and reliability,
    /// independent of the complexity of user prompts.
    #[derive(Debug, Clone)]
    pub struct ModelProviderHealthServiceClient<T> {
        // Underlying tonic gRPC machinery wrapping the transport `T`.
        inner: tonic::client::Grpc<T>,
    }
    impl ModelProviderHealthServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> ModelProviderHealthServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        /// Wrap an already-constructed transport/service in a client.
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        /// Like `new`, but also sets the origin URI used for request paths.
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        /// Wrap the transport with a tonic interceptor that runs on every request.
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> ModelProviderHealthServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            ModelProviderHealthServiceClient::new(
                InterceptedService::new(inner, interceptor),
            )
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// GetProviderStatus returns the current health status of the model provider based on recent health checks.
        /// The status is HEALTHY if all checks in the last 30 minutes passed latency thresholds,
        /// or DEGRADED if any checks exceeded latency thresholds or failed entirely.
        pub async fn get_provider_status(
            &mut self,
            request: impl tonic::IntoRequest<super::GetProviderStatusRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GetProviderStatusResponse>,
            tonic::Status,
        > {
            // Wait for the underlying service to become ready before sending.
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.ModelProviderHealthService/GetProviderStatus",
            );
            let mut req = request.into_request();
            // Attach service/method metadata so interceptors and tracing can identify the RPC.
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.ModelProviderHealthService",
                        "GetProviderStatus",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}
/// CreateOrUpdateKnowledgeBaseRequest is a request to create a knowledge from an attachment in the attachment's
/// workspace. Subsequent calls for the same attachment will overwrite the existing knowledge base.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateOrUpdateKnowledgeBaseRequest {
    /// RID of the attachment the knowledge base is created from.
    #[prost(string, tag = "1")]
    pub attachment_rid: ::prost::alloc::string::String,
    /// summary of the knowledge base, will be used by the LLM to decide when to use it
    #[prost(string, tag = "2")]
    pub summary_description: ::prost::alloc::string::String,
    /// Optional `KnowledgeBaseType` stored as raw i32 (prost convention); `None` when unset.
    #[prost(enumeration = "KnowledgeBaseType", optional, tag = "3")]
    pub r#type: ::core::option::Option<i32>,
}
/// CreateOrUpdateKnowledgeBaseResponse is the response to creating/updating a knowledge base
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateOrUpdateKnowledgeBaseResponse {
    /// RID of the created or updated knowledge base.
    #[prost(string, tag = "1")]
    pub knowledge_base_rid: ::prost::alloc::string::String,
}
/// KnowledgeBase represents a knowledge base entry
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct KnowledgeBase {
    /// Unique RID of this knowledge base.
    #[prost(string, tag = "1")]
    pub knowledge_base_rid: ::prost::alloc::string::String,
    /// RID of the source attachment.
    #[prost(string, tag = "2")]
    pub attachment_rid: ::prost::alloc::string::String,
    /// RID of the workspace this knowledge base belongs to.
    #[prost(string, tag = "3")]
    pub workspace_rid: ::prost::alloc::string::String,
    /// Summary used by the LLM to decide when to use this knowledge base.
    #[prost(string, tag = "4")]
    pub summary_description: ::prost::alloc::string::String,
    /// Raw i32 per prost convention; interpret via `KnowledgeBaseType`.
    #[prost(enumeration = "KnowledgeBaseType", tag = "5")]
    pub r#type: i32,
    /// Version number of this entry.
    #[prost(int32, tag = "6")]
    pub version: i32,
}
/// Request for `KnowledgeBaseService.List`: all knowledge bases in a workspace.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListRequest {
    #[prost(string, tag = "1")]
    pub workspace_rid: ::prost::alloc::string::String,
}
/// Response for `KnowledgeBaseService.List`.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListResponse {
    /// Knowledge bases in the requested workspace.
    #[prost(message, repeated, tag = "1")]
    pub knowledge_bases: ::prost::alloc::vec::Vec<KnowledgeBase>,
}
/// Request for `KnowledgeBaseService.Delete`, keyed by knowledge base RID.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteRequest {
    #[prost(string, tag = "1")]
    pub knowledge_base_rid: ::prost::alloc::string::String,
}
/// Response for `KnowledgeBaseService.Delete`.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct DeleteResponse {
    /// Whether the deletion succeeded.
    #[prost(bool, tag = "1")]
    pub success: bool,
}
/// Request for `KnowledgeBaseService.GetBatch`: fetch several knowledge bases by RID.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetBatchRequest {
    #[prost(string, repeated, tag = "1")]
    pub knowledge_base_rids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
/// Response for `KnowledgeBaseService.GetBatch`.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetBatchResponse {
    /// Resolved knowledge bases for the requested RIDs.
    #[prost(message, repeated, tag = "1")]
    pub knowledge_bases: ::prost::alloc::vec::Vec<KnowledgeBase>,
}
/// generate summary description is intentionally going to return the generated description to the frontend
/// rather than storing it in the knowledge base directly because the description needs to be accepted by the user
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GenerateSummaryDescriptionRequest {
    /// RID of the attachment to summarize.
    #[prost(string, tag = "1")]
    pub attachment_rid: ::prost::alloc::string::String,
}
/// Response for `KnowledgeBaseService.GenerateSummaryDescription`.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GenerateSummaryDescriptionResponse {
    /// Generated summary; must be accepted by the user before being stored.
    #[prost(string, tag = "1")]
    pub summary_description: ::prost::alloc::string::String,
}
/// KnowledgeBaseType defines the types of knowledge base
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum KnowledgeBaseType {
    /// defaults to PROMPT
    Unspecified = 0,
    /// knowledge base gets added directly to prompt (needs to be small enough!)
    Prompt = 1,
    /// knowledge base gets used via vector search on embeddings
    Embedding = 2,
}
305impl KnowledgeBaseType {
306    /// String value of the enum field names used in the ProtoBuf definition.
307    ///
308    /// The values are not transformed in any way and thus are considered stable
309    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
310    pub fn as_str_name(&self) -> &'static str {
311        match self {
312            Self::Unspecified => "KNOWLEDGE_BASE_TYPE_UNSPECIFIED",
313            Self::Prompt => "KNOWLEDGE_BASE_TYPE_PROMPT",
314            Self::Embedding => "KNOWLEDGE_BASE_TYPE_EMBEDDING",
315        }
316    }
317    /// Creates an enum from field names used in the ProtoBuf definition.
318    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
319        match value {
320            "KNOWLEDGE_BASE_TYPE_UNSPECIFIED" => Some(Self::Unspecified),
321            "KNOWLEDGE_BASE_TYPE_PROMPT" => Some(Self::Prompt),
322            "KNOWLEDGE_BASE_TYPE_EMBEDDING" => Some(Self::Embedding),
323            _ => None,
324        }
325    }
326}
/// Generated client implementations.
pub mod knowledge_base_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// KnowledgeBaseService provides AI-powered knowledge base management
    #[derive(Debug, Clone)]
    pub struct KnowledgeBaseServiceClient<T> {
        // Underlying tonic gRPC machinery wrapping the transport `T`.
        inner: tonic::client::Grpc<T>,
    }
    impl KnowledgeBaseServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> KnowledgeBaseServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        /// Wrap an already-constructed transport/service in a client.
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        /// Like `new`, but also sets the origin URI used for request paths.
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        /// Wrap the transport with a tonic interceptor that runs on every request.
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> KnowledgeBaseServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            KnowledgeBaseServiceClient::new(InterceptedService::new(inner, interceptor))
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// CreateOrUpdateKnowledgeBase creates a knowledge base in the workspace
        pub async fn create_or_update_knowledge_base(
            &mut self,
            request: impl tonic::IntoRequest<super::CreateOrUpdateKnowledgeBaseRequest>,
        ) -> std::result::Result<
            tonic::Response<super::CreateOrUpdateKnowledgeBaseResponse>,
            tonic::Status,
        > {
            // Wait for the underlying service to become ready before sending.
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/CreateOrUpdateKnowledgeBase",
            );
            let mut req = request.into_request();
            // Attach service/method metadata so interceptors and tracing can identify the RPC.
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.KnowledgeBaseService",
                        "CreateOrUpdateKnowledgeBase",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// List returns all knowledge bases in the specified workspace
        pub async fn list(
            &mut self,
            request: impl tonic::IntoRequest<super::ListRequest>,
        ) -> std::result::Result<tonic::Response<super::ListResponse>, tonic::Status> {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/List",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "List"));
            self.inner.unary(req, path, codec).await
        }
        /// Delete removes a knowledge base by its RID
        pub async fn delete(
            &mut self,
            request: impl tonic::IntoRequest<super::DeleteRequest>,
        ) -> std::result::Result<tonic::Response<super::DeleteResponse>, tonic::Status> {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/Delete",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "Delete"));
            self.inner.unary(req, path, codec).await
        }
        /// GetBatch retrieves multiple knowledge bases by their RIDs
        pub async fn get_batch(
            &mut self,
            request: impl tonic::IntoRequest<super::GetBatchRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GetBatchResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/GetBatch",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "GetBatch"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// GenerateSummaryDescription generates a summary description for an attachment rid
        pub async fn generate_summary_description(
            &mut self,
            request: impl tonic::IntoRequest<super::GenerateSummaryDescriptionRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GenerateSummaryDescriptionResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/GenerateSummaryDescription",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.KnowledgeBaseService",
                        "GenerateSummaryDescription",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}
/// Request for `DataIngestionErrorClassifierService.ClassifyError`.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ClassifyErrorRequest {
    /// The error message or description to classify
    #[prost(string, tag = "1")]
    pub error_message: ::prost::alloc::string::String,
}
/// Response for `DataIngestionErrorClassifierService.ClassifyError`.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ClassifyErrorResponse {
    /// The classification result
    #[prost(enumeration = "ErrorClassification", tag = "1")]
    pub classification: i32,
    /// Explanation for why this classification was chosen
    #[prost(string, tag = "2")]
    pub reason: ::prost::alloc::string::String,
}
/// Whether an ingestion error is attributed to the client's data or the server.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum ErrorClassification {
    /// Proto3 default / unknown value.
    Unspecified = 0,
    Client = 1,
    Server = 2,
}
574impl ErrorClassification {
575    /// String value of the enum field names used in the ProtoBuf definition.
576    ///
577    /// The values are not transformed in any way and thus are considered stable
578    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
579    pub fn as_str_name(&self) -> &'static str {
580        match self {
581            Self::Unspecified => "ERROR_CLASSIFICATION_UNSPECIFIED",
582            Self::Client => "ERROR_CLASSIFICATION_CLIENT",
583            Self::Server => "ERROR_CLASSIFICATION_SERVER",
584        }
585    }
586    /// Creates an enum from field names used in the ProtoBuf definition.
587    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
588        match value {
589            "ERROR_CLASSIFICATION_UNSPECIFIED" => Some(Self::Unspecified),
590            "ERROR_CLASSIFICATION_CLIENT" => Some(Self::Client),
591            "ERROR_CLASSIFICATION_SERVER" => Some(Self::Server),
592            _ => None,
593        }
594    }
595}
/// Generated client implementations.
pub mod data_ingestion_error_classifier_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// DataIngestionErrorClassifierService classifies data ingestion errors as CLIENT or SERVER issues.
    /// CLIENT errors are customer data issues (empty files, schema conflicts, timestamp problems, etc.)
    /// SERVER errors are infrastructure issues (internal errors, timeouts, capacity limits, etc.)
    #[derive(Debug, Clone)]
    pub struct DataIngestionErrorClassifierServiceClient<T> {
        // Underlying tonic gRPC machinery wrapping the transport `T`.
        inner: tonic::client::Grpc<T>,
    }
    impl DataIngestionErrorClassifierServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> DataIngestionErrorClassifierServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        /// Wrap an already-constructed transport/service in a client.
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        /// Like `new`, but also sets the origin URI used for request paths.
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        /// Wrap the transport with a tonic interceptor that runs on every request.
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> DataIngestionErrorClassifierServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            DataIngestionErrorClassifierServiceClient::new(
                InterceptedService::new(inner, interceptor),
            )
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// ClassifyError analyzes an error message and classifies it as CLIENT or SERVER.
        pub async fn classify_error(
            &mut self,
            request: impl tonic::IntoRequest<super::ClassifyErrorRequest>,
        ) -> std::result::Result<
            tonic::Response<super::ClassifyErrorResponse>,
            tonic::Status,
        > {
            // Wait for the underlying service to become ready before sending.
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.DataIngestionErrorClassifierService/ClassifyError",
            );
            let mut req = request.into_request();
            // Attach service/method metadata so interceptors and tracing can identify the RPC.
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.DataIngestionErrorClassifierService",
                        "ClassifyError",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}
/// Request to look up the snapshot RID associated with a user message in a conversation.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetSnapshotRidByUserMessageIdRequest {
    /// RID of the conversation containing the message.
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    /// ID of the user message to resolve.
    #[prost(string, tag = "2")]
    pub message_id: ::prost::alloc::string::String,
}
/// Will return an empty response body in the case where the message id exists, but there is no associated snapshot
/// This occurs in the instance where a message was sent in a non-workbook context
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetSnapshotRidByUserMessageIdResponse {
    /// `None` when the message has no associated snapshot (non-workbook context).
    #[prost(string, optional, tag = "1")]
    pub snapshot_rid: ::core::option::Option<::prost::alloc::string::String>,
}
/// ReadOnlyMode configures read-only mode where edit tools are shadowed/disabled
/// Marker message — carries no fields.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct ReadOnlyMode {}
/// EditMode configures edit mode where all tools are available
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct EditMode {
    /// when set to true, we auto accept edits for any tools typically requiring approval
    /// NOTE(review): unset vs. explicit false — presumably equivalent; confirm consumer behavior.
    #[prost(bool, optional, tag = "1")]
    pub auto_accept: ::core::option::Option<bool>,
}
/// ConversationMode specifies the mode of the conversation
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct ConversationMode {
    /// At most one variant is set (proto3 oneof); `None` when absent on the wire.
    #[prost(oneof = "conversation_mode::Mode", tags = "1, 2")]
    pub mode: ::core::option::Option<conversation_mode::Mode>,
}
/// Nested message and enum types in `ConversationMode`.
pub mod conversation_mode {
    /// A conversation is either read-only or editable.
    #[derive(Clone, Copy, PartialEq, ::prost::Oneof)]
    pub enum Mode {
        /// Read-only: edit tools are disabled.
        #[prost(message, tag = "1")]
        ReadOnly(super::ReadOnlyMode),
        /// Edit: all tools are available.
        #[prost(message, tag = "2")]
        Edit(super::EditMode),
    }
}
/// When the agent makes a request to use a tool, the user responds
/// with one of these for every request - mapping a tool id to its approval/denial
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ToolApprovalResult {
    /// identifies the tool call
    #[prost(string, tag = "1")]
    pub tool_call_id: ::prost::alloc::string::String,
    /// Approval or denial for the identified tool call.
    #[prost(oneof = "tool_approval_result::Response", tags = "2, 3")]
    pub response: ::core::option::Option<tool_approval_result::Response>,
}
/// Nested message and enum types in `ToolApprovalResult`.
pub mod tool_approval_result {
    /// The user's verdict on a single tool call.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Response {
        /// The tool call may proceed (optionally with overridden args).
        #[prost(message, tag = "2")]
        Approved(super::ToolApprovedResponse),
        /// The tool call is rejected.
        #[prost(message, tag = "3")]
        Denied(super::ToolDeniedResponse),
    }
}
/// Approval payload for a tool call.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ToolApprovedResponse {
    /// json string representation of the override argument if the user
    /// needs it to be changed in some way
    #[prost(string, optional, tag = "1")]
    pub override_args: ::core::option::Option<::prost::alloc::string::String>,
}
/// Denial payload for a tool call.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ToolDeniedResponse {
    /// Human-readable reason for the denial.
    /// NOTE(review): field tag starts at 2 (not 1) — this mirrors the .proto
    /// definition; do not "fix" by hand in generated code.
    #[prost(string, tag = "2")]
    pub denial_reason: ::prost::alloc::string::String,
}
/// RetryRequest retries the last request (e.g., if it was interrupted/failed part-way through)
/// Marker message — carries no fields.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct RetryRequest {}
/// UserPromptRequest contains a new user message
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UserPromptRequest {
    /// The user's message content.
    #[prost(message, optional, tag = "1")]
    pub message: ::core::option::Option<UserModelMessage>,
    /// Optional: image files to provide to the agent
    #[prost(message, repeated, tag = "2")]
    pub images: ::prost::alloc::vec::Vec<ImagePart>,
}
/// ToolApprovalRequest contains tool approval results
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ToolApprovalRequest {
    /// One approval/denial result per outstanding tool call.
    #[prost(message, repeated, tag = "1")]
    pub tool_approvals: ::prost::alloc::vec::Vec<ToolApprovalResult>,
}
/// StreamChatRequest is a request to stream chat messages for AI agent.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StreamChatRequest {
    /// The conversation ID
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    /// DEPRECATED: use request_type.user_prompt.message instead
    #[deprecated]
    #[prost(message, optional, tag = "2")]
    pub message: ::core::option::Option<UserModelMessage>,
    /// DEPRECATED: use request_type.user_prompt.images instead
    #[deprecated]
    #[prost(message, repeated, tag = "3")]
    pub images: ::prost::alloc::vec::Vec<ImagePart>,
    /// DEPRECATED: use request_type.tool_approval instead
    #[deprecated]
    #[prost(message, repeated, tag = "6")]
    pub tool_approvals: ::prost::alloc::vec::Vec<ToolApprovalResult>,
    /// The type of request - exactly one should be set
    #[prost(oneof = "stream_chat_request::RequestType", tags = "7, 8, 9")]
    pub request_type: ::core::option::Option<stream_chat_request::RequestType>,
    /// Context-specific fields based on the oneofKind.
    #[prost(oneof = "stream_chat_request::Context", tags = "4, 5, 10, 11")]
    pub context: ::core::option::Option<stream_chat_request::Context>,
}
/// Nested message and enum types in `StreamChatRequest`.
pub mod stream_chat_request {
    /// The type of request - exactly one should be set
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum RequestType {
        /// Re-run the previous request.
        #[prost(message, tag = "7")]
        Retry(super::RetryRequest),
        /// A new user message (optionally with images).
        #[prost(message, tag = "8")]
        UserPrompt(super::UserPromptRequest),
        /// Approvals/denials for pending tool calls.
        #[prost(message, tag = "9")]
        ToolApproval(super::ToolApprovalRequest),
    }
    /// Context-specific fields based on the oneofKind.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Context {
        /// Chat in the context of a specific workbook.
        #[prost(message, tag = "4")]
        Workbook(super::WorkbookContext),
        /// Chat with no resource-specific context.
        #[prost(message, tag = "5")]
        Global(super::GlobalContext),
        /// Chat while editing a checklist.
        #[prost(message, tag = "10")]
        Checklist(super::ChecklistContext),
        /// Chat while editing a template.
        #[prost(message, tag = "11")]
        Template(super::TemplateContext),
    }
}
/// WorkbookContext contains workbook-specific context fields
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct WorkbookContext {
    /// RID of the workbook to use for context
    #[prost(string, tag = "1")]
    pub workbook_rid: ::prost::alloc::string::String,
    /// The user's presence in the workbook
    /// (the tab and time range the user is currently viewing).
    #[prost(message, optional, tag = "2")]
    pub user_presence: ::core::option::Option<WorkbookUserPresence>,
}
/// ChecklistContext for use when the agent is being messaged in the context of a
/// checklist currently being edited
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ChecklistContext {
    /// RID of the checklist being edited
    #[prost(string, tag = "1")]
    pub checklist_rid: ::prost::alloc::string::String,
    /// Name of the branch that this edit is being done on
    /// This is equivalent to the 'draft name' for a check being edited in the UI
    #[prost(string, tag = "2")]
    pub branch_name: ::prost::alloc::string::String,
    /// A checklist opened in edit mode will always have some resource by which it
    /// is referencing for viewing. It can either be an asset or a run
    #[prost(oneof = "checklist_context::ReferenceRid", tags = "3, 4")]
    pub reference_rid: ::core::option::Option<checklist_context::ReferenceRid>,
}
/// Nested message and enum types in `ChecklistContext`.
pub mod checklist_context {
    /// A checklist opened in edit mode will always have some resource by which it
    /// is referencing for viewing. It can either be an asset or a run
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum ReferenceRid {
        /// RID of the asset in view.
        #[prost(string, tag = "3")]
        Asset(::prost::alloc::string::String),
        /// RID of the run in view.
        #[prost(string, tag = "4")]
        Run(::prost::alloc::string::String),
    }
}
/// TemplateContext for use when the agent is being messaged in the context of a
/// template currently being edited.
/// This provides the agent with the same toolset as the workbook tools
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TemplateContext {
    /// RID of template being edited
    #[prost(string, tag = "1")]
    pub template_rid: ::prost::alloc::string::String,
    /// Name of the branch this edit is being done on
    #[prost(string, tag = "2")]
    pub branch_name: ::prost::alloc::string::String,
    /// Defines the datascope in view in the editor
    #[prost(oneof = "template_context::ReferenceRid", tags = "3, 4")]
    pub reference_rid: ::core::option::Option<template_context::ReferenceRid>,
}
/// Nested message and enum types in `TemplateContext`.
pub mod template_context {
    /// Defines the datascope in view in the editor
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum ReferenceRid {
        /// RID of the asset in view.
        #[prost(string, tag = "3")]
        Asset(::prost::alloc::string::String),
        /// RID of the run in view.
        #[prost(string, tag = "4")]
        Run(::prost::alloc::string::String),
    }
}
/// DefaultContext (no context)
/// Marker message — carries no fields.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct GlobalContext {}
/// WorkbookUserPresence contains the user's presence in the workbook
/// which is used to describe what the user is viewing at the time of the message.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct WorkbookUserPresence {
    /// Index of the workbook tab the user is viewing.
    #[prost(int32, tag = "1")]
    pub tab_index: i32,
    /// Time range currently in view.
    #[prost(message, optional, tag = "2")]
    pub range: ::core::option::Option<TimeRange>,
}
/// CreateConversation request will create a new conversation thread
/// if old conversation id is not set, a brand new, clear chat is created
/// If old conversation id is set without a previous message id, the full conversation thread will be copied
/// if previous message id is set with a previous message id, the conversation thread up until that message will be copied
/// the above case is useful for branching a conversation into a new thread
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateConversationRequest {
    /// Display title for the new conversation.
    #[prost(string, tag = "1")]
    pub title: ::prost::alloc::string::String,
    /// Workspace in which to create the conversation.
    #[prost(string, tag = "2")]
    pub workspace_rid: ::prost::alloc::string::String,
    /// Existing conversation to copy/branch from (see message-level docs).
    #[prost(string, optional, tag = "3")]
    pub old_conversation_rid: ::core::option::Option<::prost::alloc::string::String>,
    /// Message in the old conversation to branch at.
    /// NOTE(review): "up until that message" — confirm whether the cut-off is
    /// inclusive or exclusive on the server side.
    #[prost(string, optional, tag = "4")]
    pub previous_message_id: ::core::option::Option<::prost::alloc::string::String>,
    /// Initial read-only/edit mode for the new conversation.
    #[prost(message, optional, tag = "5")]
    pub conversation_mode: ::core::option::Option<ConversationMode>,
}
/// CreateConversationResponse will return the conversation id for the new conversation
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateConversationResponse {
    /// RID of the newly created conversation.
    #[prost(string, tag = "1")]
    pub new_conversation_rid: ::prost::alloc::string::String,
}
/// Updates the fields if specified (optional means no change for that field)
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UpdateConversationMetadataRequest {
    /// New title; unset leaves the title unchanged.
    #[prost(string, optional, tag = "1")]
    pub title: ::core::option::Option<::prost::alloc::string::String>,
    /// Conversation to update.
    #[prost(string, tag = "2")]
    pub conversation_rid: ::prost::alloc::string::String,
    /// New mode; unset leaves the mode unchanged.
    #[prost(message, optional, tag = "3")]
    pub conversation_mode: ::core::option::Option<ConversationMode>,
}
/// Empty acknowledgement for UpdateConversationMetadata.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct UpdateConversationMetadataResponse {}
/// Deletes the conversation identified by `conversation_rid`.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteConversationRequest {
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
}
/// Empty acknowledgement for DeleteConversation.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct DeleteConversationResponse {}
/// a GetConversationRequest allows you to retrieve a subset of messages from a conversation thread represented
/// by provided rid. To start from a particular message - you can also provide a message id.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetConversationRequest {
    /// Conversation to read from.
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    /// Message id to start the page at; presumably unset starts from the
    /// thread head — confirm server behavior.
    #[prost(string, optional, tag = "2")]
    pub page_start_message_id: ::core::option::Option<::prost::alloc::string::String>,
    /// Maximum number of messages to return; unset means server default.
    #[prost(int32, optional, tag = "3")]
    pub max_message_count: ::core::option::Option<i32>,
}
/// a CompactConversationRequest allows you to reduce the token count in your conversation by up to half
/// will be a no op if current conversation has not yet reached at least half of the max token count
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CompactConversationRequest {
    /// Conversation to compact.
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
}
/// returns the new token count of the now compacted conversation
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct CompactConversationResponse {
    /// Token usage after compaction, relative to the model's context limit.
    #[prost(message, optional, tag = "1")]
    pub context: ::core::option::Option<ContextStatus>,
}
/// Model message with id allows you to identify the message ID of a given message
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ModelMessageWithId {
    /// Unique id of the message within the conversation.
    #[prost(string, tag = "3")]
    pub message_id: ::prost::alloc::string::String,
    /// WB agent user messages can have snapshot rids associated with them
    #[prost(string, optional, tag = "4")]
    pub snapshot_rid: ::core::option::Option<::prost::alloc::string::String>,
    /// Tool calls surfaced for the approval UI for this message.
    #[prost(message, repeated, tag = "5")]
    pub tool_approval_requests: ::prost::alloc::vec::Vec<ToolCallDescription>,
    /// The message payload (chat message, tool action, or confirmation).
    #[prost(oneof = "model_message_with_id::Content", tags = "1, 2, 6")]
    pub content: ::core::option::Option<model_message_with_id::Content>,
}
/// Nested message and enum types in `ModelMessageWithId`.
pub mod model_message_with_id {
    /// Payload variants for a stored conversation entry.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Content {
        /// A user or assistant chat message.
        #[prost(message, tag = "1")]
        Message(super::ModelMessage),
        /// High-level description of a tool call the agent made.
        #[prost(message, tag = "2")]
        ToolAction(super::ToolAction),
        /// The success/failure outcome of a previously recorded tool action.
        #[prost(message, tag = "6")]
        ToolActionConfirmation(super::ToolActionConfirmation),
    }
}
/// Page of messages plus the conversation's metadata.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetConversationResponse {
    /// Messages in conversation order.
    #[prost(message, repeated, tag = "1")]
    pub ordered_messages: ::prost::alloc::vec::Vec<ModelMessageWithId>,
    #[prost(message, optional, tag = "2")]
    pub conversation_metadata: ::core::option::Option<ConversationMetadata>,
}
/// Request for just the metadata of a conversation.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetConversationMetadataRequest {
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
}
/// Metadata (title, timestamps, mode, context usage) for one conversation.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetConversationMetadataResponse {
    #[prost(message, optional, tag = "1")]
    pub conversation_metadata: ::core::option::Option<ConversationMetadata>,
}
/// Paged request for a conversation's messages.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetConversationMessagesRequest {
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    /// Message id to start the page at; unset requests the first page.
    #[prost(string, optional, tag = "2")]
    pub page_start_message_id: ::core::option::Option<::prost::alloc::string::String>,
    /// Maximum number of messages to return in this page.
    #[prost(int32, optional, tag = "3")]
    pub max_message_count: ::core::option::Option<i32>,
}
/// Page of messages plus a token for fetching the next page.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetConversationMessagesResponse {
    /// Messages in conversation order.
    #[prost(message, repeated, tag = "1")]
    pub ordered_messages: ::prost::alloc::vec::Vec<ModelMessageWithId>,
    /// Opaque token for the next page; presumably unset when no more
    /// messages remain — confirm server contract.
    #[prost(string, optional, tag = "2")]
    pub next_page_token: ::core::option::Option<::prost::alloc::string::String>,
}
/// Will generate all conversation threads that this user has in this workspace
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListConversationsRequest {
    /// Workspace whose conversations are listed.
    #[prost(string, tag = "1")]
    pub workspace_rid: ::prost::alloc::string::String,
    /// Page token returned by a previous call; unset for the first page.
    #[prost(string, optional, tag = "2")]
    pub next_page_token: ::core::option::Option<::prost::alloc::string::String>,
    /// Maximum number of conversations per page.
    #[prost(int32, optional, tag = "3")]
    pub page_size: ::core::option::Option<i32>,
}
/// Summary information about a single conversation.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ConversationMetadata {
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    /// Display title of the conversation.
    #[prost(string, tag = "2")]
    pub title: ::prost::alloc::string::String,
    /// Creation time of the conversation.
    #[prost(message, optional, tag = "3")]
    pub created_at: ::core::option::Option<
        super::super::super::google::protobuf::Timestamp,
    >,
    /// Time of the most recent update.
    #[prost(message, optional, tag = "4")]
    pub last_updated_at: ::core::option::Option<
        super::super::super::google::protobuf::Timestamp,
    >,
    /// Current read-only/edit mode.
    #[prost(message, optional, tag = "5")]
    pub mode: ::core::option::Option<ConversationMode>,
    /// Current token usage relative to the model's context limit.
    #[prost(message, optional, tag = "6")]
    pub current_context: ::core::option::Option<ContextStatus>,
}
/// ListConversationsResponse is a list of conversations that can be used in a call to GetConversationRequest
/// to get a full conversation from storage. These are ordered by creation time.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListConversationsResponse {
    #[prost(message, repeated, tag = "1")]
    pub conversations: ::prost::alloc::vec::Vec<ConversationMetadata>,
    /// Token for the next page; unset when there are no further pages.
    #[prost(string, optional, tag = "2")]
    pub next_page_token: ::core::option::Option<::prost::alloc::string::String>,
}
/// Time interval described by a start and end `Timestamp`.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct TimeRange {
    /// Start of the range.
    #[prost(message, optional, tag = "1")]
    pub range_start: ::core::option::Option<Timestamp>,
    /// End of the range.
    #[prost(message, optional, tag = "2")]
    pub range_end: ::core::option::Option<Timestamp>,
}
/// Local timestamp type (distinct from `google.protobuf.Timestamp`, which this
/// file also uses for conversation metadata).
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct Timestamp {
    /// Seconds component.
    /// NOTE(review): declared int32 in the .proto — overflows in 2038 if this
    /// carries a Unix epoch; confirm the intended epoch/range upstream.
    #[prost(int32, tag = "1")]
    pub seconds: i32,
    /// Sub-second nanoseconds component.
    #[prost(int32, tag = "2")]
    pub nanoseconds: i32,
}
/// ModelMessage is a discriminated union of system, user, assistant, and tool messages.
/// Each message type has its own structure and content.
/// NOTE(review): only user and assistant variants exist in this generated
/// union; the "system/tool" wording above comes from the .proto comment.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ModelMessage {
    #[prost(oneof = "model_message::Kind", tags = "1, 2")]
    pub kind: ::core::option::Option<model_message::Kind>,
}
/// Nested message and enum types in `ModelMessage`.
pub mod model_message {
    /// Who authored the message.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Kind {
        /// Message written by the user.
        #[prost(message, tag = "1")]
        User(super::UserModelMessage),
        /// Message produced by the assistant.
        #[prost(message, tag = "2")]
        Assistant(super::AssistantModelMessage),
    }
}
/// A user message containing text
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UserModelMessage {
    /// Ordered content parts making up the message.
    #[prost(message, repeated, tag = "1")]
    pub text: ::prost::alloc::vec::Vec<UserContentPart>,
}
/// An assistant message containing text
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AssistantModelMessage {
    /// Ordered content parts (text and/or reasoning) making up the message.
    #[prost(message, repeated, tag = "1")]
    pub content_parts: ::prost::alloc::vec::Vec<AssistantContentPart>,
}
/// One piece of user message content.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UserContentPart {
    #[prost(oneof = "user_content_part::Part", tags = "1")]
    pub part: ::core::option::Option<user_content_part::Part>,
}
/// Nested message and enum types in `UserContentPart`.
pub mod user_content_part {
    /// Currently only plain text; a oneof so more part kinds can be added.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Part {
        #[prost(message, tag = "1")]
        Text(super::TextPart),
    }
}
/// Content part for assistant messages: can be text, reasoning, or mutation.
/// NOTE(review): only text and reasoning variants exist in this generated
/// union; "mutation" in the comment above is not (or no longer) present.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AssistantContentPart {
    #[prost(oneof = "assistant_content_part::Part", tags = "1, 2")]
    pub part: ::core::option::Option<assistant_content_part::Part>,
}
/// Nested message and enum types in `AssistantContentPart`.
pub mod assistant_content_part {
    /// Kinds of content an assistant message may carry.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Part {
        /// User-visible response text.
        #[prost(message, tag = "1")]
        Text(super::TextPart),
        /// Model reasoning/thinking content.
        #[prost(message, tag = "2")]
        Reasoning(super::ReasoningPart),
    }
}
/// Text part for user or assistant messages.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextPart {
    #[prost(string, tag = "1")]
    pub text: ::prost::alloc::string::String,
}
/// User-supplied image part.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ImagePart {
    /// The base64-encoded image data
    /// NOTE(review): the field type is bytes — confirm whether the payload is
    /// raw image bytes or base64 text stored in a bytes field.
    #[prost(bytes = "vec", tag = "1")]
    pub data: ::prost::alloc::vec::Vec<u8>,
    /// The media type of the image (e.g. "image/png", "image/jpeg")
    #[prost(string, optional, tag = "2")]
    pub media_type: ::core::option::Option<::prost::alloc::string::String>,
    /// Optional: the filename of the image
    #[prost(string, optional, tag = "3")]
    pub filename: ::core::option::Option<::prost::alloc::string::String>,
}
/// Reasoning part for assistant messages.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningPart {
    #[prost(string, tag = "1")]
    pub reasoning: ::prost::alloc::string::String,
}
/// StreamChatResponse is a discriminated union response to a StreamChatRequest
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StreamChatResponse {
    /// NOTE(review): tag 9 is absent from the oneof — presumably reserved or
    /// removed in the .proto; do not reuse it by hand.
    #[prost(
        oneof = "stream_chat_response::Response",
        tags = "1, 2, 3, 4, 5, 6, 7, 8, 10, 11"
    )]
    pub response: ::core::option::Option<stream_chat_response::Response>,
}
/// Nested message and enum types in `StreamChatResponse`.
pub mod stream_chat_response {
    /// One streamed event in a chat session.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Response {
        /// Terminal event: the agent run completed.
        #[prost(message, tag = "1")]
        Finish(super::Finish),
        /// Terminal event: the run failed with an error message.
        #[prost(message, tag = "2")]
        Error(super::Error),
        /// Start of a streamed text message.
        #[prost(message, tag = "3")]
        TextStart(super::TextStart),
        /// Next chunk of an in-progress text message.
        #[prost(message, tag = "4")]
        TextDelta(super::TextDelta),
        /// End of a streamed text message.
        #[prost(message, tag = "5")]
        TextEnd(super::TextEnd),
        /// Start of a streamed reasoning message.
        #[prost(message, tag = "6")]
        ReasoningStart(super::ReasoningStart),
        /// Next chunk of in-progress reasoning.
        #[prost(message, tag = "7")]
        ReasoningDelta(super::ReasoningDelta),
        /// End of a streamed reasoning message.
        #[prost(message, tag = "8")]
        ReasoningEnd(super::ReasoningEnd),
        /// High-level description of a tool call being made.
        #[prost(message, tag = "10")]
        ToolAction(super::ToolAction),
        /// Outcome of a previously announced tool action.
        #[prost(message, tag = "11")]
        ToolActionConfirmation(super::ToolActionConfirmation),
    }
}
/// Describes one tool call proposed by the agent, for display and approval UI.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ToolCallDescription {
    /// Identifies the tool call (matched by `ToolApprovalResult.tool_call_id`).
    #[prost(string, tag = "1")]
    pub tool_call_id: ::prost::alloc::string::String,
    /// Name of the tool the agent wants to invoke.
    #[prost(string, tag = "2")]
    pub tool_name: ::prost::alloc::string::String,
    /// string representation of the proposed tool args for display
    #[prost(string, tag = "3")]
    pub tool_args_json_string: ::prost::alloc::string::String,
    /// used to conditionally render an approval button based the outcome of the tool call
    /// Holds a `ToolCallStatus` value as its raw i32 (prost enumeration field).
    #[prost(enumeration = "ToolCallStatus", tag = "4")]
    pub status: i32,
}
/// Indicates the end of a chat session
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Finish {
    /// The message ids in order of all generated messages for this agent run
    /// These ids can be used to branch a message from that specific message
    #[prost(string, repeated, tag = "1")]
    pub ordered_message_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
    /// In the case that this is the first agent run in a conversation thread, we also
    /// return the new conversation title generated
    #[prost(string, optional, tag = "2")]
    pub new_title: ::core::option::Option<::prost::alloc::string::String>,
    /// Tool calls from this run surfaced for the approval UI
    /// (see `ToolCallDescription.status`).
    #[prost(message, repeated, tag = "3")]
    pub tool_approval_requests: ::prost::alloc::vec::Vec<ToolCallDescription>,
    /// Token usage after the run, relative to the model's context limit.
    #[prost(message, optional, tag = "4")]
    pub updated_context: ::core::option::Option<ContextStatus>,
}
/// An error that occurred during the chat session
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Error {
    /// Human-readable description of the failure.
    #[prost(string, tag = "1")]
    pub message: ::prost::alloc::string::String,
}
/// Indicates the start of a text message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextStart {
    /// uniquely identifies the text message (e.g. uuid) so that the client can
    /// merge parallel message streams (if it happens).
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// A delta (continuation) of a text message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextDelta {
    /// Id of the text message this delta belongs to (see `TextStart`).
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// The next chunk of text
    #[prost(string, tag = "2")]
    pub delta: ::prost::alloc::string::String,
}
/// Indicates the end of a text message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextEnd {
    /// Id of the text message being closed.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// Indicates the start of a reasoning message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningStart {
    /// Uniquely identifies this reasoning stream.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// A delta (continuation) of a reasoning message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningDelta {
    /// Id of the reasoning stream this delta belongs to.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// The next chunk of reasoning
    #[prost(string, tag = "2")]
    pub delta: ::prost::alloc::string::String,
}
/// Indicates the end of a reasoning message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningEnd {
    /// Id of the reasoning stream being closed.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// this is a concise description of a tool call that the agent is making internally
/// without revealing too much detail about the tool call, it informs the user what the agent is doing
/// at a high level. the format is: `{tool_action_verb} {tool_target}` for example:
/// "Search channels for My Datasource"
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ToolAction {
    /// Correlates this action with its later `ToolActionConfirmation`.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// "Thought", "Read", "Find", "Look-up", etc.
    #[prost(string, tag = "2")]
    pub tool_action_verb: ::prost::alloc::string::String,
    /// "workbook", "channel", "variable", "panel", etc.
    #[prost(string, optional, tag = "3")]
    pub tool_target: ::core::option::Option<::prost::alloc::string::String>,
}
/// When we see a ToolAction in the stream, we wait for the corresponding ToolActionConfirmation
/// to indicate whether or not the tool call has successfully executed
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ToolActionConfirmation {
    /// Id of the `ToolAction` this confirmation refers to.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// Whether the tool call succeeded or failed.
    #[prost(oneof = "tool_action_confirmation::Outcome", tags = "2, 3")]
    pub outcome: ::core::option::Option<tool_action_confirmation::Outcome>,
}
/// Nested message and enum types in `ToolActionConfirmation`.
pub mod tool_action_confirmation {
    /// Success/failure outcome of a tool call.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Outcome {
        #[prost(message, tag = "2")]
        Success(super::ToolActionSuccess),
        #[prost(message, tag = "3")]
        Failure(super::ToolActionFailure),
    }
}
/// Successful tool-call outcome with a display message.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ToolActionSuccess {
    #[prost(string, tag = "1")]
    pub tool_success_message: ::prost::alloc::string::String,
}
/// Failed tool-call outcome with a display error message.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ToolActionFailure {
    #[prost(string, tag = "1")]
    pub tool_error_message: ::prost::alloc::string::String,
}
/// ContextStatus represents the current token usage of a conversation relative to the selected model's context limit.
/// NOTE: A curr_token_count of zero means the token count is not yet known (e.g., no prompt has been sent yet,
/// or the conversation was just branched). It does not necessarily mean the conversation is empty.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct ContextStatus {
    /// Current token count; 0 means "not yet known" (see NOTE above).
    #[prost(int32, tag = "1")]
    pub curr_token_count: i32,
    /// Maximum context tokens supported by the selected model.
    #[prost(int32, tag = "2")]
    pub model_context_limit: i32,
}
/// Approval state of a tool call, used to drive approval UI rendering
/// (see `ToolCallDescription.status`).
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum ToolCallStatus {
    /// Default/unknown state.
    Unspecified = 0,
    /// The user approved the tool call.
    Approved = 1,
    /// The user denied the tool call.
    Denied = 2,
    /// The tool call is waiting on a user decision.
    AwaitingApproval = 3,
}
1376impl ToolCallStatus {
1377    /// String value of the enum field names used in the ProtoBuf definition.
1378    ///
1379    /// The values are not transformed in any way and thus are considered stable
1380    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
1381    pub fn as_str_name(&self) -> &'static str {
1382        match self {
1383            Self::Unspecified => "TOOL_CALL_STATUS_UNSPECIFIED",
1384            Self::Approved => "TOOL_CALL_STATUS_APPROVED",
1385            Self::Denied => "TOOL_CALL_STATUS_DENIED",
1386            Self::AwaitingApproval => "TOOL_CALL_STATUS_AWAITING_APPROVAL",
1387        }
1388    }
1389    /// Creates an enum from field names used in the ProtoBuf definition.
1390    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
1391        match value {
1392            "TOOL_CALL_STATUS_UNSPECIFIED" => Some(Self::Unspecified),
1393            "TOOL_CALL_STATUS_APPROVED" => Some(Self::Approved),
1394            "TOOL_CALL_STATUS_DENIED" => Some(Self::Denied),
1395            "TOOL_CALL_STATUS_AWAITING_APPROVAL" => Some(Self::AwaitingApproval),
1396            _ => None,
1397        }
1398    }
1399}
1400/// Generated client implementations.
1401pub mod ai_agent_service_client {
1402    #![allow(
1403        unused_variables,
1404        dead_code,
1405        missing_docs,
1406        clippy::wildcard_imports,
1407        clippy::let_unit_value,
1408    )]
1409    use tonic::codegen::*;
1410    use tonic::codegen::http::Uri;
1411    /// AIAgentService provides AI-powered assistance for general operations
    #[derive(Debug, Clone)]
    pub struct AiAgentServiceClient<T> {
        /// Underlying gRPC channel/service this client issues calls through.
        inner: tonic::client::Grpc<T>,
    }
1416    impl AiAgentServiceClient<tonic::transport::Channel> {
1417        /// Attempt to create a new client by connecting to a given endpoint.
1418        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
1419        where
1420            D: TryInto<tonic::transport::Endpoint>,
1421            D::Error: Into<StdError>,
1422        {
1423            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
1424            Ok(Self::new(conn))
1425        }
1426    }
1427    impl<T> AiAgentServiceClient<T>
1428    where
1429        T: tonic::client::GrpcService<tonic::body::Body>,
1430        T::Error: Into<StdError>,
1431        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
1432        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
1433    {
1434        pub fn new(inner: T) -> Self {
1435            let inner = tonic::client::Grpc::new(inner);
1436            Self { inner }
1437        }
1438        pub fn with_origin(inner: T, origin: Uri) -> Self {
1439            let inner = tonic::client::Grpc::with_origin(inner, origin);
1440            Self { inner }
1441        }
1442        pub fn with_interceptor<F>(
1443            inner: T,
1444            interceptor: F,
1445        ) -> AiAgentServiceClient<InterceptedService<T, F>>
1446        where
1447            F: tonic::service::Interceptor,
1448            T::ResponseBody: Default,
1449            T: tonic::codegen::Service<
1450                http::Request<tonic::body::Body>,
1451                Response = http::Response<
1452                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
1453                >,
1454            >,
1455            <T as tonic::codegen::Service<
1456                http::Request<tonic::body::Body>,
1457            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
1458        {
1459            AiAgentServiceClient::new(InterceptedService::new(inner, interceptor))
1460        }
1461        /// Compress requests with the given encoding.
1462        ///
1463        /// This requires the server to support it otherwise it might respond with an
1464        /// error.
1465        #[must_use]
1466        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
1467            self.inner = self.inner.send_compressed(encoding);
1468            self
1469        }
1470        /// Enable decompressing responses.
1471        #[must_use]
1472        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
1473            self.inner = self.inner.accept_compressed(encoding);
1474            self
1475        }
1476        /// Limits the maximum size of a decoded message.
1477        ///
1478        /// Default: `4MB`
1479        #[must_use]
1480        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
1481            self.inner = self.inner.max_decoding_message_size(limit);
1482            self
1483        }
1484        /// Limits the maximum size of an encoded message.
1485        ///
1486        /// Default: `usize::MAX`
1487        #[must_use]
1488        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
1489            self.inner = self.inner.max_encoding_message_size(limit);
1490            self
1491        }
1492        /// StreamChat handles bidirectional streaming chat for AI agent
1493        pub async fn stream_chat(
1494            &mut self,
1495            request: impl tonic::IntoRequest<super::StreamChatRequest>,
1496        ) -> std::result::Result<
1497            tonic::Response<tonic::codec::Streaming<super::StreamChatResponse>>,
1498            tonic::Status,
1499        > {
1500            self.inner
1501                .ready()
1502                .await
1503                .map_err(|e| {
1504                    tonic::Status::unknown(
1505                        format!("Service was not ready: {}", e.into()),
1506                    )
1507                })?;
1508            let codec = tonic::codec::ProstCodec::default();
1509            let path = http::uri::PathAndQuery::from_static(
1510                "/nominal.ai.v1.AIAgentService/StreamChat",
1511            );
1512            let mut req = request.into_request();
1513            req.extensions_mut()
1514                .insert(GrpcMethod::new("nominal.ai.v1.AIAgentService", "StreamChat"));
1515            self.inner.server_streaming(req, path, codec).await
1516        }
1517        /// GetConversation [DEPRACATED] handles getting a complete conversation list, with an optional limit on number of messages returned
1518        #[deprecated]
1519        pub async fn get_conversation(
1520            &mut self,
1521            request: impl tonic::IntoRequest<super::GetConversationRequest>,
1522        ) -> std::result::Result<
1523            tonic::Response<super::GetConversationResponse>,
1524            tonic::Status,
1525        > {
1526            self.inner
1527                .ready()
1528                .await
1529                .map_err(|e| {
1530                    tonic::Status::unknown(
1531                        format!("Service was not ready: {}", e.into()),
1532                    )
1533                })?;
1534            let codec = tonic::codec::ProstCodec::default();
1535            let path = http::uri::PathAndQuery::from_static(
1536                "/nominal.ai.v1.AIAgentService/GetConversation",
1537            );
1538            let mut req = request.into_request();
1539            req.extensions_mut()
1540                .insert(
1541                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "GetConversation"),
1542                );
1543            self.inner.unary(req, path, codec).await
1544        }
1545        /// GetConversationMetadata handles getting the conversation metadata like title, current token count etc
1546        pub async fn get_conversation_metadata(
1547            &mut self,
1548            request: impl tonic::IntoRequest<super::GetConversationMetadataRequest>,
1549        ) -> std::result::Result<
1550            tonic::Response<super::GetConversationMetadataResponse>,
1551            tonic::Status,
1552        > {
1553            self.inner
1554                .ready()
1555                .await
1556                .map_err(|e| {
1557                    tonic::Status::unknown(
1558                        format!("Service was not ready: {}", e.into()),
1559                    )
1560                })?;
1561            let codec = tonic::codec::ProstCodec::default();
1562            let path = http::uri::PathAndQuery::from_static(
1563                "/nominal.ai.v1.AIAgentService/GetConversationMetadata",
1564            );
1565            let mut req = request.into_request();
1566            req.extensions_mut()
1567                .insert(
1568                    GrpcMethod::new(
1569                        "nominal.ai.v1.AIAgentService",
1570                        "GetConversationMetadata",
1571                    ),
1572                );
1573            self.inner.unary(req, path, codec).await
1574        }
1575        /// GetConversationMessages handles retrieving the set of conversation messages. Supports pagination
1576        pub async fn get_conversation_messages(
1577            &mut self,
1578            request: impl tonic::IntoRequest<super::GetConversationMessagesRequest>,
1579        ) -> std::result::Result<
1580            tonic::Response<super::GetConversationMessagesResponse>,
1581            tonic::Status,
1582        > {
1583            self.inner
1584                .ready()
1585                .await
1586                .map_err(|e| {
1587                    tonic::Status::unknown(
1588                        format!("Service was not ready: {}", e.into()),
1589                    )
1590                })?;
1591            let codec = tonic::codec::ProstCodec::default();
1592            let path = http::uri::PathAndQuery::from_static(
1593                "/nominal.ai.v1.AIAgentService/GetConversationMessages",
1594            );
1595            let mut req = request.into_request();
1596            req.extensions_mut()
1597                .insert(
1598                    GrpcMethod::new(
1599                        "nominal.ai.v1.AIAgentService",
1600                        "GetConversationMessages",
1601                    ),
1602                );
1603            self.inner.unary(req, path, codec).await
1604        }
1605        /// ListConversations handles getting the list of conversation ids ordered by most recently updated
1606        pub async fn list_conversations(
1607            &mut self,
1608            request: impl tonic::IntoRequest<super::ListConversationsRequest>,
1609        ) -> std::result::Result<
1610            tonic::Response<super::ListConversationsResponse>,
1611            tonic::Status,
1612        > {
1613            self.inner
1614                .ready()
1615                .await
1616                .map_err(|e| {
1617                    tonic::Status::unknown(
1618                        format!("Service was not ready: {}", e.into()),
1619                    )
1620                })?;
1621            let codec = tonic::codec::ProstCodec::default();
1622            let path = http::uri::PathAndQuery::from_static(
1623                "/nominal.ai.v1.AIAgentService/ListConversations",
1624            );
1625            let mut req = request.into_request();
1626            req.extensions_mut()
1627                .insert(
1628                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "ListConversations"),
1629                );
1630            self.inner.unary(req, path, codec).await
1631        }
1632        /// CreateConversation handles creating a conversation and assigning it a conversation rid
1633        pub async fn create_conversation(
1634            &mut self,
1635            request: impl tonic::IntoRequest<super::CreateConversationRequest>,
1636        ) -> std::result::Result<
1637            tonic::Response<super::CreateConversationResponse>,
1638            tonic::Status,
1639        > {
1640            self.inner
1641                .ready()
1642                .await
1643                .map_err(|e| {
1644                    tonic::Status::unknown(
1645                        format!("Service was not ready: {}", e.into()),
1646                    )
1647                })?;
1648            let codec = tonic::codec::ProstCodec::default();
1649            let path = http::uri::PathAndQuery::from_static(
1650                "/nominal.ai.v1.AIAgentService/CreateConversation",
1651            );
1652            let mut req = request.into_request();
1653            req.extensions_mut()
1654                .insert(
1655                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "CreateConversation"),
1656                );
1657            self.inner.unary(req, path, codec).await
1658        }
1659        /// UpdateConversationMetadata handles updating any metadata associated with a conversation
1660        pub async fn update_conversation_metadata(
1661            &mut self,
1662            request: impl tonic::IntoRequest<super::UpdateConversationMetadataRequest>,
1663        ) -> std::result::Result<
1664            tonic::Response<super::UpdateConversationMetadataResponse>,
1665            tonic::Status,
1666        > {
1667            self.inner
1668                .ready()
1669                .await
1670                .map_err(|e| {
1671                    tonic::Status::unknown(
1672                        format!("Service was not ready: {}", e.into()),
1673                    )
1674                })?;
1675            let codec = tonic::codec::ProstCodec::default();
1676            let path = http::uri::PathAndQuery::from_static(
1677                "/nominal.ai.v1.AIAgentService/UpdateConversationMetadata",
1678            );
1679            let mut req = request.into_request();
1680            req.extensions_mut()
1681                .insert(
1682                    GrpcMethod::new(
1683                        "nominal.ai.v1.AIAgentService",
1684                        "UpdateConversationMetadata",
1685                    ),
1686                );
1687            self.inner.unary(req, path, codec).await
1688        }
1689        /// DeleteConversation handles deleting a specific conversation by conversation rid
1690        pub async fn delete_conversation(
1691            &mut self,
1692            request: impl tonic::IntoRequest<super::DeleteConversationRequest>,
1693        ) -> std::result::Result<
1694            tonic::Response<super::DeleteConversationResponse>,
1695            tonic::Status,
1696        > {
1697            self.inner
1698                .ready()
1699                .await
1700                .map_err(|e| {
1701                    tonic::Status::unknown(
1702                        format!("Service was not ready: {}", e.into()),
1703                    )
1704                })?;
1705            let codec = tonic::codec::ProstCodec::default();
1706            let path = http::uri::PathAndQuery::from_static(
1707                "/nominal.ai.v1.AIAgentService/DeleteConversation",
1708            );
1709            let mut req = request.into_request();
1710            req.extensions_mut()
1711                .insert(
1712                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "DeleteConversation"),
1713                );
1714            self.inner.unary(req, path, codec).await
1715        }
1716        /// GetSnapshotRidByUserMessageId handles resolving the snapshot rid of the workbook at the time the message is sent
1717        pub async fn get_snapshot_rid_by_user_message_id(
1718            &mut self,
1719            request: impl tonic::IntoRequest<super::GetSnapshotRidByUserMessageIdRequest>,
1720        ) -> std::result::Result<
1721            tonic::Response<super::GetSnapshotRidByUserMessageIdResponse>,
1722            tonic::Status,
1723        > {
1724            self.inner
1725                .ready()
1726                .await
1727                .map_err(|e| {
1728                    tonic::Status::unknown(
1729                        format!("Service was not ready: {}", e.into()),
1730                    )
1731                })?;
1732            let codec = tonic::codec::ProstCodec::default();
1733            let path = http::uri::PathAndQuery::from_static(
1734                "/nominal.ai.v1.AIAgentService/GetSnapshotRidByUserMessageId",
1735            );
1736            let mut req = request.into_request();
1737            req.extensions_mut()
1738                .insert(
1739                    GrpcMethod::new(
1740                        "nominal.ai.v1.AIAgentService",
1741                        "GetSnapshotRidByUserMessageId",
1742                    ),
1743                );
1744            self.inner.unary(req, path, codec).await
1745        }
1746        /// CompactConversation handles compacting the conversation context into approximately half its original size
1747        pub async fn compact_conversation(
1748            &mut self,
1749            request: impl tonic::IntoRequest<super::CompactConversationRequest>,
1750        ) -> std::result::Result<
1751            tonic::Response<super::CompactConversationResponse>,
1752            tonic::Status,
1753        > {
1754            self.inner
1755                .ready()
1756                .await
1757                .map_err(|e| {
1758                    tonic::Status::unknown(
1759                        format!("Service was not ready: {}", e.into()),
1760                    )
1761                })?;
1762            let codec = tonic::codec::ProstCodec::default();
1763            let path = http::uri::PathAndQuery::from_static(
1764                "/nominal.ai.v1.AIAgentService/CompactConversation",
1765            );
1766            let mut req = request.into_request();
1767            req.extensions_mut()
1768                .insert(
1769                    GrpcMethod::new(
1770                        "nominal.ai.v1.AIAgentService",
1771                        "CompactConversation",
1772                    ),
1773                );
1774            self.inner.unary(req, path, codec).await
1775        }
1776    }
1777}