nominal_api/proto/
nominal.ai.v1.rs

1// This file is @generated by prost-build.
2#[derive(Clone, Copy, PartialEq, ::prost::Message)]
3pub struct GetProviderStatusRequest {}
4#[derive(Clone, Copy, PartialEq, ::prost::Message)]
5pub struct GetProviderStatusResponse {
6    /// Timestamp when the last status was determined
7    #[prost(message, optional, tag = "1")]
8    pub timestamp: ::core::option::Option<
9        super::super::super::google::protobuf::Timestamp,
10    >,
11    /// Status of the most recent health check probe
12    #[prost(message, optional, tag = "2")]
13    pub last_status: ::core::option::Option<ProviderStatus>,
14    /// Aggregated status over the last 30 minutes (DEGRADED if any check failed or exceeded thresholds)
15    /// Deprecated: Use aggregated_status instead. This field is kept for backward compatibility.
16    #[deprecated]
17    #[prost(message, optional, tag = "3")]
18    pub aggregated_status_over_last_30m: ::core::option::Option<ProviderStatus>,
19    /// Aggregated status over the most recent health-check iterations; the window size is configurable in the backend (DEGRADED if any check failed or exceeded thresholds)
20    #[prost(message, optional, tag = "4")]
21    pub aggregated_status: ::core::option::Option<ProviderStatus>,
22}
23#[derive(Clone, Copy, PartialEq, ::prost::Message)]
24pub struct ProviderStatus {
25    #[prost(oneof = "provider_status::Status", tags = "1, 2")]
26    pub status: ::core::option::Option<provider_status::Status>,
27}
28/// Nested message and enum types in `ProviderStatus`.
29pub mod provider_status {
30    #[derive(Clone, Copy, PartialEq, ::prost::Oneof)]
31    pub enum Status {
32        #[prost(message, tag = "1")]
33        Healthy(super::Healthy),
34        #[prost(message, tag = "2")]
35        Degraded(super::Degraded),
36    }
37}
38#[derive(Clone, Copy, PartialEq, ::prost::Message)]
39pub struct Healthy {}
40#[derive(Clone, Copy, PartialEq, ::prost::Message)]
41pub struct Degraded {
42    #[prost(enumeration = "DegradationReason", tag = "1")]
43    pub reason: i32,
44}
45#[derive(Clone, Copy, PartialEq, ::prost::Message)]
46pub struct ProviderMetrics {
47    #[prost(int32, tag = "1")]
48    pub time_to_first_token_ms: i32,
49    #[prost(int32, tag = "2")]
50    pub total_time_ms: i32,
51}
52#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
53#[repr(i32)]
54pub enum DegradationReason {
55    Unspecified = 0,
56    HighLatency = 1,
57    Failures = 2,
58    HighLatencyAndFailures = 3,
59}
60impl DegradationReason {
61    /// String value of the enum field names used in the ProtoBuf definition.
62    ///
63    /// The values are not transformed in any way and thus are considered stable
64    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
65    pub fn as_str_name(&self) -> &'static str {
66        match self {
67            Self::Unspecified => "DEGRADATION_REASON_UNSPECIFIED",
68            Self::HighLatency => "DEGRADATION_REASON_HIGH_LATENCY",
69            Self::Failures => "DEGRADATION_REASON_FAILURES",
70            Self::HighLatencyAndFailures => {
71                "DEGRADATION_REASON_HIGH_LATENCY_AND_FAILURES"
72            }
73        }
74    }
75    /// Creates an enum from field names used in the ProtoBuf definition.
76    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
77        match value {
78            "DEGRADATION_REASON_UNSPECIFIED" => Some(Self::Unspecified),
79            "DEGRADATION_REASON_HIGH_LATENCY" => Some(Self::HighLatency),
80            "DEGRADATION_REASON_FAILURES" => Some(Self::Failures),
81            "DEGRADATION_REASON_HIGH_LATENCY_AND_FAILURES" => {
82                Some(Self::HighLatencyAndFailures)
83            }
84            _ => None,
85        }
86    }
87}
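// Illustrative sketch (not part of the generated code): one way a caller might interpret a
// GetProviderStatusResponse, preferring the configurable `aggregated_status` and falling back to
// the deprecated 30-minute aggregate. The helper name and the fallback policy are assumptions.
#[allow(deprecated)]
pub fn describe_provider_status(resp: &GetProviderStatusResponse) -> &'static str {
    // Prefer the new aggregate; fall back to the deprecated 30-minute aggregate if it is unset.
    let aggregate = resp
        .aggregated_status
        .as_ref()
        .or(resp.aggregated_status_over_last_30m.as_ref());
    match aggregate.and_then(|s| s.status.as_ref()) {
        Some(provider_status::Status::Healthy(_)) => "healthy",
        Some(provider_status::Status::Degraded(d)) => {
            // Enumeration fields are carried as i32; recent prost versions derive TryFrom<i32>.
            match DegradationReason::try_from(d.reason).unwrap_or(DegradationReason::Unspecified) {
                DegradationReason::HighLatency => "degraded: high latency",
                DegradationReason::Failures => "degraded: failures",
                DegradationReason::HighLatencyAndFailures => "degraded: high latency and failures",
                DegradationReason::Unspecified => "degraded: unspecified reason",
            }
        }
        None => "status unknown",
    }
}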
88/// Generated client implementations.
89pub mod model_provider_health_service_client {
90    #![allow(
91        unused_variables,
92        dead_code,
93        missing_docs,
94        clippy::wildcard_imports,
95        clippy::let_unit_value,
96    )]
97    use tonic::codegen::*;
98    use tonic::codegen::http::Uri;
99    /// ModelProviderHealthService monitors the health and performance of the backing LLM model provider.
100    /// It runs lightweight health checks every 5 minutes to measure provider responsiveness and reliability,
101    /// independent of the complexity of user prompts.
102    #[derive(Debug, Clone)]
103    pub struct ModelProviderHealthServiceClient<T> {
104        inner: tonic::client::Grpc<T>,
105    }
106    impl ModelProviderHealthServiceClient<tonic::transport::Channel> {
107        /// Attempt to create a new client by connecting to a given endpoint.
108        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
109        where
110            D: TryInto<tonic::transport::Endpoint>,
111            D::Error: Into<StdError>,
112        {
113            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
114            Ok(Self::new(conn))
115        }
116    }
117    impl<T> ModelProviderHealthServiceClient<T>
118    where
119        T: tonic::client::GrpcService<tonic::body::Body>,
120        T::Error: Into<StdError>,
121        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
122        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
123    {
124        pub fn new(inner: T) -> Self {
125            let inner = tonic::client::Grpc::new(inner);
126            Self { inner }
127        }
128        pub fn with_origin(inner: T, origin: Uri) -> Self {
129            let inner = tonic::client::Grpc::with_origin(inner, origin);
130            Self { inner }
131        }
132        pub fn with_interceptor<F>(
133            inner: T,
134            interceptor: F,
135        ) -> ModelProviderHealthServiceClient<InterceptedService<T, F>>
136        where
137            F: tonic::service::Interceptor,
138            T::ResponseBody: Default,
139            T: tonic::codegen::Service<
140                http::Request<tonic::body::Body>,
141                Response = http::Response<
142                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
143                >,
144            >,
145            <T as tonic::codegen::Service<
146                http::Request<tonic::body::Body>,
147            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
148        {
149            ModelProviderHealthServiceClient::new(
150                InterceptedService::new(inner, interceptor),
151            )
152        }
153        /// Compress requests with the given encoding.
154        ///
155        /// This requires the server to support it otherwise it might respond with an
156        /// error.
157        #[must_use]
158        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
159            self.inner = self.inner.send_compressed(encoding);
160            self
161        }
162        /// Enable decompressing responses.
163        #[must_use]
164        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
165            self.inner = self.inner.accept_compressed(encoding);
166            self
167        }
168        /// Limits the maximum size of a decoded message.
169        ///
170        /// Default: `4MB`
171        #[must_use]
172        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
173            self.inner = self.inner.max_decoding_message_size(limit);
174            self
175        }
176        /// Limits the maximum size of an encoded message.
177        ///
178        /// Default: `usize::MAX`
179        #[must_use]
180        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
181            self.inner = self.inner.max_encoding_message_size(limit);
182            self
183        }
184        /// GetProviderStatus returns the current health status of the model provider based on recent health checks.
185        /// The status is HEALTHY if all checks in the last 30 minutes passed latency thresholds,
186        /// or DEGRADED if any checks exceeded latency thresholds or failed entirely.
187        pub async fn get_provider_status(
188            &mut self,
189            request: impl tonic::IntoRequest<super::GetProviderStatusRequest>,
190        ) -> std::result::Result<
191            tonic::Response<super::GetProviderStatusResponse>,
192            tonic::Status,
193        > {
194            self.inner
195                .ready()
196                .await
197                .map_err(|e| {
198                    tonic::Status::unknown(
199                        format!("Service was not ready: {}", e.into()),
200                    )
201                })?;
202            let codec = tonic::codec::ProstCodec::default();
203            let path = http::uri::PathAndQuery::from_static(
204                "/nominal.ai.v1.ModelProviderHealthService/GetProviderStatus",
205            );
206            let mut req = request.into_request();
207            req.extensions_mut()
208                .insert(
209                    GrpcMethod::new(
210                        "nominal.ai.v1.ModelProviderHealthService",
211                        "GetProviderStatus",
212                    ),
213                );
214            self.inner.unary(req, path, codec).await
215        }
216    }
217}
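// Illustrative sketch (not part of the generated code): connecting the health client and fetching
// the provider status. The endpoint URL is a placeholder and an async runtime (e.g. tokio) is assumed.
pub async fn example_get_provider_status() -> Result<(), Box<dyn std::error::Error>> {
    use model_provider_health_service_client::ModelProviderHealthServiceClient;

    // Connect over a tonic channel; swap in the real endpoint for your deployment.
    let mut client = ModelProviderHealthServiceClient::connect("http://localhost:50051").await?;
    let response = client
        .get_provider_status(GetProviderStatusRequest {})
        .await?
        .into_inner();
    println!("last status: {:?}", response.last_status);
    Ok(())
}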
218/// CreateOrUpdateKnowledgeBaseRequest is a request to create a knowledge base from an attachment in the attachment's
219/// workspace. Subsequent calls for the same attachment will overwrite the existing knowledge base.
220#[derive(Clone, PartialEq, ::prost::Message)]
221pub struct CreateOrUpdateKnowledgeBaseRequest {
222    #[prost(string, tag = "1")]
223    pub attachment_rid: ::prost::alloc::string::String,
224    /// Summary of the knowledge base; it will be used by the LLM to decide when to use it
225    #[prost(string, tag = "2")]
226    pub summary_description: ::prost::alloc::string::String,
227    #[prost(enumeration = "KnowledgeBaseType", optional, tag = "3")]
228    pub r#type: ::core::option::Option<i32>,
229}
230/// CreateOrUpdateKnowledgeBaseResponse is the response to creating/updating a knowledge base
231#[derive(Clone, PartialEq, ::prost::Message)]
232pub struct CreateOrUpdateKnowledgeBaseResponse {
233    #[prost(string, tag = "1")]
234    pub knowledge_base_rid: ::prost::alloc::string::String,
235}
236/// KnowledgeBase represents a knowledge base entry
237#[derive(Clone, PartialEq, ::prost::Message)]
238pub struct KnowledgeBase {
239    #[prost(string, tag = "1")]
240    pub knowledge_base_rid: ::prost::alloc::string::String,
241    #[prost(string, tag = "2")]
242    pub attachment_rid: ::prost::alloc::string::String,
243    #[prost(string, tag = "3")]
244    pub workspace_rid: ::prost::alloc::string::String,
245    #[prost(string, tag = "4")]
246    pub summary_description: ::prost::alloc::string::String,
247    #[prost(enumeration = "KnowledgeBaseType", tag = "5")]
248    pub r#type: i32,
249    #[prost(int32, tag = "6")]
250    pub version: i32,
251}
252#[derive(Clone, PartialEq, ::prost::Message)]
253pub struct ListRequest {
254    #[prost(string, tag = "1")]
255    pub workspace_rid: ::prost::alloc::string::String,
256}
257#[derive(Clone, PartialEq, ::prost::Message)]
258pub struct ListResponse {
259    #[prost(message, repeated, tag = "1")]
260    pub knowledge_bases: ::prost::alloc::vec::Vec<KnowledgeBase>,
261}
262#[derive(Clone, PartialEq, ::prost::Message)]
263pub struct DeleteRequest {
264    #[prost(string, tag = "1")]
265    pub knowledge_base_rid: ::prost::alloc::string::String,
266}
267#[derive(Clone, Copy, PartialEq, ::prost::Message)]
268pub struct DeleteResponse {
269    #[prost(bool, tag = "1")]
270    pub success: bool,
271}
272#[derive(Clone, PartialEq, ::prost::Message)]
273pub struct GetBatchRequest {
274    #[prost(string, repeated, tag = "1")]
275    pub knowledge_base_rids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
276}
277#[derive(Clone, PartialEq, ::prost::Message)]
278pub struct GetBatchResponse {
279    #[prost(message, repeated, tag = "1")]
280    pub knowledge_bases: ::prost::alloc::vec::Vec<KnowledgeBase>,
281}
282/// GenerateSummaryDescription intentionally returns the generated description to the frontend
283/// rather than storing it in the knowledge base directly, because the description must first be accepted by the user
284#[derive(Clone, PartialEq, ::prost::Message)]
285pub struct GenerateSummaryDescriptionRequest {
286    #[prost(string, tag = "1")]
287    pub attachment_rid: ::prost::alloc::string::String,
288}
289#[derive(Clone, PartialEq, ::prost::Message)]
290pub struct GenerateSummaryDescriptionResponse {
291    #[prost(string, tag = "1")]
292    pub summary_description: ::prost::alloc::string::String,
293}
294/// KnowledgeBaseType defines the types of knowledge base
295#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
296#[repr(i32)]
297pub enum KnowledgeBaseType {
298    /// defaults to PROMPT
299    Unspecified = 0,
300    /// knowledge base gets added directly to prompt (needs to be small enough!)
301    Prompt = 1,
302    /// knowledge base gets used via vector search on embeddings
303    Embedding = 2,
304}
305impl KnowledgeBaseType {
306    /// String value of the enum field names used in the ProtoBuf definition.
307    ///
308    /// The values are not transformed in any way and thus are considered stable
309    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
310    pub fn as_str_name(&self) -> &'static str {
311        match self {
312            Self::Unspecified => "KNOWLEDGE_BASE_TYPE_UNSPECIFIED",
313            Self::Prompt => "KNOWLEDGE_BASE_TYPE_PROMPT",
314            Self::Embedding => "KNOWLEDGE_BASE_TYPE_EMBEDDING",
315        }
316    }
317    /// Creates an enum from field names used in the ProtoBuf definition.
318    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
319        match value {
320            "KNOWLEDGE_BASE_TYPE_UNSPECIFIED" => Some(Self::Unspecified),
321            "KNOWLEDGE_BASE_TYPE_PROMPT" => Some(Self::Prompt),
322            "KNOWLEDGE_BASE_TYPE_EMBEDDING" => Some(Self::Embedding),
323            _ => None,
324        }
325    }
326}
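// Illustrative sketch (not part of the generated code): building a CreateOrUpdateKnowledgeBaseRequest.
// The rid and description values are placeholders.
pub fn example_create_knowledge_base_request() -> CreateOrUpdateKnowledgeBaseRequest {
    CreateOrUpdateKnowledgeBaseRequest {
        attachment_rid: "attachment-rid-placeholder".to_string(),
        summary_description: "Glossary of channel names and test acronyms".to_string(),
        // Enumeration fields are carried as i32 on the wire; `as i32` keeps that explicit.
        r#type: Some(KnowledgeBaseType::Prompt as i32),
    }
}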
327/// Generated client implementations.
328pub mod knowledge_base_service_client {
329    #![allow(
330        unused_variables,
331        dead_code,
332        missing_docs,
333        clippy::wildcard_imports,
334        clippy::let_unit_value,
335    )]
336    use tonic::codegen::*;
337    use tonic::codegen::http::Uri;
338    /// KnowledgeBaseService provides AI-powered knowledge base management
339    #[derive(Debug, Clone)]
340    pub struct KnowledgeBaseServiceClient<T> {
341        inner: tonic::client::Grpc<T>,
342    }
343    impl KnowledgeBaseServiceClient<tonic::transport::Channel> {
344        /// Attempt to create a new client by connecting to a given endpoint.
345        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
346        where
347            D: TryInto<tonic::transport::Endpoint>,
348            D::Error: Into<StdError>,
349        {
350            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
351            Ok(Self::new(conn))
352        }
353    }
354    impl<T> KnowledgeBaseServiceClient<T>
355    where
356        T: tonic::client::GrpcService<tonic::body::Body>,
357        T::Error: Into<StdError>,
358        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
359        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
360    {
361        pub fn new(inner: T) -> Self {
362            let inner = tonic::client::Grpc::new(inner);
363            Self { inner }
364        }
365        pub fn with_origin(inner: T, origin: Uri) -> Self {
366            let inner = tonic::client::Grpc::with_origin(inner, origin);
367            Self { inner }
368        }
369        pub fn with_interceptor<F>(
370            inner: T,
371            interceptor: F,
372        ) -> KnowledgeBaseServiceClient<InterceptedService<T, F>>
373        where
374            F: tonic::service::Interceptor,
375            T::ResponseBody: Default,
376            T: tonic::codegen::Service<
377                http::Request<tonic::body::Body>,
378                Response = http::Response<
379                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
380                >,
381            >,
382            <T as tonic::codegen::Service<
383                http::Request<tonic::body::Body>,
384            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
385        {
386            KnowledgeBaseServiceClient::new(InterceptedService::new(inner, interceptor))
387        }
388        /// Compress requests with the given encoding.
389        ///
390        /// This requires the server to support it otherwise it might respond with an
391        /// error.
392        #[must_use]
393        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
394            self.inner = self.inner.send_compressed(encoding);
395            self
396        }
397        /// Enable decompressing responses.
398        #[must_use]
399        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
400            self.inner = self.inner.accept_compressed(encoding);
401            self
402        }
403        /// Limits the maximum size of a decoded message.
404        ///
405        /// Default: `4MB`
406        #[must_use]
407        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
408            self.inner = self.inner.max_decoding_message_size(limit);
409            self
410        }
411        /// Limits the maximum size of an encoded message.
412        ///
413        /// Default: `usize::MAX`
414        #[must_use]
415        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
416            self.inner = self.inner.max_encoding_message_size(limit);
417            self
418        }
419        /// CreateOrUpdateKnowledgeBase creates a knowledge base in the workspace
420        pub async fn create_or_update_knowledge_base(
421            &mut self,
422            request: impl tonic::IntoRequest<super::CreateOrUpdateKnowledgeBaseRequest>,
423        ) -> std::result::Result<
424            tonic::Response<super::CreateOrUpdateKnowledgeBaseResponse>,
425            tonic::Status,
426        > {
427            self.inner
428                .ready()
429                .await
430                .map_err(|e| {
431                    tonic::Status::unknown(
432                        format!("Service was not ready: {}", e.into()),
433                    )
434                })?;
435            let codec = tonic::codec::ProstCodec::default();
436            let path = http::uri::PathAndQuery::from_static(
437                "/nominal.ai.v1.KnowledgeBaseService/CreateOrUpdateKnowledgeBase",
438            );
439            let mut req = request.into_request();
440            req.extensions_mut()
441                .insert(
442                    GrpcMethod::new(
443                        "nominal.ai.v1.KnowledgeBaseService",
444                        "CreateOrUpdateKnowledgeBase",
445                    ),
446                );
447            self.inner.unary(req, path, codec).await
448        }
449        /// List returns all knowledge bases in the specified workspace
450        pub async fn list(
451            &mut self,
452            request: impl tonic::IntoRequest<super::ListRequest>,
453        ) -> std::result::Result<tonic::Response<super::ListResponse>, tonic::Status> {
454            self.inner
455                .ready()
456                .await
457                .map_err(|e| {
458                    tonic::Status::unknown(
459                        format!("Service was not ready: {}", e.into()),
460                    )
461                })?;
462            let codec = tonic::codec::ProstCodec::default();
463            let path = http::uri::PathAndQuery::from_static(
464                "/nominal.ai.v1.KnowledgeBaseService/List",
465            );
466            let mut req = request.into_request();
467            req.extensions_mut()
468                .insert(GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "List"));
469            self.inner.unary(req, path, codec).await
470        }
471        /// Delete removes a knowledge base by its RID
472        pub async fn delete(
473            &mut self,
474            request: impl tonic::IntoRequest<super::DeleteRequest>,
475        ) -> std::result::Result<tonic::Response<super::DeleteResponse>, tonic::Status> {
476            self.inner
477                .ready()
478                .await
479                .map_err(|e| {
480                    tonic::Status::unknown(
481                        format!("Service was not ready: {}", e.into()),
482                    )
483                })?;
484            let codec = tonic::codec::ProstCodec::default();
485            let path = http::uri::PathAndQuery::from_static(
486                "/nominal.ai.v1.KnowledgeBaseService/Delete",
487            );
488            let mut req = request.into_request();
489            req.extensions_mut()
490                .insert(GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "Delete"));
491            self.inner.unary(req, path, codec).await
492        }
493        /// GetBatch retrieves multiple knowledge bases by their RIDs
494        pub async fn get_batch(
495            &mut self,
496            request: impl tonic::IntoRequest<super::GetBatchRequest>,
497        ) -> std::result::Result<
498            tonic::Response<super::GetBatchResponse>,
499            tonic::Status,
500        > {
501            self.inner
502                .ready()
503                .await
504                .map_err(|e| {
505                    tonic::Status::unknown(
506                        format!("Service was not ready: {}", e.into()),
507                    )
508                })?;
509            let codec = tonic::codec::ProstCodec::default();
510            let path = http::uri::PathAndQuery::from_static(
511                "/nominal.ai.v1.KnowledgeBaseService/GetBatch",
512            );
513            let mut req = request.into_request();
514            req.extensions_mut()
515                .insert(
516                    GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "GetBatch"),
517                );
518            self.inner.unary(req, path, codec).await
519        }
520        /// GenerateSummaryDescription generates a summary description for an attachment rid
521        pub async fn generate_summary_description(
522            &mut self,
523            request: impl tonic::IntoRequest<super::GenerateSummaryDescriptionRequest>,
524        ) -> std::result::Result<
525            tonic::Response<super::GenerateSummaryDescriptionResponse>,
526            tonic::Status,
527        > {
528            self.inner
529                .ready()
530                .await
531                .map_err(|e| {
532                    tonic::Status::unknown(
533                        format!("Service was not ready: {}", e.into()),
534                    )
535                })?;
536            let codec = tonic::codec::ProstCodec::default();
537            let path = http::uri::PathAndQuery::from_static(
538                "/nominal.ai.v1.KnowledgeBaseService/GenerateSummaryDescription",
539            );
540            let mut req = request.into_request();
541            req.extensions_mut()
542                .insert(
543                    GrpcMethod::new(
544                        "nominal.ai.v1.KnowledgeBaseService",
545                        "GenerateSummaryDescription",
546                    ),
547                );
548            self.inner.unary(req, path, codec).await
549        }
550    }
551}
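// Illustrative sketch (not part of the generated code): a round trip through the knowledge base
// service, creating or updating a knowledge base for an attachment and then listing the workspace's
// knowledge bases. The endpoint and rids are placeholders; an async runtime is assumed.
pub async fn example_knowledge_base_round_trip() -> Result<(), Box<dyn std::error::Error>> {
    use knowledge_base_service_client::KnowledgeBaseServiceClient;

    let mut client = KnowledgeBaseServiceClient::connect("http://localhost:50051").await?;

    let created = client
        .create_or_update_knowledge_base(CreateOrUpdateKnowledgeBaseRequest {
            attachment_rid: "attachment-rid-placeholder".to_string(),
            summary_description: "Glossary of channel names".to_string(),
            r#type: Some(KnowledgeBaseType::Embedding as i32),
        })
        .await?
        .into_inner();
    println!("knowledge base rid: {}", created.knowledge_base_rid);

    let listed = client
        .list(ListRequest {
            workspace_rid: "workspace-rid-placeholder".to_string(),
        })
        .await?
        .into_inner();
    println!("{} knowledge bases in workspace", listed.knowledge_bases.len());
    Ok(())
}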
552#[derive(Clone, PartialEq, ::prost::Message)]
553pub struct GetSnapshotRidByUserMessageIdRequest {
554    #[prost(string, tag = "1")]
555    pub conversation_rid: ::prost::alloc::string::String,
556    #[prost(string, tag = "2")]
557    pub message_id: ::prost::alloc::string::String,
558}
559/// Will return an empty response body when the message id exists but has no associated snapshot.
560/// This occurs when the message was sent in a non-workbook context.
561#[derive(Clone, PartialEq, ::prost::Message)]
562pub struct GetSnapshotRidByUserMessageIdResponse {
563    #[prost(string, optional, tag = "1")]
564    pub snapshot_rid: ::core::option::Option<::prost::alloc::string::String>,
565}
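// Illustrative sketch (not part of the generated code): interpreting the optional snapshot rid;
// absence means the message exists but was sent outside a workbook context.
pub fn example_describe_snapshot(resp: &GetSnapshotRidByUserMessageIdResponse) -> String {
    match &resp.snapshot_rid {
        Some(rid) => format!("snapshot: {rid}"),
        None => "message was sent in a non-workbook context".to_string(),
    }
}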
566/// ReadOnlyMode configures read-only mode where edit tools are shadowed/disabled
567#[derive(Clone, Copy, PartialEq, ::prost::Message)]
568pub struct ReadOnlyMode {}
569/// EditMode configures edit mode where all tools are available
570#[derive(Clone, Copy, PartialEq, ::prost::Message)]
571pub struct EditMode {}
572/// ConversationMode specifies the mode of the conversation
573#[derive(Clone, Copy, PartialEq, ::prost::Message)]
574pub struct ConversationMode {
575    #[prost(oneof = "conversation_mode::Mode", tags = "1, 2")]
576    pub mode: ::core::option::Option<conversation_mode::Mode>,
577}
578/// Nested message and enum types in `ConversationMode`.
579pub mod conversation_mode {
580    #[derive(Clone, Copy, PartialEq, ::prost::Oneof)]
581    pub enum Mode {
582        #[prost(message, tag = "1")]
583        ReadOnly(super::ReadOnlyMode),
584        #[prost(message, tag = "2")]
585        Edit(super::EditMode),
586    }
587}
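// Illustrative sketch (not part of the generated code): constructing the two conversation modes.
// The oneof is represented as an Option-wrapped enum, so a variant must be set explicitly.
pub fn example_conversation_modes() -> (ConversationMode, ConversationMode) {
    let read_only = ConversationMode {
        mode: Some(conversation_mode::Mode::ReadOnly(ReadOnlyMode {})),
    };
    let edit = ConversationMode {
        mode: Some(conversation_mode::Mode::Edit(EditMode {})),
    };
    (read_only, edit)
}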
588/// StreamChatRequest is a request to stream chat messages for the AI agent
589#[derive(Clone, PartialEq, ::prost::Message)]
590pub struct StreamChatRequest {
591    /// The conversation ID
592    #[prost(string, tag = "1")]
593    pub conversation_rid: ::prost::alloc::string::String,
594    /// The user message to append to the conversation
595    #[prost(message, optional, tag = "2")]
596    pub message: ::core::option::Option<UserModelMessage>,
597    /// Optional: image files to provide to the agent
598    #[prost(message, repeated, tag = "3")]
599    pub images: ::prost::alloc::vec::Vec<ImagePart>,
600    /// Context-specific fields based on the oneofKind.
601    #[prost(oneof = "stream_chat_request::Context", tags = "4, 5")]
602    pub context: ::core::option::Option<stream_chat_request::Context>,
603}
604/// Nested message and enum types in `StreamChatRequest`.
605pub mod stream_chat_request {
606    /// Context-specific fields based on the oneofKind.
607    #[derive(Clone, PartialEq, ::prost::Oneof)]
608    pub enum Context {
609        #[prost(message, tag = "4")]
610        Workbook(super::WorkbookContext),
611        #[prost(message, tag = "5")]
612        Global(super::GlobalContext),
613    }
614}
615/// WorkbookContext contains workbook-specific context fields
616#[derive(Clone, PartialEq, ::prost::Message)]
617pub struct WorkbookContext {
618    /// RID of the workbook to use for context
619    #[prost(string, tag = "1")]
620    pub workbook_rid: ::prost::alloc::string::String,
621    /// The user's presence in the workbook
622    #[prost(message, optional, tag = "2")]
623    pub user_presence: ::core::option::Option<WorkbookUserPresence>,
624}
625/// GlobalContext (no workbook-specific context)
626#[derive(Clone, Copy, PartialEq, ::prost::Message)]
627pub struct GlobalContext {}
628/// WorkbookUserPresence contains the user's presence in the workbook
629/// which is used to describe what the user is viewing at the time of the message.
630#[derive(Clone, Copy, PartialEq, ::prost::Message)]
631pub struct WorkbookUserPresence {
632    #[prost(int32, tag = "1")]
633    pub tab_index: i32,
634    #[prost(message, optional, tag = "2")]
635    pub range: ::core::option::Option<TimeRange>,
636}
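// Illustrative sketch (not part of the generated code): assembling a StreamChatRequest that carries
// workbook context and the user's current presence. Rids, tab index, and message text are
// placeholders; UserModelMessage, UserContentPart, and TextPart are defined further down this file.
pub fn example_stream_chat_request() -> StreamChatRequest {
    StreamChatRequest {
        conversation_rid: "conversation-rid-placeholder".to_string(),
        message: Some(UserModelMessage {
            text: vec![UserContentPart {
                part: Some(user_content_part::Part::Text(TextPart {
                    text: "Plot the engine temperature for the last run".to_string(),
                })),
            }],
        }),
        images: vec![],
        context: Some(stream_chat_request::Context::Workbook(WorkbookContext {
            workbook_rid: "workbook-rid-placeholder".to_string(),
            user_presence: Some(WorkbookUserPresence {
                tab_index: 0,
                range: None,
            }),
        })),
    }
}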
637/// CreateConversationRequest will create a new conversation thread.
638/// If old conversation id is not set, a brand new, empty chat is created.
639/// If old conversation id is set without a previous message id, the full conversation thread will be copied.
640/// If old conversation id is set together with a previous message id, the conversation thread up until that message will be copied;
641/// this case is useful for branching a conversation into a new thread.
642#[derive(Clone, PartialEq, ::prost::Message)]
643pub struct CreateConversationRequest {
644    #[prost(string, tag = "1")]
645    pub title: ::prost::alloc::string::String,
646    #[prost(string, tag = "2")]
647    pub workspace_rid: ::prost::alloc::string::String,
648    #[prost(string, optional, tag = "3")]
649    pub old_conversation_rid: ::core::option::Option<::prost::alloc::string::String>,
650    #[prost(string, optional, tag = "4")]
651    pub previous_message_id: ::core::option::Option<::prost::alloc::string::String>,
652    #[prost(message, optional, tag = "5")]
653    pub conversation_mode: ::core::option::Option<ConversationMode>,
654}
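// Illustrative sketch (not part of the generated code): branching an existing conversation at a
// specific message, following the CreateConversation semantics described above. The rids and the
// message id are placeholders.
pub fn example_branch_conversation_request() -> CreateConversationRequest {
    CreateConversationRequest {
        title: "Branch: alternative analysis".to_string(),
        workspace_rid: "workspace-rid-placeholder".to_string(),
        // Copy the thread from this conversation...
        old_conversation_rid: Some("conversation-rid-placeholder".to_string()),
        // ...but only up until this message, which becomes the branch point.
        previous_message_id: Some("message-id-placeholder".to_string()),
        conversation_mode: Some(ConversationMode {
            mode: Some(conversation_mode::Mode::Edit(EditMode {})),
        }),
    }
}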
655/// CreateConversationResponse will return the conversation id for the new conversation
656#[derive(Clone, PartialEq, ::prost::Message)]
657pub struct CreateConversationResponse {
658    #[prost(string, tag = "1")]
659    pub new_conversation_rid: ::prost::alloc::string::String,
660}
661/// Updates the fields if specified (optional means no change for that field)
662#[derive(Clone, PartialEq, ::prost::Message)]
663pub struct UpdateConversationMetadataRequest {
664    #[prost(string, optional, tag = "1")]
665    pub title: ::core::option::Option<::prost::alloc::string::String>,
666    #[prost(string, tag = "2")]
667    pub conversation_rid: ::prost::alloc::string::String,
668    #[prost(message, optional, tag = "3")]
669    pub conversation_mode: ::core::option::Option<ConversationMode>,
670}
671#[derive(Clone, Copy, PartialEq, ::prost::Message)]
672pub struct UpdateConversationMetadataResponse {}
673#[derive(Clone, PartialEq, ::prost::Message)]
674pub struct DeleteConversationRequest {
675    #[prost(string, tag = "1")]
676    pub conversation_rid: ::prost::alloc::string::String,
677}
678#[derive(Clone, Copy, PartialEq, ::prost::Message)]
679pub struct DeleteConversationResponse {}
680/// A GetConversationRequest retrieves a subset of messages from the conversation thread identified
681/// by the provided rid. To start from a particular message, you can also provide a message id.
682#[derive(Clone, PartialEq, ::prost::Message)]
683pub struct GetConversationRequest {
684    #[prost(string, tag = "1")]
685    pub conversation_rid: ::prost::alloc::string::String,
686    #[prost(string, optional, tag = "2")]
687    pub page_start_message_id: ::core::option::Option<::prost::alloc::string::String>,
688    #[prost(int32, optional, tag = "3")]
689    pub max_message_count: ::core::option::Option<i32>,
690}
691/// ModelMessageWithId pairs a message with its message ID so individual messages can be referenced
692#[derive(Clone, PartialEq, ::prost::Message)]
693pub struct ModelMessageWithId {
694    #[prost(string, tag = "3")]
695    pub message_id: ::prost::alloc::string::String,
696    /// Workbook agent user messages can have snapshot rids associated with them
697    #[prost(string, optional, tag = "4")]
698    pub snapshot_rid: ::core::option::Option<::prost::alloc::string::String>,
699    #[prost(oneof = "model_message_with_id::Content", tags = "1, 2")]
700    pub content: ::core::option::Option<model_message_with_id::Content>,
701}
702/// Nested message and enum types in `ModelMessageWithId`.
703pub mod model_message_with_id {
704    #[derive(Clone, PartialEq, ::prost::Oneof)]
705    pub enum Content {
706        #[prost(message, tag = "1")]
707        Message(super::ModelMessage),
708        #[prost(message, tag = "2")]
709        ToolAction(super::ToolAction),
710    }
711}
712#[derive(Clone, PartialEq, ::prost::Message)]
713pub struct GetConversationResponse {
714    #[prost(message, repeated, tag = "1")]
715    pub ordered_messages: ::prost::alloc::vec::Vec<ModelMessageWithId>,
716    #[prost(message, optional, tag = "2")]
717    pub conversation_metadata: ::core::option::Option<ConversationMetadata>,
718}
719/// Returns all conversation threads that this user has in this workspace
720#[derive(Clone, PartialEq, ::prost::Message)]
721pub struct ListConversationsRequest {
722    #[prost(string, tag = "1")]
723    pub workspace_rid: ::prost::alloc::string::String,
724}
725#[derive(Clone, PartialEq, ::prost::Message)]
726pub struct ConversationMetadata {
727    #[prost(string, tag = "1")]
728    pub conversation_rid: ::prost::alloc::string::String,
729    #[prost(string, tag = "2")]
730    pub title: ::prost::alloc::string::String,
731    #[prost(message, optional, tag = "3")]
732    pub created_at: ::core::option::Option<
733        super::super::super::google::protobuf::Timestamp,
734    >,
735    #[prost(message, optional, tag = "4")]
736    pub last_updated_at: ::core::option::Option<
737        super::super::super::google::protobuf::Timestamp,
738    >,
739    #[prost(message, optional, tag = "5")]
740    pub mode: ::core::option::Option<ConversationMode>,
741}
742/// ListConversationsResponse is a list of conversations whose rids can be used in a GetConversationRequest
743/// to fetch a full conversation from storage. These are ordered by creation time.
744#[derive(Clone, PartialEq, ::prost::Message)]
745pub struct ListConversationsResponse {
746    #[prost(message, repeated, tag = "1")]
747    pub conversations: ::prost::alloc::vec::Vec<ConversationMetadata>,
748}
749#[derive(Clone, Copy, PartialEq, ::prost::Message)]
750pub struct TimeRange {
751    #[prost(message, optional, tag = "1")]
752    pub range_start: ::core::option::Option<Timestamp>,
753    #[prost(message, optional, tag = "2")]
754    pub range_end: ::core::option::Option<Timestamp>,
755}
756#[derive(Clone, Copy, PartialEq, ::prost::Message)]
757pub struct Timestamp {
758    #[prost(int32, tag = "1")]
759    pub seconds: i32,
760    #[prost(int32, tag = "2")]
761    pub nanoseconds: i32,
762}
763/// ModelMessage is a discriminated union of system, user, assistant, and tool messages.
764/// Each message type has its own structure and content.
765#[derive(Clone, PartialEq, ::prost::Message)]
766pub struct ModelMessage {
767    #[prost(oneof = "model_message::Kind", tags = "1, 2")]
768    pub kind: ::core::option::Option<model_message::Kind>,
769}
770/// Nested message and enum types in `ModelMessage`.
771pub mod model_message {
772    #[derive(Clone, PartialEq, ::prost::Oneof)]
773    pub enum Kind {
774        #[prost(message, tag = "1")]
775        User(super::UserModelMessage),
776        #[prost(message, tag = "2")]
777        Assistant(super::AssistantModelMessage),
778    }
779}
780/// A user message containing text
781#[derive(Clone, PartialEq, ::prost::Message)]
782pub struct UserModelMessage {
783    #[prost(message, repeated, tag = "1")]
784    pub text: ::prost::alloc::vec::Vec<UserContentPart>,
785}
786/// An assistant message containing text
787#[derive(Clone, PartialEq, ::prost::Message)]
788pub struct AssistantModelMessage {
789    #[prost(message, repeated, tag = "1")]
790    pub content_parts: ::prost::alloc::vec::Vec<AssistantContentPart>,
791}
792#[derive(Clone, PartialEq, ::prost::Message)]
793pub struct UserContentPart {
794    #[prost(oneof = "user_content_part::Part", tags = "1")]
795    pub part: ::core::option::Option<user_content_part::Part>,
796}
797/// Nested message and enum types in `UserContentPart`.
798pub mod user_content_part {
799    #[derive(Clone, PartialEq, ::prost::Oneof)]
800    pub enum Part {
801        #[prost(message, tag = "1")]
802        Text(super::TextPart),
803    }
804}
805/// Content part for assistant messages: can be text, reasoning, or mutation.
806#[derive(Clone, PartialEq, ::prost::Message)]
807pub struct AssistantContentPart {
808    #[prost(oneof = "assistant_content_part::Part", tags = "1, 2")]
809    pub part: ::core::option::Option<assistant_content_part::Part>,
810}
811/// Nested message and enum types in `AssistantContentPart`.
812pub mod assistant_content_part {
813    #[derive(Clone, PartialEq, ::prost::Oneof)]
814    pub enum Part {
815        #[prost(message, tag = "1")]
816        Text(super::TextPart),
817        #[prost(message, tag = "2")]
818        Reasoning(super::ReasoningPart),
819    }
820}
821/// Text part for user or assistant messages.
822#[derive(Clone, PartialEq, ::prost::Message)]
823pub struct TextPart {
824    #[prost(string, tag = "1")]
825    pub text: ::prost::alloc::string::String,
826}
827/// User-supplied image part.
828#[derive(Clone, PartialEq, ::prost::Message)]
829pub struct ImagePart {
830    /// The base64-encoded image data
831    #[prost(bytes = "vec", tag = "1")]
832    pub data: ::prost::alloc::vec::Vec<u8>,
833    /// The media type of the image (e.g. "image/png", "image/jpeg")
834    #[prost(string, optional, tag = "2")]
835    pub media_type: ::core::option::Option<::prost::alloc::string::String>,
836    /// Optional: the filename of the image
837    #[prost(string, optional, tag = "3")]
838    pub filename: ::core::option::Option<::prost::alloc::string::String>,
839}
840/// Reasoning part for assistant messages.
841#[derive(Clone, PartialEq, ::prost::Message)]
842pub struct ReasoningPart {
843    #[prost(string, tag = "1")]
844    pub reasoning: ::prost::alloc::string::String,
845}
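// Illustrative sketch (not part of the generated code): the ModelMessage discriminated union in
// practice, with a user message made of text parts and an assistant message mixing reasoning and
// text parts. The message contents are placeholders.
pub fn example_model_messages() -> (ModelMessage, ModelMessage) {
    let user = ModelMessage {
        kind: Some(model_message::Kind::User(UserModelMessage {
            text: vec![UserContentPart {
                part: Some(user_content_part::Part::Text(TextPart {
                    text: "Why is channel X flat after t=120s?".to_string(),
                })),
            }],
        })),
    };
    let assistant = ModelMessage {
        kind: Some(model_message::Kind::Assistant(AssistantModelMessage {
            content_parts: vec![
                AssistantContentPart {
                    part: Some(assistant_content_part::Part::Reasoning(ReasoningPart {
                        reasoning: "Check the sensor dropout window first.".to_string(),
                    })),
                },
                AssistantContentPart {
                    part: Some(assistant_content_part::Part::Text(TextPart {
                        text: "The sensor dropped out at t=118s, so the channel holds its last value."
                            .to_string(),
                    })),
                },
            ],
        })),
    };
    (user, assistant)
}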
846/// StreamChatResponse is a discriminated union response to a StreamChatRequest
847#[derive(Clone, PartialEq, ::prost::Message)]
848pub struct StreamChatResponse {
849    #[prost(
850        oneof = "stream_chat_response::Response",
851        tags = "1, 2, 3, 4, 5, 6, 7, 8, 10"
852    )]
853    pub response: ::core::option::Option<stream_chat_response::Response>,
854}
855/// Nested message and enum types in `StreamChatResponse`.
856pub mod stream_chat_response {
857    #[derive(Clone, PartialEq, ::prost::Oneof)]
858    pub enum Response {
859        #[prost(message, tag = "1")]
860        Finish(super::Finish),
861        #[prost(message, tag = "2")]
862        Error(super::Error),
863        #[prost(message, tag = "3")]
864        TextStart(super::TextStart),
865        #[prost(message, tag = "4")]
866        TextDelta(super::TextDelta),
867        #[prost(message, tag = "5")]
868        TextEnd(super::TextEnd),
869        #[prost(message, tag = "6")]
870        ReasoningStart(super::ReasoningStart),
871        #[prost(message, tag = "7")]
872        ReasoningDelta(super::ReasoningDelta),
873        #[prost(message, tag = "8")]
874        ReasoningEnd(super::ReasoningEnd),
875        #[prost(message, tag = "10")]
876        ToolAction(super::ToolAction),
877    }
878}
879/// Indicates the end of a chat session
880#[derive(Clone, PartialEq, ::prost::Message)]
881pub struct Finish {
882    /// The message ids in order of all generated messages for this agent run
883    /// These ids can be used to branch a message from that specific message
884    #[prost(string, repeated, tag = "1")]
885    pub ordered_message_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
886    /// In the case that this is the first agent run in a conversation thread, we also
887    /// return the new conversation title generated
888    #[prost(string, optional, tag = "2")]
889    pub new_title: ::core::option::Option<::prost::alloc::string::String>,
890}
891/// An error that occurred during the chat session
892#[derive(Clone, PartialEq, ::prost::Message)]
893pub struct Error {
894    #[prost(string, tag = "1")]
895    pub message: ::prost::alloc::string::String,
896}
897/// Indicates the start of a text message from the agent
898#[derive(Clone, PartialEq, ::prost::Message)]
899pub struct TextStart {
900    /// Uniquely identifies the text message (e.g. a uuid) so that the client can
901    /// merge parallel message streams if they occur.
902    #[prost(string, tag = "1")]
903    pub id: ::prost::alloc::string::String,
904}
905/// A delta (continuation) of a text message from the agent
906#[derive(Clone, PartialEq, ::prost::Message)]
907pub struct TextDelta {
908    #[prost(string, tag = "1")]
909    pub id: ::prost::alloc::string::String,
910    /// The next chunk of text
911    #[prost(string, tag = "2")]
912    pub delta: ::prost::alloc::string::String,
913}
914/// Indicates the end of a text message from the agent
915#[derive(Clone, PartialEq, ::prost::Message)]
916pub struct TextEnd {
917    #[prost(string, tag = "1")]
918    pub id: ::prost::alloc::string::String,
919}
920/// Indicates the start of a reasoning message from the agent
921#[derive(Clone, PartialEq, ::prost::Message)]
922pub struct ReasoningStart {
923    #[prost(string, tag = "1")]
924    pub id: ::prost::alloc::string::String,
925}
926/// A delta (continuation) of a reasoning message from the agent
927#[derive(Clone, PartialEq, ::prost::Message)]
928pub struct ReasoningDelta {
929    #[prost(string, tag = "1")]
930    pub id: ::prost::alloc::string::String,
931    /// The next chunk of reasoning
932    #[prost(string, tag = "2")]
933    pub delta: ::prost::alloc::string::String,
934}
935/// Indicates the end of a reasoning message from the agent
936#[derive(Clone, PartialEq, ::prost::Message)]
937pub struct ReasoningEnd {
938    #[prost(string, tag = "1")]
939    pub id: ::prost::alloc::string::String,
940}
941/// ToolAction is a concise description of a tool call that the agent is making internally.
942/// Without revealing too much detail about the tool call, it informs the user what the agent is doing
943/// at a high level. The format is `{tool_action_verb} {tool_target}`, for example:
944/// "Search channels for My Datasource"
945#[derive(Clone, PartialEq, ::prost::Message)]
946pub struct ToolAction {
947    #[prost(string, tag = "1")]
948    pub id: ::prost::alloc::string::String,
949    /// "Thought", "Read", "Find", "Look-up", etc.
950    #[prost(string, tag = "2")]
951    pub tool_action_verb: ::prost::alloc::string::String,
952    /// "workbook", "channel", "variable", "panel", etc.
953    #[prost(string, optional, tag = "3")]
954    pub tool_target: ::core::option::Option<::prost::alloc::string::String>,
955}
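// Illustrative sketch (not part of the generated code): folding streamed response events into
// displayable text. Only a few variants are handled; reasoning and finish events are ignored here.
pub fn example_apply_stream_event(buffer: &mut String, event: StreamChatResponse) {
    use stream_chat_response::Response;
    match event.response {
        Some(Response::TextDelta(delta)) => buffer.push_str(&delta.delta),
        Some(Response::TextEnd(_)) => buffer.push('\n'),
        Some(Response::ToolAction(action)) => {
            // Rendered as, e.g., "[Search channels for My Datasource]".
            buffer.push_str(&format!(
                "[{} {}]\n",
                action.tool_action_verb,
                action.tool_target.unwrap_or_default()
            ));
        }
        Some(Response::Error(err)) => buffer.push_str(&format!("error: {}\n", err.message)),
        // TextStart, reasoning events, Finish, and an unset oneof are ignored in this sketch.
        _ => {}
    }
}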
956/// Generated client implementations.
957pub mod ai_agent_service_client {
958    #![allow(
959        unused_variables,
960        dead_code,
961        missing_docs,
962        clippy::wildcard_imports,
963        clippy::let_unit_value,
964    )]
965    use tonic::codegen::*;
966    use tonic::codegen::http::Uri;
967    /// AIAgentService provides AI-powered assistance for general operations
968    #[derive(Debug, Clone)]
969    pub struct AiAgentServiceClient<T> {
970        inner: tonic::client::Grpc<T>,
971    }
972    impl AiAgentServiceClient<tonic::transport::Channel> {
973        /// Attempt to create a new client by connecting to a given endpoint.
974        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
975        where
976            D: TryInto<tonic::transport::Endpoint>,
977            D::Error: Into<StdError>,
978        {
979            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
980            Ok(Self::new(conn))
981        }
982    }
983    impl<T> AiAgentServiceClient<T>
984    where
985        T: tonic::client::GrpcService<tonic::body::Body>,
986        T::Error: Into<StdError>,
987        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
988        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
989    {
990        pub fn new(inner: T) -> Self {
991            let inner = tonic::client::Grpc::new(inner);
992            Self { inner }
993        }
994        pub fn with_origin(inner: T, origin: Uri) -> Self {
995            let inner = tonic::client::Grpc::with_origin(inner, origin);
996            Self { inner }
997        }
998        pub fn with_interceptor<F>(
999            inner: T,
1000            interceptor: F,
1001        ) -> AiAgentServiceClient<InterceptedService<T, F>>
1002        where
1003            F: tonic::service::Interceptor,
1004            T::ResponseBody: Default,
1005            T: tonic::codegen::Service<
1006                http::Request<tonic::body::Body>,
1007                Response = http::Response<
1008                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
1009                >,
1010            >,
1011            <T as tonic::codegen::Service<
1012                http::Request<tonic::body::Body>,
1013            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
1014        {
1015            AiAgentServiceClient::new(InterceptedService::new(inner, interceptor))
1016        }
1017        /// Compress requests with the given encoding.
1018        ///
1019        /// This requires the server to support it otherwise it might respond with an
1020        /// error.
1021        #[must_use]
1022        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
1023            self.inner = self.inner.send_compressed(encoding);
1024            self
1025        }
1026        /// Enable decompressing responses.
1027        #[must_use]
1028        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
1029            self.inner = self.inner.accept_compressed(encoding);
1030            self
1031        }
1032        /// Limits the maximum size of a decoded message.
1033        ///
1034        /// Default: `4MB`
1035        #[must_use]
1036        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
1037            self.inner = self.inner.max_decoding_message_size(limit);
1038            self
1039        }
1040        /// Limits the maximum size of an encoded message.
1041        ///
1042        /// Default: `usize::MAX`
1043        #[must_use]
1044        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
1045            self.inner = self.inner.max_encoding_message_size(limit);
1046            self
1047        }
1048        /// StreamChat handles server-streaming chat for the AI agent
1049        pub async fn stream_chat(
1050            &mut self,
1051            request: impl tonic::IntoRequest<super::StreamChatRequest>,
1052        ) -> std::result::Result<
1053            tonic::Response<tonic::codec::Streaming<super::StreamChatResponse>>,
1054            tonic::Status,
1055        > {
1056            self.inner
1057                .ready()
1058                .await
1059                .map_err(|e| {
1060                    tonic::Status::unknown(
1061                        format!("Service was not ready: {}", e.into()),
1062                    )
1063                })?;
1064            let codec = tonic::codec::ProstCodec::default();
1065            let path = http::uri::PathAndQuery::from_static(
1066                "/nominal.ai.v1.AIAgentService/StreamChat",
1067            );
1068            let mut req = request.into_request();
1069            req.extensions_mut()
1070                .insert(GrpcMethod::new("nominal.ai.v1.AIAgentService", "StreamChat"));
1071            self.inner.server_streaming(req, path, codec).await
1072        }
1073        /// GetConversation retrieves a conversation's messages, with an optional limit on the number of messages returned
1074        pub async fn get_conversation(
1075            &mut self,
1076            request: impl tonic::IntoRequest<super::GetConversationRequest>,
1077        ) -> std::result::Result<
1078            tonic::Response<super::GetConversationResponse>,
1079            tonic::Status,
1080        > {
1081            self.inner
1082                .ready()
1083                .await
1084                .map_err(|e| {
1085                    tonic::Status::unknown(
1086                        format!("Service was not ready: {}", e.into()),
1087                    )
1088                })?;
1089            let codec = tonic::codec::ProstCodec::default();
1090            let path = http::uri::PathAndQuery::from_static(
1091                "/nominal.ai.v1.AIAgentService/GetConversation",
1092            );
1093            let mut req = request.into_request();
1094            req.extensions_mut()
1095                .insert(
1096                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "GetConversation"),
1097                );
1098            self.inner.unary(req, path, codec).await
1099        }
1100        /// ListConversations returns the user's conversations in the workspace, ordered by most recently updated
1101        pub async fn list_conversations(
1102            &mut self,
1103            request: impl tonic::IntoRequest<super::ListConversationsRequest>,
1104        ) -> std::result::Result<
1105            tonic::Response<super::ListConversationsResponse>,
1106            tonic::Status,
1107        > {
1108            self.inner
1109                .ready()
1110                .await
1111                .map_err(|e| {
1112                    tonic::Status::unknown(
1113                        format!("Service was not ready: {}", e.into()),
1114                    )
1115                })?;
1116            let codec = tonic::codec::ProstCodec::default();
1117            let path = http::uri::PathAndQuery::from_static(
1118                "/nominal.ai.v1.AIAgentService/ListConversations",
1119            );
1120            let mut req = request.into_request();
1121            req.extensions_mut()
1122                .insert(
1123                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "ListConversations"),
1124                );
1125            self.inner.unary(req, path, codec).await
1126        }
1127        /// CreateConversation handles creating a conversation and assigning it a conversation rid
1128        pub async fn create_conversation(
1129            &mut self,
1130            request: impl tonic::IntoRequest<super::CreateConversationRequest>,
1131        ) -> std::result::Result<
1132            tonic::Response<super::CreateConversationResponse>,
1133            tonic::Status,
1134        > {
1135            self.inner
1136                .ready()
1137                .await
1138                .map_err(|e| {
1139                    tonic::Status::unknown(
1140                        format!("Service was not ready: {}", e.into()),
1141                    )
1142                })?;
1143            let codec = tonic::codec::ProstCodec::default();
1144            let path = http::uri::PathAndQuery::from_static(
1145                "/nominal.ai.v1.AIAgentService/CreateConversation",
1146            );
1147            let mut req = request.into_request();
1148            req.extensions_mut()
1149                .insert(
1150                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "CreateConversation"),
1151                );
1152            self.inner.unary(req, path, codec).await
1153        }
1154        /// UpdateConversationMetadata handles updating any metadata associated with a conversation
1155        pub async fn update_conversation_metadata(
1156            &mut self,
1157            request: impl tonic::IntoRequest<super::UpdateConversationMetadataRequest>,
1158        ) -> std::result::Result<
1159            tonic::Response<super::UpdateConversationMetadataResponse>,
1160            tonic::Status,
1161        > {
1162            self.inner
1163                .ready()
1164                .await
1165                .map_err(|e| {
1166                    tonic::Status::unknown(
1167                        format!("Service was not ready: {}", e.into()),
1168                    )
1169                })?;
1170            let codec = tonic::codec::ProstCodec::default();
1171            let path = http::uri::PathAndQuery::from_static(
1172                "/nominal.ai.v1.AIAgentService/UpdateConversationMetadata",
1173            );
1174            let mut req = request.into_request();
1175            req.extensions_mut()
1176                .insert(
1177                    GrpcMethod::new(
1178                        "nominal.ai.v1.AIAgentService",
1179                        "UpdateConversationMetadata",
1180                    ),
1181                );
1182            self.inner.unary(req, path, codec).await
1183        }
1184        /// DeleteConversation handles deleting a specific conversation by conversation rid
1185        pub async fn delete_conversation(
1186            &mut self,
1187            request: impl tonic::IntoRequest<super::DeleteConversationRequest>,
1188        ) -> std::result::Result<
1189            tonic::Response<super::DeleteConversationResponse>,
1190            tonic::Status,
1191        > {
1192            self.inner
1193                .ready()
1194                .await
1195                .map_err(|e| {
1196                    tonic::Status::unknown(
1197                        format!("Service was not ready: {}", e.into()),
1198                    )
1199                })?;
1200            let codec = tonic::codec::ProstCodec::default();
1201            let path = http::uri::PathAndQuery::from_static(
1202                "/nominal.ai.v1.AIAgentService/DeleteConversation",
1203            );
1204            let mut req = request.into_request();
1205            req.extensions_mut()
1206                .insert(
1207                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "DeleteConversation"),
1208                );
1209            self.inner.unary(req, path, codec).await
1210        }
1211        /// GetSnapshotRidByUserMessageId resolves the snapshot rid of the workbook at the time the message was sent
1212        pub async fn get_snapshot_rid_by_user_message_id(
1213            &mut self,
1214            request: impl tonic::IntoRequest<super::GetSnapshotRidByUserMessageIdRequest>,
1215        ) -> std::result::Result<
1216            tonic::Response<super::GetSnapshotRidByUserMessageIdResponse>,
1217            tonic::Status,
1218        > {
1219            self.inner
1220                .ready()
1221                .await
1222                .map_err(|e| {
1223                    tonic::Status::unknown(
1224                        format!("Service was not ready: {}", e.into()),
1225                    )
1226                })?;
1227            let codec = tonic::codec::ProstCodec::default();
1228            let path = http::uri::PathAndQuery::from_static(
1229                "/nominal.ai.v1.AIAgentService/GetSnapshotRidByUserMessageId",
1230            );
1231            let mut req = request.into_request();
1232            req.extensions_mut()
1233                .insert(
1234                    GrpcMethod::new(
1235                        "nominal.ai.v1.AIAgentService",
1236                        "GetSnapshotRidByUserMessageId",
1237                    ),
1238                );
1239            self.inner.unary(req, path, codec).await
1240        }
1241    }
1242}
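// Illustrative sketch (not part of the generated code): creating a conversation and streaming one
// chat turn with the AI agent service, printing text deltas as they arrive. The endpoint, rids, and
// message text are placeholders; an async runtime such as tokio is assumed.
pub async fn example_stream_chat() -> Result<(), Box<dyn std::error::Error>> {
    use ai_agent_service_client::AiAgentServiceClient;

    let mut client = AiAgentServiceClient::connect("http://localhost:50051").await?;

    // Start a fresh conversation thread in the workspace.
    let conversation = client
        .create_conversation(CreateConversationRequest {
            title: "Untitled conversation".to_string(),
            workspace_rid: "workspace-rid-placeholder".to_string(),
            old_conversation_rid: None,
            previous_message_id: None,
            conversation_mode: None,
        })
        .await?
        .into_inner();

    // Send one user message with global (non-workbook) context and consume the response stream.
    let mut stream = client
        .stream_chat(StreamChatRequest {
            conversation_rid: conversation.new_conversation_rid,
            message: Some(UserModelMessage {
                text: vec![UserContentPart {
                    part: Some(user_content_part::Part::Text(TextPart {
                        text: "Summarize the last test run".to_string(),
                    })),
                }],
            }),
            images: vec![],
            context: Some(stream_chat_request::Context::Global(GlobalContext {})),
        })
        .await?
        .into_inner();

    while let Some(event) = stream.message().await? {
        if let Some(stream_chat_response::Response::TextDelta(delta)) = event.response {
            print!("{}", delta.delta);
        }
    }
    Ok(())
}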