nominal_api/proto/nominal.ai.v1.rs

// This file is @generated by prost-build.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct GetProviderStatusRequest {}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct GetProviderStatusResponse {
    /// Timestamp when the last status was determined
    #[prost(message, optional, tag = "1")]
    pub timestamp: ::core::option::Option<
        super::super::super::google::protobuf::Timestamp,
    >,
    /// Status of the most recent health check probe
    #[prost(message, optional, tag = "2")]
    pub last_status: ::core::option::Option<ProviderStatus>,
    /// Aggregated status over the last 30 minutes (DEGRADED if any check failed or exceeded thresholds)
    /// Deprecated: Use aggregated_status instead. This field is kept for backward compatibility.
    #[deprecated]
    #[prost(message, optional, tag = "3")]
    pub aggregated_status_over_last_30m: ::core::option::Option<ProviderStatus>,
    /// Aggregated status over the most recent health-check iterations, with the iteration count configurable in the backend (DEGRADED if any check failed or exceeded thresholds)
    #[prost(message, optional, tag = "4")]
    pub aggregated_status: ::core::option::Option<ProviderStatus>,
}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct ProviderStatus {
    #[prost(oneof = "provider_status::Status", tags = "1, 2")]
    pub status: ::core::option::Option<provider_status::Status>,
}
/// Nested message and enum types in `ProviderStatus`.
pub mod provider_status {
    #[derive(Clone, Copy, PartialEq, ::prost::Oneof)]
    pub enum Status {
        #[prost(message, tag = "1")]
        Healthy(super::Healthy),
        #[prost(message, tag = "2")]
        Degraded(super::Degraded),
    }
}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct Healthy {}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct Degraded {
    #[prost(enumeration = "DegradationReason", tag = "1")]
    pub reason: i32,
}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct ProviderMetrics {
    #[prost(int32, tag = "1")]
    pub time_to_first_token_ms: i32,
    #[prost(int32, tag = "2")]
    pub total_time_ms: i32,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum DegradationReason {
    Unspecified = 0,
    HighLatency = 1,
    Failures = 2,
    HighLatencyAndFailures = 3,
}
impl DegradationReason {
    /// String value of the enum field names used in the ProtoBuf definition.
    ///
    /// The values are not transformed in any way and thus are considered stable
    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
    pub fn as_str_name(&self) -> &'static str {
        match self {
            Self::Unspecified => "DEGRADATION_REASON_UNSPECIFIED",
            Self::HighLatency => "DEGRADATION_REASON_HIGH_LATENCY",
            Self::Failures => "DEGRADATION_REASON_FAILURES",
            Self::HighLatencyAndFailures => {
                "DEGRADATION_REASON_HIGH_LATENCY_AND_FAILURES"
            }
        }
    }
    /// Creates an enum from field names used in the ProtoBuf definition.
    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
        match value {
            "DEGRADATION_REASON_UNSPECIFIED" => Some(Self::Unspecified),
            "DEGRADATION_REASON_HIGH_LATENCY" => Some(Self::HighLatency),
            "DEGRADATION_REASON_FAILURES" => Some(Self::Failures),
            "DEGRADATION_REASON_HIGH_LATENCY_AND_FAILURES" => {
                Some(Self::HighLatencyAndFailures)
            }
            _ => None,
        }
    }
}
/// Generated client implementations.
pub mod model_provider_health_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// ModelProviderHealthService monitors the health and performance of the backing LLM model provider.
    /// It runs lightweight health checks every 5 minutes to measure provider responsiveness and reliability,
    /// independent of the complexity of user prompts.
    #[derive(Debug, Clone)]
    pub struct ModelProviderHealthServiceClient<T> {
        inner: tonic::client::Grpc<T>,
    }
    impl ModelProviderHealthServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> ModelProviderHealthServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> ModelProviderHealthServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            ModelProviderHealthServiceClient::new(
                InterceptedService::new(inner, interceptor),
            )
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// GetProviderStatus returns the current health status of the model provider based on recent health checks.
        /// The status is HEALTHY if all checks in the last 30 minutes passed latency thresholds,
        /// or DEGRADED if any checks exceeded latency thresholds or failed entirely.
        pub async fn get_provider_status(
            &mut self,
            request: impl tonic::IntoRequest<super::GetProviderStatusRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GetProviderStatusResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.ModelProviderHealthService/GetProviderStatus",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.ModelProviderHealthService",
                        "GetProviderStatus",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}
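// Illustrative usage sketch (not generated code): shows how a caller might query the
// provider status with the client above and inspect the aggregated status. The endpoint
// URL and the function name are hypothetical placeholders, not part of this API.
#[allow(dead_code)]
async fn example_check_provider_status() -> Result<(), Box<dyn std::error::Error>> {
    use model_provider_health_service_client::ModelProviderHealthServiceClient;
    // Assumes a reachable gRPC endpoint; replace with the real service address.
    let mut client = ModelProviderHealthServiceClient::connect("http://localhost:50051").await?;
    let response = client.get_provider_status(GetProviderStatusRequest {}).await?;
    let status = response.into_inner();
    // Prefer the configurable `aggregated_status` over the deprecated 30-minute field.
    match status.aggregated_status.and_then(|s| s.status) {
        Some(provider_status::Status::Healthy(_)) => println!("provider is healthy"),
        Some(provider_status::Status::Degraded(degraded)) => {
            // `reason` is stored as an i32; compare against the enum's discriminants.
            if degraded.reason == DegradationReason::HighLatency as i32 {
                println!("provider degraded: high latency");
            } else {
                println!("provider degraded (reason code {})", degraded.reason);
            }
        }
        None => println!("no aggregated status reported"),
    }
    Ok(())
}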
/// CreateOrUpdateKnowledgeBaseRequest is a request to create a knowledge base from an attachment in the attachment's
/// workspace. Subsequent calls for the same attachment will overwrite the existing knowledge base.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateOrUpdateKnowledgeBaseRequest {
    #[prost(string, tag = "1")]
    pub attachment_rid: ::prost::alloc::string::String,
    /// summary of the knowledge base, will be used by the LLM to decide when to use it
    #[prost(string, tag = "2")]
    pub summary_description: ::prost::alloc::string::String,
    #[prost(enumeration = "KnowledgeBaseType", optional, tag = "3")]
    pub r#type: ::core::option::Option<i32>,
}
/// CreateOrUpdateKnowledgeBaseResponse is the response to creating/updating a knowledge base
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateOrUpdateKnowledgeBaseResponse {
    #[prost(string, tag = "1")]
    pub knowledge_base_rid: ::prost::alloc::string::String,
}
/// KnowledgeBase represents a knowledge base entry
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct KnowledgeBase {
    #[prost(string, tag = "1")]
    pub knowledge_base_rid: ::prost::alloc::string::String,
    #[prost(string, tag = "2")]
    pub attachment_rid: ::prost::alloc::string::String,
    #[prost(string, tag = "3")]
    pub workspace_rid: ::prost::alloc::string::String,
    #[prost(string, tag = "4")]
    pub summary_description: ::prost::alloc::string::String,
    #[prost(enumeration = "KnowledgeBaseType", tag = "5")]
    pub r#type: i32,
    #[prost(int32, tag = "6")]
    pub version: i32,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListRequest {
    #[prost(string, tag = "1")]
    pub workspace_rid: ::prost::alloc::string::String,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListResponse {
    #[prost(message, repeated, tag = "1")]
    pub knowledge_bases: ::prost::alloc::vec::Vec<KnowledgeBase>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteRequest {
    #[prost(string, tag = "1")]
    pub knowledge_base_rid: ::prost::alloc::string::String,
}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct DeleteResponse {
    #[prost(bool, tag = "1")]
    pub success: bool,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetBatchRequest {
    #[prost(string, repeated, tag = "1")]
    pub knowledge_base_rids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetBatchResponse {
    #[prost(message, repeated, tag = "1")]
    pub knowledge_bases: ::prost::alloc::vec::Vec<KnowledgeBase>,
}
/// GenerateSummaryDescription intentionally returns the generated description to the frontend rather than
/// storing it in the knowledge base directly, because the description needs to be accepted by the user
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GenerateSummaryDescriptionRequest {
    #[prost(string, tag = "1")]
    pub attachment_rid: ::prost::alloc::string::String,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GenerateSummaryDescriptionResponse {
    #[prost(string, tag = "1")]
    pub summary_description: ::prost::alloc::string::String,
}
/// KnowledgeBaseType defines the types of knowledge base
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum KnowledgeBaseType {
    /// defaults to PROMPT
    Unspecified = 0,
    /// knowledge base gets added directly to prompt (needs to be small enough!)
    Prompt = 1,
    /// knowledge base gets used via vector search on embeddings
    Embedding = 2,
}
impl KnowledgeBaseType {
    /// String value of the enum field names used in the ProtoBuf definition.
    ///
    /// The values are not transformed in any way and thus are considered stable
    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
    pub fn as_str_name(&self) -> &'static str {
        match self {
            Self::Unspecified => "KNOWLEDGE_BASE_TYPE_UNSPECIFIED",
            Self::Prompt => "KNOWLEDGE_BASE_TYPE_PROMPT",
            Self::Embedding => "KNOWLEDGE_BASE_TYPE_EMBEDDING",
        }
    }
    /// Creates an enum from field names used in the ProtoBuf definition.
    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
        match value {
            "KNOWLEDGE_BASE_TYPE_UNSPECIFIED" => Some(Self::Unspecified),
            "KNOWLEDGE_BASE_TYPE_PROMPT" => Some(Self::Prompt),
            "KNOWLEDGE_BASE_TYPE_EMBEDDING" => Some(Self::Embedding),
            _ => None,
        }
    }
}
/// Generated client implementations.
pub mod knowledge_base_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// KnowledgeBaseService provides AI-powered knowledge base management
    #[derive(Debug, Clone)]
    pub struct KnowledgeBaseServiceClient<T> {
        inner: tonic::client::Grpc<T>,
    }
    impl KnowledgeBaseServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> KnowledgeBaseServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> KnowledgeBaseServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            KnowledgeBaseServiceClient::new(InterceptedService::new(inner, interceptor))
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// CreateOrUpdateKnowledgeBase creates a knowledge base in the workspace
        pub async fn create_or_update_knowledge_base(
            &mut self,
            request: impl tonic::IntoRequest<super::CreateOrUpdateKnowledgeBaseRequest>,
        ) -> std::result::Result<
            tonic::Response<super::CreateOrUpdateKnowledgeBaseResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/CreateOrUpdateKnowledgeBase",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.KnowledgeBaseService",
                        "CreateOrUpdateKnowledgeBase",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// List returns all knowledge bases in the specified workspace
        pub async fn list(
            &mut self,
            request: impl tonic::IntoRequest<super::ListRequest>,
        ) -> std::result::Result<tonic::Response<super::ListResponse>, tonic::Status> {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/List",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "List"));
            self.inner.unary(req, path, codec).await
        }
        /// Delete removes a knowledge base by its RID
        pub async fn delete(
            &mut self,
            request: impl tonic::IntoRequest<super::DeleteRequest>,
        ) -> std::result::Result<tonic::Response<super::DeleteResponse>, tonic::Status> {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/Delete",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "Delete"));
            self.inner.unary(req, path, codec).await
        }
        /// GetBatch retrieves multiple knowledge bases by their RIDs
        pub async fn get_batch(
            &mut self,
            request: impl tonic::IntoRequest<super::GetBatchRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GetBatchResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/GetBatch",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "GetBatch"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// GenerateSummaryDescription generates a summary description for an attachment rid
        pub async fn generate_summary_description(
            &mut self,
            request: impl tonic::IntoRequest<super::GenerateSummaryDescriptionRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GenerateSummaryDescriptionResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/GenerateSummaryDescription",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.KnowledgeBaseService",
                        "GenerateSummaryDescription",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}
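// Illustrative usage sketch (not generated code): registers an attachment as a knowledge
// base and then lists the knowledge bases in a workspace. The RIDs, endpoint URL, and
// function name are hypothetical placeholders.
#[allow(dead_code)]
async fn example_knowledge_base_roundtrip() -> Result<(), Box<dyn std::error::Error>> {
    use knowledge_base_service_client::KnowledgeBaseServiceClient;
    let mut client = KnowledgeBaseServiceClient::connect("http://localhost:50051").await?;
    // Create (or overwrite) the knowledge base backing this attachment. Small documents
    // can be injected directly into the prompt; larger ones should use embeddings.
    let create = CreateOrUpdateKnowledgeBaseRequest {
        attachment_rid: "ri.attachment.example".to_string(),
        summary_description: "Sensor calibration procedures for the test rig".to_string(),
        r#type: Some(KnowledgeBaseType::Embedding as i32),
    };
    let created = client.create_or_update_knowledge_base(create).await?.into_inner();
    println!("knowledge base rid: {}", created.knowledge_base_rid);
    // List everything registered in the workspace.
    let list = client
        .list(ListRequest { workspace_rid: "ri.workspace.example".to_string() })
        .await?
        .into_inner();
    for kb in list.knowledge_bases {
        println!("{} (v{}): {}", kb.knowledge_base_rid, kb.version, kb.summary_description);
    }
    Ok(())
}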
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetSnapshotRidByUserMessageIdRequest {
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    #[prost(string, tag = "2")]
    pub message_id: ::prost::alloc::string::String,
}
/// Returns an empty response body when the message id exists but has no associated snapshot.
/// This occurs when the message was sent in a non-workbook context.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetSnapshotRidByUserMessageIdResponse {
    #[prost(string, optional, tag = "1")]
    pub snapshot_rid: ::core::option::Option<::prost::alloc::string::String>,
}
/// ReadOnlyMode configures read-only mode where edit tools are shadowed/disabled
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct ReadOnlyMode {}
/// EditMode configures edit mode where all tools are available
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct EditMode {}
/// ConversationMode specifies the mode of the conversation
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct ConversationMode {
    #[prost(oneof = "conversation_mode::Mode", tags = "1, 2")]
    pub mode: ::core::option::Option<conversation_mode::Mode>,
}
/// Nested message and enum types in `ConversationMode`.
pub mod conversation_mode {
    #[derive(Clone, Copy, PartialEq, ::prost::Oneof)]
    pub enum Mode {
        #[prost(message, tag = "1")]
        ReadOnly(super::ReadOnlyMode),
        #[prost(message, tag = "2")]
        Edit(super::EditMode),
    }
}
/// StreamChatRequest is a request to stream chat messages for the AI agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StreamChatRequest {
    /// The conversation ID
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    /// The user message to append to the conversation
    #[prost(message, optional, tag = "2")]
    pub message: ::core::option::Option<UserModelMessage>,
    /// Optional: image files to provide to the agent
    #[prost(message, repeated, tag = "3")]
    pub images: ::prost::alloc::vec::Vec<ImagePart>,
    /// Context-specific fields based on the oneofKind.
    #[prost(oneof = "stream_chat_request::Context", tags = "4, 5")]
    pub context: ::core::option::Option<stream_chat_request::Context>,
}
/// Nested message and enum types in `StreamChatRequest`.
pub mod stream_chat_request {
    /// Context-specific fields based on the oneofKind.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Context {
        #[prost(message, tag = "4")]
        Workbook(super::WorkbookContext),
        #[prost(message, tag = "5")]
        Global(super::GlobalContext),
    }
}
/// WorkbookContext contains workbook-specific context fields
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct WorkbookContext {
    /// RID of the workbook to use for context
    #[prost(string, tag = "1")]
    pub workbook_rid: ::prost::alloc::string::String,
    /// The user's presence in the workbook
    #[prost(message, optional, tag = "2")]
    pub user_presence: ::core::option::Option<WorkbookUserPresence>,
}
/// GlobalContext (no workbook-specific context)
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct GlobalContext {}
/// WorkbookUserPresence contains the user's presence in the workbook
/// which is used to describe what the user is viewing at the time of the message.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct WorkbookUserPresence {
    #[prost(int32, tag = "1")]
    pub tab_index: i32,
    #[prost(message, optional, tag = "2")]
    pub range: ::core::option::Option<TimeRange>,
}
/// CreateConversationRequest creates a new conversation thread.
/// If old conversation id is not set, a brand new, clear chat is created.
/// If old conversation id is set without a previous message id, the full conversation thread will be copied.
/// If old conversation id is set with a previous message id, the conversation thread up until that message will be copied;
/// this case is useful for branching a conversation into a new thread.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateConversationRequest {
    #[prost(string, tag = "1")]
    pub title: ::prost::alloc::string::String,
    #[prost(string, tag = "2")]
    pub workspace_rid: ::prost::alloc::string::String,
    #[prost(string, optional, tag = "3")]
    pub old_conversation_rid: ::core::option::Option<::prost::alloc::string::String>,
    #[prost(string, optional, tag = "4")]
    pub previous_message_id: ::core::option::Option<::prost::alloc::string::String>,
    #[prost(message, optional, tag = "5")]
    pub conversation_mode: ::core::option::Option<ConversationMode>,
}
/// CreateConversationResponse will return the conversation id for the new conversation
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateConversationResponse {
    #[prost(string, tag = "1")]
    pub new_conversation_rid: ::prost::alloc::string::String,
}
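// Illustrative sketch (not generated code): builds a CreateConversationRequest that
// branches an existing thread at a specific message, in read-only mode. The RIDs,
// message id, and function name are hypothetical placeholders; the request would be
// sent with the AiAgentServiceClient defined later in this file.
#[allow(dead_code)]
fn example_branch_conversation_request() -> CreateConversationRequest {
    CreateConversationRequest {
        title: "Branch: investigate pressure drop".to_string(),
        workspace_rid: "ri.workspace.example".to_string(),
        // Copy the existing thread up to the given message and continue in a new conversation.
        old_conversation_rid: Some("ri.conversation.example".to_string()),
        previous_message_id: Some("message-id-example".to_string()),
        conversation_mode: Some(ConversationMode {
            mode: Some(conversation_mode::Mode::ReadOnly(ReadOnlyMode {})),
        }),
    }
}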
/// Updates the fields if specified (optional means no change for that field)
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UpdateConversationMetadataRequest {
    #[prost(string, optional, tag = "1")]
    pub title: ::core::option::Option<::prost::alloc::string::String>,
    #[prost(string, tag = "2")]
    pub conversation_rid: ::prost::alloc::string::String,
    #[prost(message, optional, tag = "3")]
    pub conversation_mode: ::core::option::Option<ConversationMode>,
}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct UpdateConversationMetadataResponse {}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteConversationRequest {
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct DeleteConversationResponse {}
/// A GetConversationRequest retrieves a subset of messages from the conversation thread represented
/// by the provided rid. To start from a particular message, you can also provide a message id.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetConversationRequest {
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    #[prost(string, optional, tag = "2")]
    pub page_start_message_id: ::core::option::Option<::prost::alloc::string::String>,
    #[prost(int32, optional, tag = "3")]
    pub max_message_count: ::core::option::Option<i32>,
}
/// ModelMessageWithId pairs a message with its message ID
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ModelMessageWithId {
    #[prost(string, tag = "3")]
    pub message_id: ::prost::alloc::string::String,
    /// WB agent user messages can have snapshot rids associated with them
    #[prost(string, optional, tag = "4")]
    pub snapshot_rid: ::core::option::Option<::prost::alloc::string::String>,
    #[prost(oneof = "model_message_with_id::Content", tags = "1, 2")]
    pub content: ::core::option::Option<model_message_with_id::Content>,
}
/// Nested message and enum types in `ModelMessageWithId`.
pub mod model_message_with_id {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Content {
        #[prost(message, tag = "1")]
        Message(super::ModelMessage),
        #[prost(message, tag = "2")]
        ToolAction(super::ToolAction),
    }
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetConversationResponse {
    #[prost(message, repeated, tag = "1")]
    pub ordered_messages: ::prost::alloc::vec::Vec<ModelMessageWithId>,
    #[prost(message, optional, tag = "2")]
    pub conversation_metadata: ::core::option::Option<ConversationMetadata>,
}
/// Lists all conversation threads that this user has in this workspace
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListConversationsRequest {
    #[prost(string, tag = "1")]
    pub workspace_rid: ::prost::alloc::string::String,
    #[prost(string, optional, tag = "2")]
    pub next_page_token: ::core::option::Option<::prost::alloc::string::String>,
    #[prost(int32, optional, tag = "3")]
    pub page_size: ::core::option::Option<i32>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ConversationMetadata {
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    #[prost(string, tag = "2")]
    pub title: ::prost::alloc::string::String,
    #[prost(message, optional, tag = "3")]
    pub created_at: ::core::option::Option<
        super::super::super::google::protobuf::Timestamp,
    >,
    #[prost(message, optional, tag = "4")]
    pub last_updated_at: ::core::option::Option<
        super::super::super::google::protobuf::Timestamp,
    >,
    #[prost(message, optional, tag = "5")]
    pub mode: ::core::option::Option<ConversationMode>,
}
/// ListConversationsResponse is a list of conversations that can be used in a call to GetConversationRequest
/// to get a full conversation from storage. These are ordered by creation time.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListConversationsResponse {
    #[prost(message, repeated, tag = "1")]
    pub conversations: ::prost::alloc::vec::Vec<ConversationMetadata>,
    #[prost(string, optional, tag = "2")]
    pub next_page_token: ::core::option::Option<::prost::alloc::string::String>,
}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct TimeRange {
    #[prost(message, optional, tag = "1")]
    pub range_start: ::core::option::Option<Timestamp>,
    #[prost(message, optional, tag = "2")]
    pub range_end: ::core::option::Option<Timestamp>,
}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct Timestamp {
    #[prost(int32, tag = "1")]
    pub seconds: i32,
    #[prost(int32, tag = "2")]
    pub nanoseconds: i32,
}
/// ModelMessage is a discriminated union of user and assistant messages.
/// Each message type has its own structure and content.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ModelMessage {
    #[prost(oneof = "model_message::Kind", tags = "1, 2")]
    pub kind: ::core::option::Option<model_message::Kind>,
}
/// Nested message and enum types in `ModelMessage`.
pub mod model_message {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Kind {
        #[prost(message, tag = "1")]
        User(super::UserModelMessage),
        #[prost(message, tag = "2")]
        Assistant(super::AssistantModelMessage),
    }
}
/// A user message containing text
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UserModelMessage {
    #[prost(message, repeated, tag = "1")]
    pub text: ::prost::alloc::vec::Vec<UserContentPart>,
}
/// An assistant message containing text
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AssistantModelMessage {
    #[prost(message, repeated, tag = "1")]
    pub content_parts: ::prost::alloc::vec::Vec<AssistantContentPart>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UserContentPart {
    #[prost(oneof = "user_content_part::Part", tags = "1")]
    pub part: ::core::option::Option<user_content_part::Part>,
}
/// Nested message and enum types in `UserContentPart`.
pub mod user_content_part {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Part {
        #[prost(message, tag = "1")]
        Text(super::TextPart),
    }
}
/// Content part for assistant messages: can be text or reasoning.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AssistantContentPart {
    #[prost(oneof = "assistant_content_part::Part", tags = "1, 2")]
    pub part: ::core::option::Option<assistant_content_part::Part>,
}
/// Nested message and enum types in `AssistantContentPart`.
pub mod assistant_content_part {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Part {
        #[prost(message, tag = "1")]
        Text(super::TextPart),
        #[prost(message, tag = "2")]
        Reasoning(super::ReasoningPart),
    }
}
/// Text part for user or assistant messages.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextPart {
    #[prost(string, tag = "1")]
    pub text: ::prost::alloc::string::String,
}
/// User-supplied image part.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ImagePart {
    /// The base64-encoded image data
    #[prost(bytes = "vec", tag = "1")]
    pub data: ::prost::alloc::vec::Vec<u8>,
    /// The media type of the image (e.g. "image/png", "image/jpeg")
    #[prost(string, optional, tag = "2")]
    pub media_type: ::core::option::Option<::prost::alloc::string::String>,
    /// Optional: the filename of the image
    #[prost(string, optional, tag = "3")]
    pub filename: ::core::option::Option<::prost::alloc::string::String>,
}
/// Reasoning part for assistant messages.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningPart {
    #[prost(string, tag = "1")]
    pub reasoning: ::prost::alloc::string::String,
}
/// StreamChatResponse is a discriminated union response to a StreamChatRequest
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StreamChatResponse {
    #[prost(
        oneof = "stream_chat_response::Response",
        tags = "1, 2, 3, 4, 5, 6, 7, 8, 10"
    )]
    pub response: ::core::option::Option<stream_chat_response::Response>,
}
/// Nested message and enum types in `StreamChatResponse`.
pub mod stream_chat_response {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Response {
        #[prost(message, tag = "1")]
        Finish(super::Finish),
        #[prost(message, tag = "2")]
        Error(super::Error),
        #[prost(message, tag = "3")]
        TextStart(super::TextStart),
        #[prost(message, tag = "4")]
        TextDelta(super::TextDelta),
        #[prost(message, tag = "5")]
        TextEnd(super::TextEnd),
        #[prost(message, tag = "6")]
        ReasoningStart(super::ReasoningStart),
        #[prost(message, tag = "7")]
        ReasoningDelta(super::ReasoningDelta),
        #[prost(message, tag = "8")]
        ReasoningEnd(super::ReasoningEnd),
        #[prost(message, tag = "10")]
        ToolAction(super::ToolAction),
    }
}
/// Indicates the end of a chat session
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Finish {
    /// The message ids, in order, of all generated messages for this agent run.
    /// These ids can be used to branch a new conversation from that specific message.
    #[prost(string, repeated, tag = "1")]
    pub ordered_message_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
    /// In the case that this is the first agent run in a conversation thread, we also
    /// return the new conversation title generated
    #[prost(string, optional, tag = "2")]
    pub new_title: ::core::option::Option<::prost::alloc::string::String>,
}
/// An error that occurred during the chat session
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Error {
    #[prost(string, tag = "1")]
    pub message: ::prost::alloc::string::String,
}
/// Indicates the start of a text message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextStart {
    /// Uniquely identifies the text message (e.g. a uuid) so that the client can
    /// merge parallel message streams if they occur.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// A delta (continuation) of a text message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextDelta {
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// The next chunk of text
    #[prost(string, tag = "2")]
    pub delta: ::prost::alloc::string::String,
}
/// Indicates the end of a text message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextEnd {
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// Indicates the start of a reasoning message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningStart {
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// A delta (continuation) of a reasoning message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningDelta {
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// The next chunk of reasoning
    #[prost(string, tag = "2")]
    pub delta: ::prost::alloc::string::String,
}
/// Indicates the end of a reasoning message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningEnd {
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// A concise description of a tool call that the agent is making internally.
/// Without revealing too much detail about the tool call, it informs the user what the agent is doing
/// at a high level. The format is `{tool_action_verb} {tool_target}`, for example:
/// "Search channels for My Datasource"
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ToolAction {
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// "Thought", "Read", "Find", "Look-up", etc.
    #[prost(string, tag = "2")]
    pub tool_action_verb: ::prost::alloc::string::String,
    /// "workbook", "channel", "variable", "panel", etc.
    #[prost(string, optional, tag = "3")]
    pub tool_target: ::core::option::Option<::prost::alloc::string::String>,
}
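// Illustrative sketch (not generated code; the helper name is a placeholder): renders a
// ToolAction as the human-readable "{tool_action_verb} {tool_target}" string described
// above, falling back to the verb alone when no target is present.
#[allow(dead_code)]
fn format_tool_action(action: &ToolAction) -> String {
    match &action.tool_target {
        Some(target) => format!("{} {}", action.tool_action_verb, target),
        None => action.tool_action_verb.clone(),
    }
}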
/// Generated client implementations.
pub mod ai_agent_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// AIAgentService provides AI-powered assistance for general operations
    #[derive(Debug, Clone)]
    pub struct AiAgentServiceClient<T> {
        inner: tonic::client::Grpc<T>,
    }
    impl AiAgentServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> AiAgentServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> AiAgentServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            AiAgentServiceClient::new(InterceptedService::new(inner, interceptor))
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// StreamChat streams chat responses from the AI agent for a single request (server streaming)
        pub async fn stream_chat(
            &mut self,
            request: impl tonic::IntoRequest<super::StreamChatRequest>,
        ) -> std::result::Result<
            tonic::Response<tonic::codec::Streaming<super::StreamChatResponse>>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/StreamChat",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(GrpcMethod::new("nominal.ai.v1.AIAgentService", "StreamChat"));
            self.inner.server_streaming(req, path, codec).await
        }
        /// GetConversation handles getting a conversation's messages, with an optional limit on the number of messages returned
        pub async fn get_conversation(
            &mut self,
            request: impl tonic::IntoRequest<super::GetConversationRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GetConversationResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/GetConversation",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "GetConversation"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// ListConversations handles getting the list of conversations ordered by most recently updated
        pub async fn list_conversations(
            &mut self,
            request: impl tonic::IntoRequest<super::ListConversationsRequest>,
        ) -> std::result::Result<
            tonic::Response<super::ListConversationsResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/ListConversations",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "ListConversations"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// CreateConversation handles creating a conversation and assigning it a conversation rid
        pub async fn create_conversation(
            &mut self,
            request: impl tonic::IntoRequest<super::CreateConversationRequest>,
        ) -> std::result::Result<
            tonic::Response<super::CreateConversationResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/CreateConversation",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "CreateConversation"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// UpdateConversationMetadata handles updating any metadata associated with a conversation
        pub async fn update_conversation_metadata(
            &mut self,
            request: impl tonic::IntoRequest<super::UpdateConversationMetadataRequest>,
        ) -> std::result::Result<
            tonic::Response<super::UpdateConversationMetadataResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/UpdateConversationMetadata",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.AIAgentService",
                        "UpdateConversationMetadata",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// DeleteConversation handles deleting a specific conversation by conversation rid
        pub async fn delete_conversation(
            &mut self,
            request: impl tonic::IntoRequest<super::DeleteConversationRequest>,
        ) -> std::result::Result<
            tonic::Response<super::DeleteConversationResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/DeleteConversation",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "DeleteConversation"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// GetSnapshotRidByUserMessageId handles resolving the snapshot rid of the workbook at the time the message is sent
        pub async fn get_snapshot_rid_by_user_message_id(
            &mut self,
            request: impl tonic::IntoRequest<super::GetSnapshotRidByUserMessageIdRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GetSnapshotRidByUserMessageIdResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/GetSnapshotRidByUserMessageId",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.AIAgentService",
                        "GetSnapshotRidByUserMessageId",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}
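// Illustrative usage sketch (not generated code): sends one user message in a workbook
// context and consumes the server-streaming response, printing text deltas and tool
// actions as they arrive. RIDs, endpoint URL, and the function name are hypothetical
// placeholders.
#[allow(dead_code)]
async fn example_stream_chat() -> Result<(), Box<dyn std::error::Error>> {
    use ai_agent_service_client::AiAgentServiceClient;
    let mut client = AiAgentServiceClient::connect("http://localhost:50051").await?;
    let request = StreamChatRequest {
        conversation_rid: "ri.conversation.example".to_string(),
        message: Some(UserModelMessage {
            text: vec![UserContentPart {
                part: Some(user_content_part::Part::Text(TextPart {
                    text: "Summarize the anomalies in this run".to_string(),
                })),
            }],
        }),
        images: vec![],
        context: Some(stream_chat_request::Context::Workbook(WorkbookContext {
            workbook_rid: "ri.workbook.example".to_string(),
            user_presence: None,
        })),
    };
    let mut stream = client.stream_chat(request).await?.into_inner();
    // `message()` yields the next StreamChatResponse until the stream ends.
    while let Some(event) = stream.message().await? {
        match event.response {
            Some(stream_chat_response::Response::TextDelta(delta)) => print!("{}", delta.delta),
            Some(stream_chat_response::Response::ToolAction(action)) => {
                println!("[{}]", action.tool_action_verb);
            }
            Some(stream_chat_response::Response::Error(err)) => eprintln!("error: {}", err.message),
            Some(stream_chat_response::Response::Finish(finish)) => {
                println!("\ndone; {} message(s) generated", finish.ordered_message_ids.len());
            }
            _ => {}
        }
    }
    Ok(())
}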