
nominal_api/proto/nominal.ai.v1.rs

1// This file is @generated by prost-build.
2#[derive(Clone, Copy, PartialEq, ::prost::Message)]
3pub struct GetProviderStatusRequest {}
4#[derive(Clone, Copy, PartialEq, ::prost::Message)]
5pub struct GetProviderStatusResponse {
6    /// Timestamp when the last status was determined
7    #[prost(message, optional, tag = "1")]
8    pub timestamp: ::core::option::Option<
9        super::super::super::google::protobuf::Timestamp,
10    >,
11    /// Status of the most recent health check probe
12    #[prost(message, optional, tag = "2")]
13    pub last_status: ::core::option::Option<ProviderStatus>,
14    /// Aggregated status over the last 30 minutes (DEGRADED if any check failed or exceeded thresholds)
15    /// Deprecated: Use aggregated_status instead. This field is kept for backward compatibility.
16    #[deprecated]
17    #[prost(message, optional, tag = "3")]
18    pub aggregated_status_over_last_30m: ::core::option::Option<ProviderStatus>,
19    /// Aggregated status over the most recent health-check iterations, with the window configurable in the backend (DEGRADED if any check failed or exceeded thresholds)
20    #[prost(message, optional, tag = "4")]
21    pub aggregated_status: ::core::option::Option<ProviderStatus>,
22}
23#[derive(Clone, Copy, PartialEq, ::prost::Message)]
24pub struct ProviderStatus {
25    #[prost(oneof = "provider_status::Status", tags = "1, 2")]
26    pub status: ::core::option::Option<provider_status::Status>,
27}
28/// Nested message and enum types in `ProviderStatus`.
29pub mod provider_status {
30    #[derive(Clone, Copy, PartialEq, ::prost::Oneof)]
31    pub enum Status {
32        #[prost(message, tag = "1")]
33        Healthy(super::Healthy),
34        #[prost(message, tag = "2")]
35        Degraded(super::Degraded),
36    }
37}
38#[derive(Clone, Copy, PartialEq, ::prost::Message)]
39pub struct Healthy {}
40#[derive(Clone, Copy, PartialEq, ::prost::Message)]
41pub struct Degraded {
42    #[prost(enumeration = "DegradationReason", tag = "1")]
43    pub reason: i32,
44}
45#[derive(Clone, Copy, PartialEq, ::prost::Message)]
46pub struct ProviderMetrics {
47    #[prost(int32, tag = "1")]
48    pub time_to_first_token_ms: i32,
49    #[prost(int32, tag = "2")]
50    pub total_time_ms: i32,
51}
52#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
53#[repr(i32)]
54pub enum DegradationReason {
55    Unspecified = 0,
56    HighLatency = 1,
57    Failures = 2,
58    HighLatencyAndFailures = 3,
59}
60impl DegradationReason {
61    /// String value of the enum field names used in the ProtoBuf definition.
62    ///
63    /// The values are not transformed in any way and thus are considered stable
64    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
65    pub fn as_str_name(&self) -> &'static str {
66        match self {
67            Self::Unspecified => "DEGRADATION_REASON_UNSPECIFIED",
68            Self::HighLatency => "DEGRADATION_REASON_HIGH_LATENCY",
69            Self::Failures => "DEGRADATION_REASON_FAILURES",
70            Self::HighLatencyAndFailures => {
71                "DEGRADATION_REASON_HIGH_LATENCY_AND_FAILURES"
72            }
73        }
74    }
75    /// Creates an enum from field names used in the ProtoBuf definition.
76    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
77        match value {
78            "DEGRADATION_REASON_UNSPECIFIED" => Some(Self::Unspecified),
79            "DEGRADATION_REASON_HIGH_LATENCY" => Some(Self::HighLatency),
80            "DEGRADATION_REASON_FAILURES" => Some(Self::Failures),
81            "DEGRADATION_REASON_HIGH_LATENCY_AND_FAILURES" => {
82                Some(Self::HighLatencyAndFailures)
83            }
84            _ => None,
85        }
86    }
87}
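Illustrative sketch (not part of the generated file): prost stores enumeration fields as a raw i32, so callers typically convert the value back into DegradationReason before acting on it. Recent prost versions generate a TryFrom<i32> impl for this; the helper function name below is an assumption for the example.

fn describe_degradation(degraded: &Degraded) -> &'static str {
    // Convert the raw i32 back into the enum; unrecognized values fall back to
    // the UNSPECIFIED name instead of panicking.
    match DegradationReason::try_from(degraded.reason) {
        Ok(reason) => reason.as_str_name(),
        Err(_) => DegradationReason::Unspecified.as_str_name(),
    }
}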
88/// Generated client implementations.
89pub mod model_provider_health_service_client {
90    #![allow(
91        unused_variables,
92        dead_code,
93        missing_docs,
94        clippy::wildcard_imports,
95        clippy::let_unit_value,
96    )]
97    use tonic::codegen::*;
98    use tonic::codegen::http::Uri;
99    /// ModelProviderHealthService monitors the health and performance of the backing LLM model provider.
100    /// It runs lightweight health checks every 5 minutes to measure provider responsiveness and reliability,
101    /// independent of the complexity of user prompts.
102    #[derive(Debug, Clone)]
103    pub struct ModelProviderHealthServiceClient<T> {
104        inner: tonic::client::Grpc<T>,
105    }
106    impl ModelProviderHealthServiceClient<tonic::transport::Channel> {
107        /// Attempt to create a new client by connecting to a given endpoint.
108        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
109        where
110            D: TryInto<tonic::transport::Endpoint>,
111            D::Error: Into<StdError>,
112        {
113            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
114            Ok(Self::new(conn))
115        }
116    }
117    impl<T> ModelProviderHealthServiceClient<T>
118    where
119        T: tonic::client::GrpcService<tonic::body::Body>,
120        T::Error: Into<StdError>,
121        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
122        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
123    {
124        pub fn new(inner: T) -> Self {
125            let inner = tonic::client::Grpc::new(inner);
126            Self { inner }
127        }
128        pub fn with_origin(inner: T, origin: Uri) -> Self {
129            let inner = tonic::client::Grpc::with_origin(inner, origin);
130            Self { inner }
131        }
132        pub fn with_interceptor<F>(
133            inner: T,
134            interceptor: F,
135        ) -> ModelProviderHealthServiceClient<InterceptedService<T, F>>
136        where
137            F: tonic::service::Interceptor,
138            T::ResponseBody: Default,
139            T: tonic::codegen::Service<
140                http::Request<tonic::body::Body>,
141                Response = http::Response<
142                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
143                >,
144            >,
145            <T as tonic::codegen::Service<
146                http::Request<tonic::body::Body>,
147            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
148        {
149            ModelProviderHealthServiceClient::new(
150                InterceptedService::new(inner, interceptor),
151            )
152        }
153        /// Compress requests with the given encoding.
154        ///
155        /// This requires the server to support it otherwise it might respond with an
156        /// error.
157        #[must_use]
158        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
159            self.inner = self.inner.send_compressed(encoding);
160            self
161        }
162        /// Enable decompressing responses.
163        #[must_use]
164        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
165            self.inner = self.inner.accept_compressed(encoding);
166            self
167        }
168        /// Limits the maximum size of a decoded message.
169        ///
170        /// Default: `4MB`
171        #[must_use]
172        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
173            self.inner = self.inner.max_decoding_message_size(limit);
174            self
175        }
176        /// Limits the maximum size of an encoded message.
177        ///
178        /// Default: `usize::MAX`
179        #[must_use]
180        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
181            self.inner = self.inner.max_encoding_message_size(limit);
182            self
183        }
184        /// GetProviderStatus returns the current health status of the model provider based on recent health checks.
185        /// The status is HEALTHY if all checks in the last 30 minutes passed latency thresholds,
186        /// or DEGRADED if any checks exceeded latency thresholds or failed entirely.
187        pub async fn get_provider_status(
188            &mut self,
189            request: impl tonic::IntoRequest<super::GetProviderStatusRequest>,
190        ) -> std::result::Result<
191            tonic::Response<super::GetProviderStatusResponse>,
192            tonic::Status,
193        > {
194            self.inner
195                .ready()
196                .await
197                .map_err(|e| {
198                    tonic::Status::unknown(
199                        format!("Service was not ready: {}", e.into()),
200                    )
201                })?;
202            let codec = tonic::codec::ProstCodec::default();
203            let path = http::uri::PathAndQuery::from_static(
204                "/nominal.ai.v1.ModelProviderHealthService/GetProviderStatus",
205            );
206            let mut req = request.into_request();
207            req.extensions_mut()
208                .insert(
209                    GrpcMethod::new(
210                        "nominal.ai.v1.ModelProviderHealthService",
211                        "GetProviderStatus",
212                    ),
213                );
214            self.inner.unary(req, path, codec).await
215        }
216    }
217}
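Illustrative sketch (not part of the generated file): connecting the generated health client and inspecting the aggregated status oneof. The endpoint URL is a placeholder assumption, and a tokio runtime is assumed to drive the async call.

use model_provider_health_service_client::ModelProviderHealthServiceClient;

async fn check_provider() -> Result<(), Box<dyn std::error::Error>> {
    let mut client =
        ModelProviderHealthServiceClient::connect("http://localhost:50051").await?;
    let status = client
        .get_provider_status(GetProviderStatusRequest {})
        .await?
        .into_inner();
    // The aggregated status oneof is either Healthy or Degraded.
    match status.aggregated_status.and_then(|s| s.status) {
        Some(provider_status::Status::Healthy(_)) => println!("provider healthy"),
        Some(provider_status::Status::Degraded(d)) => {
            println!("provider degraded, reason code {}", d.reason)
        }
        None => println!("no status reported"),
    }
    Ok(())
}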
218/// CreateOrUpdateKnowledgeBaseRequest is a request to create a knowledge base from an attachment in the attachment's
219/// workspace. Subsequent calls for the same attachment will overwrite the existing knowledge base.
220#[derive(Clone, PartialEq, ::prost::Message)]
221pub struct CreateOrUpdateKnowledgeBaseRequest {
222    #[prost(string, tag = "1")]
223    pub attachment_rid: ::prost::alloc::string::String,
224    /// Summary of the knowledge base; used by the LLM to decide when to use it
225    #[prost(string, tag = "2")]
226    pub summary_description: ::prost::alloc::string::String,
227    #[prost(enumeration = "KnowledgeBaseType", optional, tag = "3")]
228    pub r#type: ::core::option::Option<i32>,
229}
230/// CreateOrUpdateKnowledgeBaseResponse is the response to creating/updating a knowledge base
231#[derive(Clone, PartialEq, ::prost::Message)]
232pub struct CreateOrUpdateKnowledgeBaseResponse {
233    #[prost(string, tag = "1")]
234    pub knowledge_base_rid: ::prost::alloc::string::String,
235}
236/// KnowledgeBase represents a knowledge base entry
237#[derive(Clone, PartialEq, ::prost::Message)]
238pub struct KnowledgeBase {
239    #[prost(string, tag = "1")]
240    pub knowledge_base_rid: ::prost::alloc::string::String,
241    #[prost(string, tag = "2")]
242    pub attachment_rid: ::prost::alloc::string::String,
243    #[prost(string, tag = "3")]
244    pub workspace_rid: ::prost::alloc::string::String,
245    #[prost(string, tag = "4")]
246    pub summary_description: ::prost::alloc::string::String,
247    #[prost(enumeration = "KnowledgeBaseType", tag = "5")]
248    pub r#type: i32,
249    #[prost(int32, tag = "6")]
250    pub version: i32,
251}
252#[derive(Clone, PartialEq, ::prost::Message)]
253pub struct ListRequest {
254    #[prost(string, tag = "1")]
255    pub workspace_rid: ::prost::alloc::string::String,
256}
257#[derive(Clone, PartialEq, ::prost::Message)]
258pub struct ListResponse {
259    #[prost(message, repeated, tag = "1")]
260    pub knowledge_bases: ::prost::alloc::vec::Vec<KnowledgeBase>,
261}
262#[derive(Clone, PartialEq, ::prost::Message)]
263pub struct DeleteRequest {
264    #[prost(string, tag = "1")]
265    pub knowledge_base_rid: ::prost::alloc::string::String,
266}
267#[derive(Clone, Copy, PartialEq, ::prost::Message)]
268pub struct DeleteResponse {
269    #[prost(bool, tag = "1")]
270    pub success: bool,
271}
272#[derive(Clone, PartialEq, ::prost::Message)]
273pub struct GetBatchRequest {
274    #[prost(string, repeated, tag = "1")]
275    pub knowledge_base_rids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
276}
277#[derive(Clone, PartialEq, ::prost::Message)]
278pub struct GetBatchResponse {
279    #[prost(message, repeated, tag = "1")]
280    pub knowledge_bases: ::prost::alloc::vec::Vec<KnowledgeBase>,
281}
282/// GenerateSummaryDescription intentionally returns the generated description to the frontend
283/// rather than storing it in the knowledge base directly, because the description must first be accepted by the user
284#[derive(Clone, PartialEq, ::prost::Message)]
285pub struct GenerateSummaryDescriptionRequest {
286    #[prost(string, tag = "1")]
287    pub attachment_rid: ::prost::alloc::string::String,
288}
289#[derive(Clone, PartialEq, ::prost::Message)]
290pub struct GenerateSummaryDescriptionResponse {
291    #[prost(string, tag = "1")]
292    pub summary_description: ::prost::alloc::string::String,
293}
294/// KnowledgeBaseType defines the types of knowledge base
295#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
296#[repr(i32)]
297pub enum KnowledgeBaseType {
298    /// defaults to PROMPT
299    Unspecified = 0,
300    /// knowledge base gets added directly to prompt (needs to be small enough!)
301    Prompt = 1,
302    /// knowledge base gets used via vector search on embeddings
303    Embedding = 2,
304}
305impl KnowledgeBaseType {
306    /// String value of the enum field names used in the ProtoBuf definition.
307    ///
308    /// The values are not transformed in any way and thus are considered stable
309    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
310    pub fn as_str_name(&self) -> &'static str {
311        match self {
312            Self::Unspecified => "KNOWLEDGE_BASE_TYPE_UNSPECIFIED",
313            Self::Prompt => "KNOWLEDGE_BASE_TYPE_PROMPT",
314            Self::Embedding => "KNOWLEDGE_BASE_TYPE_EMBEDDING",
315        }
316    }
317    /// Creates an enum from field names used in the ProtoBuf definition.
318    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
319        match value {
320            "KNOWLEDGE_BASE_TYPE_UNSPECIFIED" => Some(Self::Unspecified),
321            "KNOWLEDGE_BASE_TYPE_PROMPT" => Some(Self::Prompt),
322            "KNOWLEDGE_BASE_TYPE_EMBEDDING" => Some(Self::Embedding),
323            _ => None,
324        }
325    }
326}
327/// Generated client implementations.
328pub mod knowledge_base_service_client {
329    #![allow(
330        unused_variables,
331        dead_code,
332        missing_docs,
333        clippy::wildcard_imports,
334        clippy::let_unit_value,
335    )]
336    use tonic::codegen::*;
337    use tonic::codegen::http::Uri;
338    /// KnowledgeBaseService provides AI-powered knowledge base management
339    #[derive(Debug, Clone)]
340    pub struct KnowledgeBaseServiceClient<T> {
341        inner: tonic::client::Grpc<T>,
342    }
343    impl KnowledgeBaseServiceClient<tonic::transport::Channel> {
344        /// Attempt to create a new client by connecting to a given endpoint.
345        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
346        where
347            D: TryInto<tonic::transport::Endpoint>,
348            D::Error: Into<StdError>,
349        {
350            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
351            Ok(Self::new(conn))
352        }
353    }
354    impl<T> KnowledgeBaseServiceClient<T>
355    where
356        T: tonic::client::GrpcService<tonic::body::Body>,
357        T::Error: Into<StdError>,
358        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
359        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
360    {
361        pub fn new(inner: T) -> Self {
362            let inner = tonic::client::Grpc::new(inner);
363            Self { inner }
364        }
365        pub fn with_origin(inner: T, origin: Uri) -> Self {
366            let inner = tonic::client::Grpc::with_origin(inner, origin);
367            Self { inner }
368        }
369        pub fn with_interceptor<F>(
370            inner: T,
371            interceptor: F,
372        ) -> KnowledgeBaseServiceClient<InterceptedService<T, F>>
373        where
374            F: tonic::service::Interceptor,
375            T::ResponseBody: Default,
376            T: tonic::codegen::Service<
377                http::Request<tonic::body::Body>,
378                Response = http::Response<
379                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
380                >,
381            >,
382            <T as tonic::codegen::Service<
383                http::Request<tonic::body::Body>,
384            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
385        {
386            KnowledgeBaseServiceClient::new(InterceptedService::new(inner, interceptor))
387        }
388        /// Compress requests with the given encoding.
389        ///
390        /// This requires the server to support it otherwise it might respond with an
391        /// error.
392        #[must_use]
393        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
394            self.inner = self.inner.send_compressed(encoding);
395            self
396        }
397        /// Enable decompressing responses.
398        #[must_use]
399        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
400            self.inner = self.inner.accept_compressed(encoding);
401            self
402        }
403        /// Limits the maximum size of a decoded message.
404        ///
405        /// Default: `4MB`
406        #[must_use]
407        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
408            self.inner = self.inner.max_decoding_message_size(limit);
409            self
410        }
411        /// Limits the maximum size of an encoded message.
412        ///
413        /// Default: `usize::MAX`
414        #[must_use]
415        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
416            self.inner = self.inner.max_encoding_message_size(limit);
417            self
418        }
419        /// CreateOrUpdateKnowledgeBase creates a knowledge base in the workspace
420        pub async fn create_or_update_knowledge_base(
421            &mut self,
422            request: impl tonic::IntoRequest<super::CreateOrUpdateKnowledgeBaseRequest>,
423        ) -> std::result::Result<
424            tonic::Response<super::CreateOrUpdateKnowledgeBaseResponse>,
425            tonic::Status,
426        > {
427            self.inner
428                .ready()
429                .await
430                .map_err(|e| {
431                    tonic::Status::unknown(
432                        format!("Service was not ready: {}", e.into()),
433                    )
434                })?;
435            let codec = tonic::codec::ProstCodec::default();
436            let path = http::uri::PathAndQuery::from_static(
437                "/nominal.ai.v1.KnowledgeBaseService/CreateOrUpdateKnowledgeBase",
438            );
439            let mut req = request.into_request();
440            req.extensions_mut()
441                .insert(
442                    GrpcMethod::new(
443                        "nominal.ai.v1.KnowledgeBaseService",
444                        "CreateOrUpdateKnowledgeBase",
445                    ),
446                );
447            self.inner.unary(req, path, codec).await
448        }
449        /// List returns all knowledge bases in the specified workspace
450        pub async fn list(
451            &mut self,
452            request: impl tonic::IntoRequest<super::ListRequest>,
453        ) -> std::result::Result<tonic::Response<super::ListResponse>, tonic::Status> {
454            self.inner
455                .ready()
456                .await
457                .map_err(|e| {
458                    tonic::Status::unknown(
459                        format!("Service was not ready: {}", e.into()),
460                    )
461                })?;
462            let codec = tonic::codec::ProstCodec::default();
463            let path = http::uri::PathAndQuery::from_static(
464                "/nominal.ai.v1.KnowledgeBaseService/List",
465            );
466            let mut req = request.into_request();
467            req.extensions_mut()
468                .insert(GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "List"));
469            self.inner.unary(req, path, codec).await
470        }
471        /// Delete removes a knowledge base by its RID
472        pub async fn delete(
473            &mut self,
474            request: impl tonic::IntoRequest<super::DeleteRequest>,
475        ) -> std::result::Result<tonic::Response<super::DeleteResponse>, tonic::Status> {
476            self.inner
477                .ready()
478                .await
479                .map_err(|e| {
480                    tonic::Status::unknown(
481                        format!("Service was not ready: {}", e.into()),
482                    )
483                })?;
484            let codec = tonic::codec::ProstCodec::default();
485            let path = http::uri::PathAndQuery::from_static(
486                "/nominal.ai.v1.KnowledgeBaseService/Delete",
487            );
488            let mut req = request.into_request();
489            req.extensions_mut()
490                .insert(GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "Delete"));
491            self.inner.unary(req, path, codec).await
492        }
493        /// GetBatch retrieves multiple knowledge bases by their RIDs
494        pub async fn get_batch(
495            &mut self,
496            request: impl tonic::IntoRequest<super::GetBatchRequest>,
497        ) -> std::result::Result<
498            tonic::Response<super::GetBatchResponse>,
499            tonic::Status,
500        > {
501            self.inner
502                .ready()
503                .await
504                .map_err(|e| {
505                    tonic::Status::unknown(
506                        format!("Service was not ready: {}", e.into()),
507                    )
508                })?;
509            let codec = tonic::codec::ProstCodec::default();
510            let path = http::uri::PathAndQuery::from_static(
511                "/nominal.ai.v1.KnowledgeBaseService/GetBatch",
512            );
513            let mut req = request.into_request();
514            req.extensions_mut()
515                .insert(
516                    GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "GetBatch"),
517                );
518            self.inner.unary(req, path, codec).await
519        }
520        /// GenerateSummaryDescription generates a summary description for an attachment rid
521        pub async fn generate_summary_description(
522            &mut self,
523            request: impl tonic::IntoRequest<super::GenerateSummaryDescriptionRequest>,
524        ) -> std::result::Result<
525            tonic::Response<super::GenerateSummaryDescriptionResponse>,
526            tonic::Status,
527        > {
528            self.inner
529                .ready()
530                .await
531                .map_err(|e| {
532                    tonic::Status::unknown(
533                        format!("Service was not ready: {}", e.into()),
534                    )
535                })?;
536            let codec = tonic::codec::ProstCodec::default();
537            let path = http::uri::PathAndQuery::from_static(
538                "/nominal.ai.v1.KnowledgeBaseService/GenerateSummaryDescription",
539            );
540            let mut req = request.into_request();
541            req.extensions_mut()
542                .insert(
543                    GrpcMethod::new(
544                        "nominal.ai.v1.KnowledgeBaseService",
545                        "GenerateSummaryDescription",
546                    ),
547                );
548            self.inner.unary(req, path, codec).await
549        }
550    }
551}
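Illustrative sketch (not part of the generated file): creating a knowledge base from an attachment and then listing the workspace's knowledge bases. The endpoint, RIDs, and summary text are placeholder assumptions, and a tokio runtime is assumed.

use knowledge_base_service_client::KnowledgeBaseServiceClient;

async fn index_attachment() -> Result<(), Box<dyn std::error::Error>> {
    let mut client =
        KnowledgeBaseServiceClient::connect("http://localhost:50051").await?;
    // Create (or overwrite) the knowledge base tied to this attachment.
    let created = client
        .create_or_update_knowledge_base(CreateOrUpdateKnowledgeBaseRequest {
            attachment_rid: "ri.attachment.example".to_string(),
            summary_description: "Flight test glossary".to_string(),
            // PROMPT means the content is injected directly into the prompt.
            r#type: Some(KnowledgeBaseType::Prompt as i32),
        })
        .await?
        .into_inner();
    println!("created {}", created.knowledge_base_rid);

    // List everything registered in the workspace.
    let listed = client
        .list(ListRequest {
            workspace_rid: "ri.workspace.example".to_string(),
        })
        .await?
        .into_inner();
    println!("{} knowledge bases in workspace", listed.knowledge_bases.len());
    Ok(())
}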
552#[derive(Clone, PartialEq, ::prost::Message)]
553pub struct GetSnapshotRidByUserMessageIdRequest {
554    #[prost(string, tag = "1")]
555    pub conversation_rid: ::prost::alloc::string::String,
556    #[prost(string, tag = "2")]
557    pub message_id: ::prost::alloc::string::String,
558}
559/// Returns an empty response body when the message id exists but there is no associated snapshot.
560/// This occurs when the message was sent in a non-workbook context
561#[derive(Clone, PartialEq, ::prost::Message)]
562pub struct GetSnapshotRidByUserMessageIdResponse {
563    #[prost(string, optional, tag = "1")]
564    pub snapshot_rid: ::core::option::Option<::prost::alloc::string::String>,
565}
566/// ReadOnlyMode configures read-only mode where edit tools are shadowed/disabled
567#[derive(Clone, Copy, PartialEq, ::prost::Message)]
568pub struct ReadOnlyMode {}
569/// EditMode configures edit mode where all tools are available
570#[derive(Clone, Copy, PartialEq, ::prost::Message)]
571pub struct EditMode {
572    /// When set to true, edits are auto-accepted for any tools that typically require approval
573    #[prost(bool, optional, tag = "1")]
574    pub auto_accept: ::core::option::Option<bool>,
575}
576/// ConversationMode specifies the mode of the conversation
577#[derive(Clone, Copy, PartialEq, ::prost::Message)]
578pub struct ConversationMode {
579    #[prost(oneof = "conversation_mode::Mode", tags = "1, 2")]
580    pub mode: ::core::option::Option<conversation_mode::Mode>,
581}
582/// Nested message and enum types in `ConversationMode`.
583pub mod conversation_mode {
584    #[derive(Clone, Copy, PartialEq, ::prost::Oneof)]
585    pub enum Mode {
586        #[prost(message, tag = "1")]
587        ReadOnly(super::ReadOnlyMode),
588        #[prost(message, tag = "2")]
589        Edit(super::EditMode),
590    }
591}
592/// When the agent makes a request to use a tool, the user responds
593/// with one of these for every request, mapping a tool call id to its approval or denial
594#[derive(Clone, PartialEq, ::prost::Message)]
595pub struct ToolApprovalResult {
596    /// identifies the tool call
597    #[prost(string, tag = "1")]
598    pub tool_call_id: ::prost::alloc::string::String,
599    #[prost(oneof = "tool_approval_result::Response", tags = "2, 3")]
600    pub response: ::core::option::Option<tool_approval_result::Response>,
601}
602/// Nested message and enum types in `ToolApprovalResult`.
603pub mod tool_approval_result {
604    #[derive(Clone, PartialEq, ::prost::Oneof)]
605    pub enum Response {
606        #[prost(message, tag = "2")]
607        Approved(super::ToolApprovedResponse),
608        #[prost(message, tag = "3")]
609        Denied(super::ToolDeniedResponse),
610    }
611}
612#[derive(Clone, PartialEq, ::prost::Message)]
613pub struct ToolApprovedResponse {
614    /// JSON string representation of the overridden arguments, if the user
615    /// needs them changed in some way
616    #[prost(string, optional, tag = "1")]
617    pub override_args: ::core::option::Option<::prost::alloc::string::String>,
618}
619#[derive(Clone, PartialEq, ::prost::Message)]
620pub struct ToolDeniedResponse {
621    #[prost(string, tag = "2")]
622    pub denial_reason: ::prost::alloc::string::String,
623}
624/// RetryRequest retries the last request (e.g., if it was interrupted/failed part-way through)
625#[derive(Clone, Copy, PartialEq, ::prost::Message)]
626pub struct RetryRequest {}
627/// UserPromptRequest contains a new user message
628#[derive(Clone, PartialEq, ::prost::Message)]
629pub struct UserPromptRequest {
630    #[prost(message, optional, tag = "1")]
631    pub message: ::core::option::Option<UserModelMessage>,
632    /// Optional: image files to provide to the agent
633    #[prost(message, repeated, tag = "2")]
634    pub images: ::prost::alloc::vec::Vec<ImagePart>,
635}
636/// ToolApprovalRequest contains tool approval results
637#[derive(Clone, PartialEq, ::prost::Message)]
638pub struct ToolApprovalRequest {
639    #[prost(message, repeated, tag = "1")]
640    pub tool_approvals: ::prost::alloc::vec::Vec<ToolApprovalResult>,
641}
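Illustrative sketch (not part of the generated file): building a follow-up ToolApprovalRequest that approves one pending tool call and denies another. The tool call ids and denial reason are placeholder assumptions.

fn example_approvals() -> ToolApprovalRequest {
    ToolApprovalRequest {
        tool_approvals: vec![
            // Approve the first call as proposed (no argument overrides).
            ToolApprovalResult {
                tool_call_id: "tool-call-1".to_string(),
                response: Some(tool_approval_result::Response::Approved(
                    ToolApprovedResponse { override_args: None },
                )),
            },
            // Deny the second call with a short reason for the agent.
            ToolApprovalResult {
                tool_call_id: "tool-call-2".to_string(),
                response: Some(tool_approval_result::Response::Denied(
                    ToolDeniedResponse {
                        denial_reason: "not needed for this question".to_string(),
                    },
                )),
            },
        ],
    }
}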
642/// StreamChatRequest is a request to stream chat messages for AI agent.
643#[derive(Clone, PartialEq, ::prost::Message)]
644pub struct StreamChatRequest {
645    /// The conversation ID
646    #[prost(string, tag = "1")]
647    pub conversation_rid: ::prost::alloc::string::String,
648    /// DEPRECATED: use request_type.user_prompt.message instead
649    #[deprecated]
650    #[prost(message, optional, tag = "2")]
651    pub message: ::core::option::Option<UserModelMessage>,
652    /// DEPRECATED: use request_type.user_prompt.images instead
653    #[deprecated]
654    #[prost(message, repeated, tag = "3")]
655    pub images: ::prost::alloc::vec::Vec<ImagePart>,
656    /// DEPRECATED: use request_type.tool_approval instead
657    #[deprecated]
658    #[prost(message, repeated, tag = "6")]
659    pub tool_approvals: ::prost::alloc::vec::Vec<ToolApprovalResult>,
660    /// The type of request - exactly one should be set
661    #[prost(oneof = "stream_chat_request::RequestType", tags = "7, 8, 9")]
662    pub request_type: ::core::option::Option<stream_chat_request::RequestType>,
663    /// Context-specific fields based on which oneof variant is set.
664    #[prost(oneof = "stream_chat_request::Context", tags = "4, 5")]
665    pub context: ::core::option::Option<stream_chat_request::Context>,
666}
667/// Nested message and enum types in `StreamChatRequest`.
668pub mod stream_chat_request {
669    /// The type of request - exactly one should be set
670    #[derive(Clone, PartialEq, ::prost::Oneof)]
671    pub enum RequestType {
672        #[prost(message, tag = "7")]
673        Retry(super::RetryRequest),
674        #[prost(message, tag = "8")]
675        UserPrompt(super::UserPromptRequest),
676        #[prost(message, tag = "9")]
677        ToolApproval(super::ToolApprovalRequest),
678    }
679    /// Context-specific fields based on which oneof variant is set.
680    #[derive(Clone, PartialEq, ::prost::Oneof)]
681    pub enum Context {
682        #[prost(message, tag = "4")]
683        Workbook(super::WorkbookContext),
684        #[prost(message, tag = "5")]
685        Global(super::GlobalContext),
686    }
687}
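Illustrative sketch (not part of the generated file): constructing a StreamChatRequest that sends a new user prompt in a workbook context through the non-deprecated request_type oneof. The prompt text and RIDs are placeholder assumptions.

fn example_prompt_request(conversation_rid: String, workbook_rid: String) -> StreamChatRequest {
    StreamChatRequest {
        conversation_rid,
        request_type: Some(stream_chat_request::RequestType::UserPrompt(UserPromptRequest {
            message: Some(UserModelMessage {
                text: vec![UserContentPart {
                    part: Some(user_content_part::Part::Text(TextPart {
                        text: "Summarize the latest run".to_string(),
                    })),
                }],
            }),
            images: Vec::new(),
        })),
        context: Some(stream_chat_request::Context::Workbook(WorkbookContext {
            workbook_rid,
            user_presence: None,
        })),
        // Deprecated top-level fields (message, images, tool_approvals) stay at their defaults.
        ..Default::default()
    }
}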
688/// WorkbookContext contains workbook-specific context fields
689#[derive(Clone, PartialEq, ::prost::Message)]
690pub struct WorkbookContext {
691    /// RID of the workbook to use for context
692    #[prost(string, tag = "1")]
693    pub workbook_rid: ::prost::alloc::string::String,
694    /// The user's presence in the workbook
695    #[prost(message, optional, tag = "2")]
696    pub user_presence: ::core::option::Option<WorkbookUserPresence>,
697}
698/// GlobalContext (no additional context)
699#[derive(Clone, Copy, PartialEq, ::prost::Message)]
700pub struct GlobalContext {}
701/// WorkbookUserPresence contains the user's presence in the workbook
702/// which is used to describe what the user is viewing at the time of the message.
703#[derive(Clone, Copy, PartialEq, ::prost::Message)]
704pub struct WorkbookUserPresence {
705    #[prost(int32, tag = "1")]
706    pub tab_index: i32,
707    #[prost(message, optional, tag = "2")]
708    pub range: ::core::option::Option<TimeRange>,
709}
710/// CreateConversation request will create a new conversation thread.
711/// If old conversation id is not set, a brand new, clear chat is created.
712/// If old conversation id is set without a previous message id, the full conversation thread will be copied.
713/// If old conversation id is set together with a previous message id, the conversation thread up until that message will be copied.
714/// The latter case is useful for branching a conversation into a new thread.
715#[derive(Clone, PartialEq, ::prost::Message)]
716pub struct CreateConversationRequest {
717    #[prost(string, tag = "1")]
718    pub title: ::prost::alloc::string::String,
719    #[prost(string, tag = "2")]
720    pub workspace_rid: ::prost::alloc::string::String,
721    #[prost(string, optional, tag = "3")]
722    pub old_conversation_rid: ::core::option::Option<::prost::alloc::string::String>,
723    #[prost(string, optional, tag = "4")]
724    pub previous_message_id: ::core::option::Option<::prost::alloc::string::String>,
725    #[prost(message, optional, tag = "5")]
726    pub conversation_mode: ::core::option::Option<ConversationMode>,
727}
728/// CreateConversationResponse will return the conversation id for the new conversation
729#[derive(Clone, PartialEq, ::prost::Message)]
730pub struct CreateConversationResponse {
731    #[prost(string, tag = "1")]
732    pub new_conversation_rid: ::prost::alloc::string::String,
733}
734/// Updates the specified fields (an unset optional field means no change for that field)
735#[derive(Clone, PartialEq, ::prost::Message)]
736pub struct UpdateConversationMetadataRequest {
737    #[prost(string, optional, tag = "1")]
738    pub title: ::core::option::Option<::prost::alloc::string::String>,
739    #[prost(string, tag = "2")]
740    pub conversation_rid: ::prost::alloc::string::String,
741    #[prost(message, optional, tag = "3")]
742    pub conversation_mode: ::core::option::Option<ConversationMode>,
743}
744#[derive(Clone, Copy, PartialEq, ::prost::Message)]
745pub struct UpdateConversationMetadataResponse {}
746#[derive(Clone, PartialEq, ::prost::Message)]
747pub struct DeleteConversationRequest {
748    #[prost(string, tag = "1")]
749    pub conversation_rid: ::prost::alloc::string::String,
750}
751#[derive(Clone, Copy, PartialEq, ::prost::Message)]
752pub struct DeleteConversationResponse {}
753/// A GetConversationRequest allows you to retrieve a subset of messages from the conversation thread identified
754/// by the provided rid. To start from a particular message, you can also provide a message id.
755#[derive(Clone, PartialEq, ::prost::Message)]
756pub struct GetConversationRequest {
757    #[prost(string, tag = "1")]
758    pub conversation_rid: ::prost::alloc::string::String,
759    #[prost(string, optional, tag = "2")]
760    pub page_start_message_id: ::core::option::Option<::prost::alloc::string::String>,
761    #[prost(int32, optional, tag = "3")]
762    pub max_message_count: ::core::option::Option<i32>,
763}
764/// ModelMessageWithId pairs a model message with the message ID that identifies it
765#[derive(Clone, PartialEq, ::prost::Message)]
766pub struct ModelMessageWithId {
767    #[prost(string, tag = "3")]
768    pub message_id: ::prost::alloc::string::String,
769    /// WB agent user messages can have snapshot rids associated with them
770    #[prost(string, optional, tag = "4")]
771    pub snapshot_rid: ::core::option::Option<::prost::alloc::string::String>,
772    #[prost(message, repeated, tag = "5")]
773    pub tool_approval_requests: ::prost::alloc::vec::Vec<ToolCallDescription>,
774    #[prost(oneof = "model_message_with_id::Content", tags = "1, 2")]
775    pub content: ::core::option::Option<model_message_with_id::Content>,
776}
777/// Nested message and enum types in `ModelMessageWithId`.
778pub mod model_message_with_id {
779    #[derive(Clone, PartialEq, ::prost::Oneof)]
780    pub enum Content {
781        #[prost(message, tag = "1")]
782        Message(super::ModelMessage),
783        #[prost(message, tag = "2")]
784        ToolAction(super::ToolAction),
785    }
786}
787#[derive(Clone, PartialEq, ::prost::Message)]
788pub struct GetConversationResponse {
789    #[prost(message, repeated, tag = "1")]
790    pub ordered_messages: ::prost::alloc::vec::Vec<ModelMessageWithId>,
791    #[prost(message, optional, tag = "2")]
792    pub conversation_metadata: ::core::option::Option<ConversationMetadata>,
793}
794/// Lists all conversation threads that this user has in this workspace
795#[derive(Clone, PartialEq, ::prost::Message)]
796pub struct ListConversationsRequest {
797    #[prost(string, tag = "1")]
798    pub workspace_rid: ::prost::alloc::string::String,
799    #[prost(string, optional, tag = "2")]
800    pub next_page_token: ::core::option::Option<::prost::alloc::string::String>,
801    #[prost(int32, optional, tag = "3")]
802    pub page_size: ::core::option::Option<i32>,
803}
804#[derive(Clone, PartialEq, ::prost::Message)]
805pub struct ConversationMetadata {
806    #[prost(string, tag = "1")]
807    pub conversation_rid: ::prost::alloc::string::String,
808    #[prost(string, tag = "2")]
809    pub title: ::prost::alloc::string::String,
810    #[prost(message, optional, tag = "3")]
811    pub created_at: ::core::option::Option<
812        super::super::super::google::protobuf::Timestamp,
813    >,
814    #[prost(message, optional, tag = "4")]
815    pub last_updated_at: ::core::option::Option<
816        super::super::super::google::protobuf::Timestamp,
817    >,
818    #[prost(message, optional, tag = "5")]
819    pub mode: ::core::option::Option<ConversationMode>,
820}
821/// ListConversationsResponse is a list of conversations that can be used in a call to GetConversationRequest
822/// to get a full conversation from storage. These are ordered by creation time.
823#[derive(Clone, PartialEq, ::prost::Message)]
824pub struct ListConversationsResponse {
825    #[prost(message, repeated, tag = "1")]
826    pub conversations: ::prost::alloc::vec::Vec<ConversationMetadata>,
827    #[prost(string, optional, tag = "2")]
828    pub next_page_token: ::core::option::Option<::prost::alloc::string::String>,
829}
830#[derive(Clone, Copy, PartialEq, ::prost::Message)]
831pub struct TimeRange {
832    #[prost(message, optional, tag = "1")]
833    pub range_start: ::core::option::Option<Timestamp>,
834    #[prost(message, optional, tag = "2")]
835    pub range_end: ::core::option::Option<Timestamp>,
836}
837#[derive(Clone, Copy, PartialEq, ::prost::Message)]
838pub struct Timestamp {
839    #[prost(int32, tag = "1")]
840    pub seconds: i32,
841    #[prost(int32, tag = "2")]
842    pub nanoseconds: i32,
843}
844/// ModelMessage is a discriminated union of system, user, assistant, and tool messages.
845/// Each message type has its own structure and content.
846#[derive(Clone, PartialEq, ::prost::Message)]
847pub struct ModelMessage {
848    #[prost(oneof = "model_message::Kind", tags = "1, 2")]
849    pub kind: ::core::option::Option<model_message::Kind>,
850}
851/// Nested message and enum types in `ModelMessage`.
852pub mod model_message {
853    #[derive(Clone, PartialEq, ::prost::Oneof)]
854    pub enum Kind {
855        #[prost(message, tag = "1")]
856        User(super::UserModelMessage),
857        #[prost(message, tag = "2")]
858        Assistant(super::AssistantModelMessage),
859    }
860}
861/// A user message containing text
862#[derive(Clone, PartialEq, ::prost::Message)]
863pub struct UserModelMessage {
864    #[prost(message, repeated, tag = "1")]
865    pub text: ::prost::alloc::vec::Vec<UserContentPart>,
866}
867/// An assistant message containing text
868#[derive(Clone, PartialEq, ::prost::Message)]
869pub struct AssistantModelMessage {
870    #[prost(message, repeated, tag = "1")]
871    pub content_parts: ::prost::alloc::vec::Vec<AssistantContentPart>,
872}
873#[derive(Clone, PartialEq, ::prost::Message)]
874pub struct UserContentPart {
875    #[prost(oneof = "user_content_part::Part", tags = "1")]
876    pub part: ::core::option::Option<user_content_part::Part>,
877}
878/// Nested message and enum types in `UserContentPart`.
879pub mod user_content_part {
880    #[derive(Clone, PartialEq, ::prost::Oneof)]
881    pub enum Part {
882        #[prost(message, tag = "1")]
883        Text(super::TextPart),
884    }
885}
886/// Content part for assistant messages: can be text, reasoning, or mutation.
887#[derive(Clone, PartialEq, ::prost::Message)]
888pub struct AssistantContentPart {
889    #[prost(oneof = "assistant_content_part::Part", tags = "1, 2")]
890    pub part: ::core::option::Option<assistant_content_part::Part>,
891}
892/// Nested message and enum types in `AssistantContentPart`.
893pub mod assistant_content_part {
894    #[derive(Clone, PartialEq, ::prost::Oneof)]
895    pub enum Part {
896        #[prost(message, tag = "1")]
897        Text(super::TextPart),
898        #[prost(message, tag = "2")]
899        Reasoning(super::ReasoningPart),
900    }
901}
902/// Text part for user or assistant messages.
903#[derive(Clone, PartialEq, ::prost::Message)]
904pub struct TextPart {
905    #[prost(string, tag = "1")]
906    pub text: ::prost::alloc::string::String,
907}
908/// User-supplied image part.
909#[derive(Clone, PartialEq, ::prost::Message)]
910pub struct ImagePart {
911    /// The base64-encoded image data
912    #[prost(bytes = "vec", tag = "1")]
913    pub data: ::prost::alloc::vec::Vec<u8>,
914    /// The media type of the image (e.g. "image/png", "image/jpeg")
915    #[prost(string, optional, tag = "2")]
916    pub media_type: ::core::option::Option<::prost::alloc::string::String>,
917    /// Optional: the filename of the image
918    #[prost(string, optional, tag = "3")]
919    pub filename: ::core::option::Option<::prost::alloc::string::String>,
920}
921/// Reasoning part for assistant messages.
922#[derive(Clone, PartialEq, ::prost::Message)]
923pub struct ReasoningPart {
924    #[prost(string, tag = "1")]
925    pub reasoning: ::prost::alloc::string::String,
926}
927/// StreamChatResponse is a discriminated union response to a StreamChatRequest
928#[derive(Clone, PartialEq, ::prost::Message)]
929pub struct StreamChatResponse {
930    #[prost(
931        oneof = "stream_chat_response::Response",
932        tags = "1, 2, 3, 4, 5, 6, 7, 8, 10"
933    )]
934    pub response: ::core::option::Option<stream_chat_response::Response>,
935}
936/// Nested message and enum types in `StreamChatResponse`.
937pub mod stream_chat_response {
938    #[derive(Clone, PartialEq, ::prost::Oneof)]
939    pub enum Response {
940        #[prost(message, tag = "1")]
941        Finish(super::Finish),
942        #[prost(message, tag = "2")]
943        Error(super::Error),
944        #[prost(message, tag = "3")]
945        TextStart(super::TextStart),
946        #[prost(message, tag = "4")]
947        TextDelta(super::TextDelta),
948        #[prost(message, tag = "5")]
949        TextEnd(super::TextEnd),
950        #[prost(message, tag = "6")]
951        ReasoningStart(super::ReasoningStart),
952        #[prost(message, tag = "7")]
953        ReasoningDelta(super::ReasoningDelta),
954        #[prost(message, tag = "8")]
955        ReasoningEnd(super::ReasoningEnd),
956        #[prost(message, tag = "10")]
957        ToolAction(super::ToolAction),
958    }
959}
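Illustrative sketch (not part of the generated file): handling a few of the streamed response variants. A real client would also handle the text/reasoning start and end events.

fn handle_response(resp: StreamChatResponse) {
    match resp.response {
        Some(stream_chat_response::Response::TextDelta(d)) => print!("{}", d.delta),
        Some(stream_chat_response::Response::ToolAction(a)) => {
            println!("[{} {}]", a.tool_action_verb, a.tool_target.unwrap_or_default())
        }
        Some(stream_chat_response::Response::Error(e)) => eprintln!("error: {}", e.message),
        Some(stream_chat_response::Response::Finish(f)) => {
            println!("finished with {} message(s)", f.ordered_message_ids.len())
        }
        // TextStart/TextEnd, reasoning events, etc. are ignored in this sketch.
        _ => {}
    }
}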
960#[derive(Clone, PartialEq, ::prost::Message)]
961pub struct ToolCallDescription {
962    #[prost(string, tag = "1")]
963    pub tool_call_id: ::prost::alloc::string::String,
964    #[prost(string, tag = "2")]
965    pub tool_name: ::prost::alloc::string::String,
966    /// string representation of the proposed tool args for display
967    #[prost(string, tag = "3")]
968    pub tool_args_json_string: ::prost::alloc::string::String,
969    /// used to conditionally render an approval button based on the outcome of the tool call
970    #[prost(enumeration = "ToolCallStatus", tag = "4")]
971    pub status: i32,
972}
973/// Indicates the end of a chat session
974#[derive(Clone, PartialEq, ::prost::Message)]
975pub struct Finish {
976    /// The message ids, in order, of all generated messages for this agent run.
977    /// These ids can be used to branch a new conversation thread from that specific message
978    #[prost(string, repeated, tag = "1")]
979    pub ordered_message_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
980    /// In the case that this is the first agent run in a conversation thread, we also
981    /// return the new conversation title generated
982    #[prost(string, optional, tag = "2")]
983    pub new_title: ::core::option::Option<::prost::alloc::string::String>,
984    #[prost(message, repeated, tag = "3")]
985    pub tool_approval_requests: ::prost::alloc::vec::Vec<ToolCallDescription>,
986}
987/// An error that occurred during the chat session
988#[derive(Clone, PartialEq, ::prost::Message)]
989pub struct Error {
990    #[prost(string, tag = "1")]
991    pub message: ::prost::alloc::string::String,
992}
993/// Indicates the start of a text message from the agent
994#[derive(Clone, PartialEq, ::prost::Message)]
995pub struct TextStart {
996    /// Uniquely identifies the text message (e.g. a uuid) so that the client can
997    /// merge parallel message streams (if they occur).
998    #[prost(string, tag = "1")]
999    pub id: ::prost::alloc::string::String,
1000}
1001/// A delta (continuation) of a text message from the agent
1002#[derive(Clone, PartialEq, ::prost::Message)]
1003pub struct TextDelta {
1004    #[prost(string, tag = "1")]
1005    pub id: ::prost::alloc::string::String,
1006    /// The next chunk of text
1007    #[prost(string, tag = "2")]
1008    pub delta: ::prost::alloc::string::String,
1009}
1010/// Indicates the end of a text message from the agent
1011#[derive(Clone, PartialEq, ::prost::Message)]
1012pub struct TextEnd {
1013    #[prost(string, tag = "1")]
1014    pub id: ::prost::alloc::string::String,
1015}
1016/// Indicates the start of a reasoning message from the agent
1017#[derive(Clone, PartialEq, ::prost::Message)]
1018pub struct ReasoningStart {
1019    #[prost(string, tag = "1")]
1020    pub id: ::prost::alloc::string::String,
1021}
1022/// A delta (continuation) of a reasoning message from the agent
1023#[derive(Clone, PartialEq, ::prost::Message)]
1024pub struct ReasoningDelta {
1025    #[prost(string, tag = "1")]
1026    pub id: ::prost::alloc::string::String,
1027    /// The next chunk of reasoning
1028    #[prost(string, tag = "2")]
1029    pub delta: ::prost::alloc::string::String,
1030}
1031/// Indicates the end of a reasoning message from the agent
1032#[derive(Clone, PartialEq, ::prost::Message)]
1033pub struct ReasoningEnd {
1034    #[prost(string, tag = "1")]
1035    pub id: ::prost::alloc::string::String,
1036}
1037/// A concise description of a tool call that the agent is making internally.
1038/// Without revealing too much detail about the tool call, it informs the user what the agent is doing
1039/// at a high level. The format is `{tool_action_verb} {tool_target}`, for example:
1040/// "Search channels for My Datasource"
1041#[derive(Clone, PartialEq, ::prost::Message)]
1042pub struct ToolAction {
1043    #[prost(string, tag = "1")]
1044    pub id: ::prost::alloc::string::String,
1045    /// "Thought", "Read", "Find", "Look-up", etc.
1046    #[prost(string, tag = "2")]
1047    pub tool_action_verb: ::prost::alloc::string::String,
1048    /// "workbook", "channel", "variable", "panel", etc.
1049    #[prost(string, optional, tag = "3")]
1050    pub tool_target: ::core::option::Option<::prost::alloc::string::String>,
1051}
1052#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
1053#[repr(i32)]
1054pub enum ToolCallStatus {
1055    Unspecified = 0,
1056    Approved = 1,
1057    Denied = 2,
1058    AwaitingApproval = 3,
1059}
1060impl ToolCallStatus {
1061    /// String value of the enum field names used in the ProtoBuf definition.
1062    ///
1063    /// The values are not transformed in any way and thus are considered stable
1064    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
1065    pub fn as_str_name(&self) -> &'static str {
1066        match self {
1067            Self::Unspecified => "TOOL_CALL_STATUS_UNSPECIFIED",
1068            Self::Approved => "TOOL_CALL_STATUS_APPROVED",
1069            Self::Denied => "TOOL_CALL_STATUS_DENIED",
1070            Self::AwaitingApproval => "TOOL_CALL_STATUS_AWAITING_APPROVAL",
1071        }
1072    }
1073    /// Creates an enum from field names used in the ProtoBuf definition.
1074    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
1075        match value {
1076            "TOOL_CALL_STATUS_UNSPECIFIED" => Some(Self::Unspecified),
1077            "TOOL_CALL_STATUS_APPROVED" => Some(Self::Approved),
1078            "TOOL_CALL_STATUS_DENIED" => Some(Self::Denied),
1079            "TOOL_CALL_STATUS_AWAITING_APPROVAL" => Some(Self::AwaitingApproval),
1080            _ => None,
1081        }
1082    }
1083}
1084/// Generated client implementations.
1085pub mod ai_agent_service_client {
1086    #![allow(
1087        unused_variables,
1088        dead_code,
1089        missing_docs,
1090        clippy::wildcard_imports,
1091        clippy::let_unit_value,
1092    )]
1093    use tonic::codegen::*;
1094    use tonic::codegen::http::Uri;
1095    /// AIAgentService provides AI-powered assistance for general operations
1096    #[derive(Debug, Clone)]
1097    pub struct AiAgentServiceClient<T> {
1098        inner: tonic::client::Grpc<T>,
1099    }
1100    impl AiAgentServiceClient<tonic::transport::Channel> {
1101        /// Attempt to create a new client by connecting to a given endpoint.
1102        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
1103        where
1104            D: TryInto<tonic::transport::Endpoint>,
1105            D::Error: Into<StdError>,
1106        {
1107            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
1108            Ok(Self::new(conn))
1109        }
1110    }
1111    impl<T> AiAgentServiceClient<T>
1112    where
1113        T: tonic::client::GrpcService<tonic::body::Body>,
1114        T::Error: Into<StdError>,
1115        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
1116        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
1117    {
1118        pub fn new(inner: T) -> Self {
1119            let inner = tonic::client::Grpc::new(inner);
1120            Self { inner }
1121        }
1122        pub fn with_origin(inner: T, origin: Uri) -> Self {
1123            let inner = tonic::client::Grpc::with_origin(inner, origin);
1124            Self { inner }
1125        }
1126        pub fn with_interceptor<F>(
1127            inner: T,
1128            interceptor: F,
1129        ) -> AiAgentServiceClient<InterceptedService<T, F>>
1130        where
1131            F: tonic::service::Interceptor,
1132            T::ResponseBody: Default,
1133            T: tonic::codegen::Service<
1134                http::Request<tonic::body::Body>,
1135                Response = http::Response<
1136                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
1137                >,
1138            >,
1139            <T as tonic::codegen::Service<
1140                http::Request<tonic::body::Body>,
1141            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
1142        {
1143            AiAgentServiceClient::new(InterceptedService::new(inner, interceptor))
1144        }
1145        /// Compress requests with the given encoding.
1146        ///
1147        /// This requires the server to support it otherwise it might respond with an
1148        /// error.
1149        #[must_use]
1150        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
1151            self.inner = self.inner.send_compressed(encoding);
1152            self
1153        }
1154        /// Enable decompressing responses.
1155        #[must_use]
1156        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
1157            self.inner = self.inner.accept_compressed(encoding);
1158            self
1159        }
1160        /// Limits the maximum size of a decoded message.
1161        ///
1162        /// Default: `4MB`
1163        #[must_use]
1164        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
1165            self.inner = self.inner.max_decoding_message_size(limit);
1166            self
1167        }
1168        /// Limits the maximum size of an encoded message.
1169        ///
1170        /// Default: `usize::MAX`
1171        #[must_use]
1172        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
1173            self.inner = self.inner.max_encoding_message_size(limit);
1174            self
1175        }
1176        /// StreamChat streams chat responses from the AI agent for a given request
1177        pub async fn stream_chat(
1178            &mut self,
1179            request: impl tonic::IntoRequest<super::StreamChatRequest>,
1180        ) -> std::result::Result<
1181            tonic::Response<tonic::codec::Streaming<super::StreamChatResponse>>,
1182            tonic::Status,
1183        > {
1184            self.inner
1185                .ready()
1186                .await
1187                .map_err(|e| {
1188                    tonic::Status::unknown(
1189                        format!("Service was not ready: {}", e.into()),
1190                    )
1191                })?;
1192            let codec = tonic::codec::ProstCodec::default();
1193            let path = http::uri::PathAndQuery::from_static(
1194                "/nominal.ai.v1.AIAgentService/StreamChat",
1195            );
1196            let mut req = request.into_request();
1197            req.extensions_mut()
1198                .insert(GrpcMethod::new("nominal.ai.v1.AIAgentService", "StreamChat"));
1199            self.inner.server_streaming(req, path, codec).await
1200        }
1201        /// GetConversation handles getting a conversation's messages, with an optional limit on the number of messages returned
1202        pub async fn get_conversation(
1203            &mut self,
1204            request: impl tonic::IntoRequest<super::GetConversationRequest>,
1205        ) -> std::result::Result<
1206            tonic::Response<super::GetConversationResponse>,
1207            tonic::Status,
1208        > {
1209            self.inner
1210                .ready()
1211                .await
1212                .map_err(|e| {
1213                    tonic::Status::unknown(
1214                        format!("Service was not ready: {}", e.into()),
1215                    )
1216                })?;
1217            let codec = tonic::codec::ProstCodec::default();
1218            let path = http::uri::PathAndQuery::from_static(
1219                "/nominal.ai.v1.AIAgentService/GetConversation",
1220            );
1221            let mut req = request.into_request();
1222            req.extensions_mut()
1223                .insert(
1224                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "GetConversation"),
1225                );
1226            self.inner.unary(req, path, codec).await
1227        }
1228        /// ListConversations handles getting the list of conversation ids ordered by most recently updated
1229        pub async fn list_conversations(
1230            &mut self,
1231            request: impl tonic::IntoRequest<super::ListConversationsRequest>,
1232        ) -> std::result::Result<
1233            tonic::Response<super::ListConversationsResponse>,
1234            tonic::Status,
1235        > {
1236            self.inner
1237                .ready()
1238                .await
1239                .map_err(|e| {
1240                    tonic::Status::unknown(
1241                        format!("Service was not ready: {}", e.into()),
1242                    )
1243                })?;
1244            let codec = tonic::codec::ProstCodec::default();
1245            let path = http::uri::PathAndQuery::from_static(
1246                "/nominal.ai.v1.AIAgentService/ListConversations",
1247            );
1248            let mut req = request.into_request();
1249            req.extensions_mut()
1250                .insert(
1251                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "ListConversations"),
1252                );
1253            self.inner.unary(req, path, codec).await
1254        }
1255        /// CreateConversation handles creating a conversation and assigning it a conversation rid
1256        pub async fn create_conversation(
1257            &mut self,
1258            request: impl tonic::IntoRequest<super::CreateConversationRequest>,
1259        ) -> std::result::Result<
1260            tonic::Response<super::CreateConversationResponse>,
1261            tonic::Status,
1262        > {
1263            self.inner
1264                .ready()
1265                .await
1266                .map_err(|e| {
1267                    tonic::Status::unknown(
1268                        format!("Service was not ready: {}", e.into()),
1269                    )
1270                })?;
1271            let codec = tonic::codec::ProstCodec::default();
1272            let path = http::uri::PathAndQuery::from_static(
1273                "/nominal.ai.v1.AIAgentService/CreateConversation",
1274            );
1275            let mut req = request.into_request();
1276            req.extensions_mut()
1277                .insert(
1278                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "CreateConversation"),
1279                );
1280            self.inner.unary(req, path, codec).await
1281        }
1282        /// UpdateConversationMetadata handles updating any metadata associated with a conversation
1283        pub async fn update_conversation_metadata(
1284            &mut self,
1285            request: impl tonic::IntoRequest<super::UpdateConversationMetadataRequest>,
1286        ) -> std::result::Result<
1287            tonic::Response<super::UpdateConversationMetadataResponse>,
1288            tonic::Status,
1289        > {
1290            self.inner
1291                .ready()
1292                .await
1293                .map_err(|e| {
1294                    tonic::Status::unknown(
1295                        format!("Service was not ready: {}", e.into()),
1296                    )
1297                })?;
1298            let codec = tonic::codec::ProstCodec::default();
1299            let path = http::uri::PathAndQuery::from_static(
1300                "/nominal.ai.v1.AIAgentService/UpdateConversationMetadata",
1301            );
1302            let mut req = request.into_request();
1303            req.extensions_mut()
1304                .insert(
1305                    GrpcMethod::new(
1306                        "nominal.ai.v1.AIAgentService",
1307                        "UpdateConversationMetadata",
1308                    ),
1309                );
1310            self.inner.unary(req, path, codec).await
1311        }
1312        /// DeleteConversation handles deleting a specific conversation by conversation rid
1313        pub async fn delete_conversation(
1314            &mut self,
1315            request: impl tonic::IntoRequest<super::DeleteConversationRequest>,
1316        ) -> std::result::Result<
1317            tonic::Response<super::DeleteConversationResponse>,
1318            tonic::Status,
1319        > {
1320            self.inner
1321                .ready()
1322                .await
1323                .map_err(|e| {
1324                    tonic::Status::unknown(
1325                        format!("Service was not ready: {}", e.into()),
1326                    )
1327                })?;
1328            let codec = tonic::codec::ProstCodec::default();
1329            let path = http::uri::PathAndQuery::from_static(
1330                "/nominal.ai.v1.AIAgentService/DeleteConversation",
1331            );
1332            let mut req = request.into_request();
1333            req.extensions_mut()
1334                .insert(
1335                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "DeleteConversation"),
1336                );
1337            self.inner.unary(req, path, codec).await
1338        }
1339        /// GetSnapshotRidByUserMessageId handles resolving the snapshot rid of the workbook at the time the message is sent
1340        pub async fn get_snapshot_rid_by_user_message_id(
1341            &mut self,
1342            request: impl tonic::IntoRequest<super::GetSnapshotRidByUserMessageIdRequest>,
1343        ) -> std::result::Result<
1344            tonic::Response<super::GetSnapshotRidByUserMessageIdResponse>,
1345            tonic::Status,
1346        > {
1347            self.inner
1348                .ready()
1349                .await
1350                .map_err(|e| {
1351                    tonic::Status::unknown(
1352                        format!("Service was not ready: {}", e.into()),
1353                    )
1354                })?;
1355            let codec = tonic::codec::ProstCodec::default();
1356            let path = http::uri::PathAndQuery::from_static(
1357                "/nominal.ai.v1.AIAgentService/GetSnapshotRidByUserMessageId",
1358            );
1359            let mut req = request.into_request();
1360            req.extensions_mut()
1361                .insert(
1362                    GrpcMethod::new(
1363                        "nominal.ai.v1.AIAgentService",
1364                        "GetSnapshotRidByUserMessageId",
1365                    ),
1366                );
1367            self.inner.unary(req, path, codec).await
1368        }
1369    }
1370}
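Illustrative sketch (not part of the generated file): an end-to-end flow against AIAgentService that creates a conversation, sends one prompt, and drains the server-streamed responses. It reuses the example_prompt_request and handle_response sketches above; the endpoint and RIDs are placeholder assumptions, and a tokio runtime is assumed.

use ai_agent_service_client::AiAgentServiceClient;

async fn chat_once() -> Result<(), Box<dyn std::error::Error>> {
    let mut client = AiAgentServiceClient::connect("http://localhost:50051").await?;

    // Create a fresh conversation thread in the workspace.
    let conversation_rid = client
        .create_conversation(CreateConversationRequest {
            title: "Example chat".to_string(),
            workspace_rid: "ri.workspace.example".to_string(),
            old_conversation_rid: None,
            previous_message_id: None,
            conversation_mode: None,
        })
        .await?
        .into_inner()
        .new_conversation_rid;

    // Send one user prompt and consume the streamed responses as they arrive.
    let request = example_prompt_request(conversation_rid, "ri.workbook.example".to_string());
    let mut stream = client.stream_chat(request).await?.into_inner();
    while let Some(resp) = stream.message().await? {
        handle_response(resp);
    }
    Ok(())
}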