nominal_api/proto/nominal.ai.v1.rs

1// This file is @generated by prost-build.
2#[derive(Clone, PartialEq, ::prost::Message)]
3pub struct ClassifyErrorRequest {
4    /// The error message or description to classify
5    #[prost(string, tag = "1")]
6    pub error_message: ::prost::alloc::string::String,
7}
8#[derive(Clone, PartialEq, ::prost::Message)]
9pub struct ClassifyErrorResponse {
10    /// The classification result
11    #[prost(enumeration = "ErrorClassification", tag = "1")]
12    pub classification: i32,
13    /// Explanation for why this classification was chosen
14    #[prost(string, tag = "2")]
15    pub reason: ::prost::alloc::string::String,
16}
17#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
18#[repr(i32)]
19pub enum ErrorClassification {
20    Unspecified = 0,
21    Client = 1,
22    Server = 2,
23}
24impl ErrorClassification {
25    /// String value of the enum field names used in the ProtoBuf definition.
26    ///
27    /// The values are not transformed in any way and thus are considered stable
28    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
29    pub fn as_str_name(&self) -> &'static str {
30        match self {
31            Self::Unspecified => "ERROR_CLASSIFICATION_UNSPECIFIED",
32            Self::Client => "ERROR_CLASSIFICATION_CLIENT",
33            Self::Server => "ERROR_CLASSIFICATION_SERVER",
34        }
35    }
36    /// Creates an enum from field names used in the ProtoBuf definition.
37    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
38        match value {
39            "ERROR_CLASSIFICATION_UNSPECIFIED" => Some(Self::Unspecified),
40            "ERROR_CLASSIFICATION_CLIENT" => Some(Self::Client),
41            "ERROR_CLASSIFICATION_SERVER" => Some(Self::Server),
42            _ => None,
43        }
44    }
45}
46/// Generated client implementations.
47pub mod data_ingestion_error_classifier_service_client {
48    #![allow(
49        unused_variables,
50        dead_code,
51        missing_docs,
52        clippy::wildcard_imports,
53        clippy::let_unit_value,
54    )]
55    use tonic::codegen::*;
56    use tonic::codegen::http::Uri;
57    /// DataIngestionErrorClassifierService classifies data ingestion errors as CLIENT or SERVER issues.
58    /// CLIENT errors are customer data issues (empty files, schema conflicts, timestamp problems, etc.)
59    /// SERVER errors are infrastructure issues (internal errors, timeouts, capacity limits, etc.)
60    #[derive(Debug, Clone)]
61    pub struct DataIngestionErrorClassifierServiceClient<T> {
62        inner: tonic::client::Grpc<T>,
63    }
64    impl DataIngestionErrorClassifierServiceClient<tonic::transport::Channel> {
65        /// Attempt to create a new client by connecting to a given endpoint.
66        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
67        where
68            D: TryInto<tonic::transport::Endpoint>,
69            D::Error: Into<StdError>,
70        {
71            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
72            Ok(Self::new(conn))
73        }
74    }
75    impl<T> DataIngestionErrorClassifierServiceClient<T>
76    where
77        T: tonic::client::GrpcService<tonic::body::Body>,
78        T::Error: Into<StdError>,
79        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
80        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
81    {
82        pub fn new(inner: T) -> Self {
83            let inner = tonic::client::Grpc::new(inner);
84            Self { inner }
85        }
86        pub fn with_origin(inner: T, origin: Uri) -> Self {
87            let inner = tonic::client::Grpc::with_origin(inner, origin);
88            Self { inner }
89        }
90        pub fn with_interceptor<F>(
91            inner: T,
92            interceptor: F,
93        ) -> DataIngestionErrorClassifierServiceClient<InterceptedService<T, F>>
94        where
95            F: tonic::service::Interceptor,
96            T::ResponseBody: Default,
97            T: tonic::codegen::Service<
98                http::Request<tonic::body::Body>,
99                Response = http::Response<
100                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
101                >,
102            >,
103            <T as tonic::codegen::Service<
104                http::Request<tonic::body::Body>,
105            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
106        {
107            DataIngestionErrorClassifierServiceClient::new(
108                InterceptedService::new(inner, interceptor),
109            )
110        }
111        /// Compress requests with the given encoding.
112        ///
113        /// This requires the server to support it, otherwise it might respond with an
114        /// error.
115        #[must_use]
116        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
117            self.inner = self.inner.send_compressed(encoding);
118            self
119        }
120        /// Enable decompressing responses.
121        #[must_use]
122        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
123            self.inner = self.inner.accept_compressed(encoding);
124            self
125        }
126        /// Limits the maximum size of a decoded message.
127        ///
128        /// Default: `4MB`
129        #[must_use]
130        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
131            self.inner = self.inner.max_decoding_message_size(limit);
132            self
133        }
134        /// Limits the maximum size of an encoded message.
135        ///
136        /// Default: `usize::MAX`
137        #[must_use]
138        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
139            self.inner = self.inner.max_encoding_message_size(limit);
140            self
141        }
142        /// ClassifyError analyzes an error message and classifies it as CLIENT or SERVER.
143        pub async fn classify_error(
144            &mut self,
145            request: impl tonic::IntoRequest<super::ClassifyErrorRequest>,
146        ) -> std::result::Result<
147            tonic::Response<super::ClassifyErrorResponse>,
148            tonic::Status,
149        > {
150            self.inner
151                .ready()
152                .await
153                .map_err(|e| {
154                    tonic::Status::unknown(
155                        format!("Service was not ready: {}", e.into()),
156                    )
157                })?;
158            let codec = tonic::codec::ProstCodec::default();
159            let path = http::uri::PathAndQuery::from_static(
160                "/nominal.ai.v1.DataIngestionErrorClassifierService/ClassifyError",
161            );
162            let mut req = request.into_request();
163            req.extensions_mut()
164                .insert(
165                    GrpcMethod::new(
166                        "nominal.ai.v1.DataIngestionErrorClassifierService",
167                        "ClassifyError",
168                    ),
169                );
170            self.inner.unary(req, path, codec).await
171        }
172    }
173}
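// Illustrative usage sketch: a minimal example of calling ClassifyError through the
// generated client. Assumes a tokio runtime and a reachable endpoint; the URL and the
// sample error message below are placeholders, not real values.
#[allow(dead_code)]
async fn classify_error_example() -> Result<(), Box<dyn std::error::Error>> {
    use self::data_ingestion_error_classifier_service_client::DataIngestionErrorClassifierServiceClient;
    let mut client =
        DataIngestionErrorClassifierServiceClient::connect("http://localhost:8080").await?;
    let response = client
        .classify_error(ClassifyErrorRequest {
            error_message: "uploaded CSV file contains no rows".to_string(),
        })
        .await?
        .into_inner();
    // `classification` is carried as an i32; convert it back to the enum to match on it.
    let classification = ErrorClassification::try_from(response.classification)
        .unwrap_or(ErrorClassification::Unspecified);
    println!("{}: {}", classification.as_str_name(), response.reason);
    Ok(())
}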
174#[derive(Clone, Copy, PartialEq, ::prost::Message)]
175pub struct GetProviderStatusRequest {}
176#[derive(Clone, Copy, PartialEq, ::prost::Message)]
177pub struct GetProviderStatusResponse {
178    /// Timestamp when the last status was determined
179    #[prost(message, optional, tag = "1")]
180    pub timestamp: ::core::option::Option<
181        super::super::super::google::protobuf::Timestamp,
182    >,
183    /// Status of the most recent health check probe
184    #[prost(message, optional, tag = "2")]
185    pub last_status: ::core::option::Option<ProviderStatus>,
186    /// Aggregated status over the last 30 minutes (DEGRADED if any check failed or exceeded thresholds)
187    /// Deprecated: Use aggregated_status instead. This field is kept for backward compatibility.
188    #[deprecated]
189    #[prost(message, optional, tag = "3")]
190    pub aggregated_status_over_last_30m: ::core::option::Option<ProviderStatus>,
191    /// Aggregated status over the most recent health-check iterations, with the window configurable in the backend (DEGRADED if any check failed or exceeded thresholds)
192    #[prost(message, optional, tag = "4")]
193    pub aggregated_status: ::core::option::Option<ProviderStatus>,
194}
195#[derive(Clone, Copy, PartialEq, ::prost::Message)]
196pub struct ProviderStatus {
197    #[prost(oneof = "provider_status::Status", tags = "1, 2")]
198    pub status: ::core::option::Option<provider_status::Status>,
199}
200/// Nested message and enum types in `ProviderStatus`.
201pub mod provider_status {
202    #[derive(Clone, Copy, PartialEq, ::prost::Oneof)]
203    pub enum Status {
204        #[prost(message, tag = "1")]
205        Healthy(super::Healthy),
206        #[prost(message, tag = "2")]
207        Degraded(super::Degraded),
208    }
209}
210#[derive(Clone, Copy, PartialEq, ::prost::Message)]
211pub struct Healthy {}
212#[derive(Clone, Copy, PartialEq, ::prost::Message)]
213pub struct Degraded {
214    #[prost(enumeration = "DegradationReason", tag = "1")]
215    pub reason: i32,
216}
217#[derive(Clone, Copy, PartialEq, ::prost::Message)]
218pub struct ProviderMetrics {
219    #[prost(int32, tag = "1")]
220    pub time_to_first_token_ms: i32,
221    #[prost(int32, tag = "2")]
222    pub total_time_ms: i32,
223}
224#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
225#[repr(i32)]
226pub enum DegradationReason {
227    Unspecified = 0,
228    HighLatency = 1,
229    Failures = 2,
230    HighLatencyAndFailures = 3,
231}
232impl DegradationReason {
233    /// String value of the enum field names used in the ProtoBuf definition.
234    ///
235    /// The values are not transformed in any way and thus are considered stable
236    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
237    pub fn as_str_name(&self) -> &'static str {
238        match self {
239            Self::Unspecified => "DEGRADATION_REASON_UNSPECIFIED",
240            Self::HighLatency => "DEGRADATION_REASON_HIGH_LATENCY",
241            Self::Failures => "DEGRADATION_REASON_FAILURES",
242            Self::HighLatencyAndFailures => {
243                "DEGRADATION_REASON_HIGH_LATENCY_AND_FAILURES"
244            }
245        }
246    }
247    /// Creates an enum from field names used in the ProtoBuf definition.
248    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
249        match value {
250            "DEGRADATION_REASON_UNSPECIFIED" => Some(Self::Unspecified),
251            "DEGRADATION_REASON_HIGH_LATENCY" => Some(Self::HighLatency),
252            "DEGRADATION_REASON_FAILURES" => Some(Self::Failures),
253            "DEGRADATION_REASON_HIGH_LATENCY_AND_FAILURES" => {
254                Some(Self::HighLatencyAndFailures)
255            }
256            _ => None,
257        }
258    }
259}
260/// Generated client implementations.
261pub mod model_provider_health_service_client {
262    #![allow(
263        unused_variables,
264        dead_code,
265        missing_docs,
266        clippy::wildcard_imports,
267        clippy::let_unit_value,
268    )]
269    use tonic::codegen::*;
270    use tonic::codegen::http::Uri;
271    /// ModelProviderHealthService monitors the health and performance of the backing LLM model provider.
272    /// It runs lightweight health checks every 5 minutes to measure provider responsiveness and reliability,
273    /// independent of the complexity of user prompts.
274    #[derive(Debug, Clone)]
275    pub struct ModelProviderHealthServiceClient<T> {
276        inner: tonic::client::Grpc<T>,
277    }
278    impl ModelProviderHealthServiceClient<tonic::transport::Channel> {
279        /// Attempt to create a new client by connecting to a given endpoint.
280        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
281        where
282            D: TryInto<tonic::transport::Endpoint>,
283            D::Error: Into<StdError>,
284        {
285            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
286            Ok(Self::new(conn))
287        }
288    }
289    impl<T> ModelProviderHealthServiceClient<T>
290    where
291        T: tonic::client::GrpcService<tonic::body::Body>,
292        T::Error: Into<StdError>,
293        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
294        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
295    {
296        pub fn new(inner: T) -> Self {
297            let inner = tonic::client::Grpc::new(inner);
298            Self { inner }
299        }
300        pub fn with_origin(inner: T, origin: Uri) -> Self {
301            let inner = tonic::client::Grpc::with_origin(inner, origin);
302            Self { inner }
303        }
304        pub fn with_interceptor<F>(
305            inner: T,
306            interceptor: F,
307        ) -> ModelProviderHealthServiceClient<InterceptedService<T, F>>
308        where
309            F: tonic::service::Interceptor,
310            T::ResponseBody: Default,
311            T: tonic::codegen::Service<
312                http::Request<tonic::body::Body>,
313                Response = http::Response<
314                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
315                >,
316            >,
317            <T as tonic::codegen::Service<
318                http::Request<tonic::body::Body>,
319            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
320        {
321            ModelProviderHealthServiceClient::new(
322                InterceptedService::new(inner, interceptor),
323            )
324        }
325        /// Compress requests with the given encoding.
326        ///
327        /// This requires the server to support it, otherwise it might respond with an
328        /// error.
329        #[must_use]
330        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
331            self.inner = self.inner.send_compressed(encoding);
332            self
333        }
334        /// Enable decompressing responses.
335        #[must_use]
336        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
337            self.inner = self.inner.accept_compressed(encoding);
338            self
339        }
340        /// Limits the maximum size of a decoded message.
341        ///
342        /// Default: `4MB`
343        #[must_use]
344        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
345            self.inner = self.inner.max_decoding_message_size(limit);
346            self
347        }
348        /// Limits the maximum size of an encoded message.
349        ///
350        /// Default: `usize::MAX`
351        #[must_use]
352        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
353            self.inner = self.inner.max_encoding_message_size(limit);
354            self
355        }
356        /// GetProviderStatus returns the current health status of the model provider based on recent health checks.
357        /// The status is HEALTHY if all checks in the last 30 minutes passed latency thresholds,
358        /// or DEGRADED if any checks exceeded latency thresholds or failed entirely.
359        pub async fn get_provider_status(
360            &mut self,
361            request: impl tonic::IntoRequest<super::GetProviderStatusRequest>,
362        ) -> std::result::Result<
363            tonic::Response<super::GetProviderStatusResponse>,
364            tonic::Status,
365        > {
366            self.inner
367                .ready()
368                .await
369                .map_err(|e| {
370                    tonic::Status::unknown(
371                        format!("Service was not ready: {}", e.into()),
372                    )
373                })?;
374            let codec = tonic::codec::ProstCodec::default();
375            let path = http::uri::PathAndQuery::from_static(
376                "/nominal.ai.v1.ModelProviderHealthService/GetProviderStatus",
377            );
378            let mut req = request.into_request();
379            req.extensions_mut()
380                .insert(
381                    GrpcMethod::new(
382                        "nominal.ai.v1.ModelProviderHealthService",
383                        "GetProviderStatus",
384                    ),
385                );
386            self.inner.unary(req, path, codec).await
387        }
388    }
389}
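// Illustrative usage sketch: fetching the provider status and matching on the
// Healthy/Degraded oneof. The endpoint URL is a placeholder.
#[allow(dead_code)]
async fn provider_status_example() -> Result<(), Box<dyn std::error::Error>> {
    use self::model_provider_health_service_client::ModelProviderHealthServiceClient;
    let mut client = ModelProviderHealthServiceClient::connect("http://localhost:8080").await?;
    let status = client
        .get_provider_status(GetProviderStatusRequest {})
        .await?
        .into_inner();
    // Prefer the non-deprecated aggregated_status field over aggregated_status_over_last_30m.
    match status.aggregated_status.and_then(|s| s.status) {
        Some(provider_status::Status::Healthy(_)) => println!("provider is healthy"),
        Some(provider_status::Status::Degraded(d)) => {
            let reason = DegradationReason::try_from(d.reason)
                .unwrap_or(DegradationReason::Unspecified);
            println!("provider degraded: {}", reason.as_str_name());
        }
        None => println!("no aggregated status reported"),
    }
    Ok(())
}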
390/// CreateOrUpdateKnowledgeBaseRequest is a request to create a knowledge base from an attachment in the attachment's
391/// workspace. Subsequent calls for the same attachment will overwrite the existing knowledge base.
392#[derive(Clone, PartialEq, ::prost::Message)]
393pub struct CreateOrUpdateKnowledgeBaseRequest {
394    #[prost(string, tag = "1")]
395    pub attachment_rid: ::prost::alloc::string::String,
396    /// Summary of the knowledge base; it will be used by the LLM to decide when to use it
397    #[prost(string, tag = "2")]
398    pub summary_description: ::prost::alloc::string::String,
399    #[prost(enumeration = "KnowledgeBaseType", optional, tag = "3")]
400    pub r#type: ::core::option::Option<i32>,
401}
402/// CreateOrUpdateKnowledgeBaseResponse is the response to creating/updating a knowledge base
403#[derive(Clone, PartialEq, ::prost::Message)]
404pub struct CreateOrUpdateKnowledgeBaseResponse {
405    #[prost(string, tag = "1")]
406    pub knowledge_base_rid: ::prost::alloc::string::String,
407}
408/// KnowledgeBase represents a knowledge base entry
409#[derive(Clone, PartialEq, ::prost::Message)]
410pub struct KnowledgeBase {
411    #[prost(string, tag = "1")]
412    pub knowledge_base_rid: ::prost::alloc::string::String,
413    #[prost(string, tag = "2")]
414    pub attachment_rid: ::prost::alloc::string::String,
415    #[prost(string, tag = "3")]
416    pub workspace_rid: ::prost::alloc::string::String,
417    #[prost(string, tag = "4")]
418    pub summary_description: ::prost::alloc::string::String,
419    #[prost(enumeration = "KnowledgeBaseType", tag = "5")]
420    pub r#type: i32,
421    #[prost(int32, tag = "6")]
422    pub version: i32,
423}
424#[derive(Clone, PartialEq, ::prost::Message)]
425pub struct ListRequest {
426    #[prost(string, tag = "1")]
427    pub workspace_rid: ::prost::alloc::string::String,
428}
429#[derive(Clone, PartialEq, ::prost::Message)]
430pub struct ListResponse {
431    #[prost(message, repeated, tag = "1")]
432    pub knowledge_bases: ::prost::alloc::vec::Vec<KnowledgeBase>,
433}
434#[derive(Clone, PartialEq, ::prost::Message)]
435pub struct DeleteRequest {
436    #[prost(string, tag = "1")]
437    pub knowledge_base_rid: ::prost::alloc::string::String,
438}
439#[derive(Clone, Copy, PartialEq, ::prost::Message)]
440pub struct DeleteResponse {
441    #[prost(bool, tag = "1")]
442    pub success: bool,
443}
444#[derive(Clone, PartialEq, ::prost::Message)]
445pub struct GetBatchRequest {
446    #[prost(string, repeated, tag = "1")]
447    pub knowledge_base_rids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
448}
449#[derive(Clone, PartialEq, ::prost::Message)]
450pub struct GetBatchResponse {
451    #[prost(message, repeated, tag = "1")]
452    pub knowledge_bases: ::prost::alloc::vec::Vec<KnowledgeBase>,
453}
454/// GenerateSummaryDescription intentionally returns the generated description to the frontend
455/// rather than storing it in the knowledge base directly, because the description needs to be accepted by the user.
456#[derive(Clone, PartialEq, ::prost::Message)]
457pub struct GenerateSummaryDescriptionRequest {
458    #[prost(string, tag = "1")]
459    pub attachment_rid: ::prost::alloc::string::String,
460}
461#[derive(Clone, PartialEq, ::prost::Message)]
462pub struct GenerateSummaryDescriptionResponse {
463    #[prost(string, tag = "1")]
464    pub summary_description: ::prost::alloc::string::String,
465}
466/// KnowledgeBaseType defines the types of knowledge base
467#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
468#[repr(i32)]
469pub enum KnowledgeBaseType {
470    /// defaults to PROMPT
471    Unspecified = 0,
472    /// knowledge base gets added directly to prompt (needs to be small enough!)
473    Prompt = 1,
474    /// knowledge base gets used via vector search on embeddings
475    Embedding = 2,
476}
477impl KnowledgeBaseType {
478    /// String value of the enum field names used in the ProtoBuf definition.
479    ///
480    /// The values are not transformed in any way and thus are considered stable
481    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
482    pub fn as_str_name(&self) -> &'static str {
483        match self {
484            Self::Unspecified => "KNOWLEDGE_BASE_TYPE_UNSPECIFIED",
485            Self::Prompt => "KNOWLEDGE_BASE_TYPE_PROMPT",
486            Self::Embedding => "KNOWLEDGE_BASE_TYPE_EMBEDDING",
487        }
488    }
489    /// Creates an enum from field names used in the ProtoBuf definition.
490    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
491        match value {
492            "KNOWLEDGE_BASE_TYPE_UNSPECIFIED" => Some(Self::Unspecified),
493            "KNOWLEDGE_BASE_TYPE_PROMPT" => Some(Self::Prompt),
494            "KNOWLEDGE_BASE_TYPE_EMBEDDING" => Some(Self::Embedding),
495            _ => None,
496        }
497    }
498}
499/// Generated client implementations.
500pub mod knowledge_base_service_client {
501    #![allow(
502        unused_variables,
503        dead_code,
504        missing_docs,
505        clippy::wildcard_imports,
506        clippy::let_unit_value,
507    )]
508    use tonic::codegen::*;
509    use tonic::codegen::http::Uri;
510    /// KnowledgeBaseService provides AI-powered knowledge base management
511    #[derive(Debug, Clone)]
512    pub struct KnowledgeBaseServiceClient<T> {
513        inner: tonic::client::Grpc<T>,
514    }
515    impl KnowledgeBaseServiceClient<tonic::transport::Channel> {
516        /// Attempt to create a new client by connecting to a given endpoint.
517        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
518        where
519            D: TryInto<tonic::transport::Endpoint>,
520            D::Error: Into<StdError>,
521        {
522            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
523            Ok(Self::new(conn))
524        }
525    }
526    impl<T> KnowledgeBaseServiceClient<T>
527    where
528        T: tonic::client::GrpcService<tonic::body::Body>,
529        T::Error: Into<StdError>,
530        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
531        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
532    {
533        pub fn new(inner: T) -> Self {
534            let inner = tonic::client::Grpc::new(inner);
535            Self { inner }
536        }
537        pub fn with_origin(inner: T, origin: Uri) -> Self {
538            let inner = tonic::client::Grpc::with_origin(inner, origin);
539            Self { inner }
540        }
541        pub fn with_interceptor<F>(
542            inner: T,
543            interceptor: F,
544        ) -> KnowledgeBaseServiceClient<InterceptedService<T, F>>
545        where
546            F: tonic::service::Interceptor,
547            T::ResponseBody: Default,
548            T: tonic::codegen::Service<
549                http::Request<tonic::body::Body>,
550                Response = http::Response<
551                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
552                >,
553            >,
554            <T as tonic::codegen::Service<
555                http::Request<tonic::body::Body>,
556            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
557        {
558            KnowledgeBaseServiceClient::new(InterceptedService::new(inner, interceptor))
559        }
560        /// Compress requests with the given encoding.
561        ///
562        /// This requires the server to support it, otherwise it might respond with an
563        /// error.
564        #[must_use]
565        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
566            self.inner = self.inner.send_compressed(encoding);
567            self
568        }
569        /// Enable decompressing responses.
570        #[must_use]
571        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
572            self.inner = self.inner.accept_compressed(encoding);
573            self
574        }
575        /// Limits the maximum size of a decoded message.
576        ///
577        /// Default: `4MB`
578        #[must_use]
579        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
580            self.inner = self.inner.max_decoding_message_size(limit);
581            self
582        }
583        /// Limits the maximum size of an encoded message.
584        ///
585        /// Default: `usize::MAX`
586        #[must_use]
587        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
588            self.inner = self.inner.max_encoding_message_size(limit);
589            self
590        }
591        /// CreateOrUpdateKnowledgeBase creates a knowledge base in the workspace
592        pub async fn create_or_update_knowledge_base(
593            &mut self,
594            request: impl tonic::IntoRequest<super::CreateOrUpdateKnowledgeBaseRequest>,
595        ) -> std::result::Result<
596            tonic::Response<super::CreateOrUpdateKnowledgeBaseResponse>,
597            tonic::Status,
598        > {
599            self.inner
600                .ready()
601                .await
602                .map_err(|e| {
603                    tonic::Status::unknown(
604                        format!("Service was not ready: {}", e.into()),
605                    )
606                })?;
607            let codec = tonic::codec::ProstCodec::default();
608            let path = http::uri::PathAndQuery::from_static(
609                "/nominal.ai.v1.KnowledgeBaseService/CreateOrUpdateKnowledgeBase",
610            );
611            let mut req = request.into_request();
612            req.extensions_mut()
613                .insert(
614                    GrpcMethod::new(
615                        "nominal.ai.v1.KnowledgeBaseService",
616                        "CreateOrUpdateKnowledgeBase",
617                    ),
618                );
619            self.inner.unary(req, path, codec).await
620        }
621        /// List returns all knowledge bases in the specified workspace
622        pub async fn list(
623            &mut self,
624            request: impl tonic::IntoRequest<super::ListRequest>,
625        ) -> std::result::Result<tonic::Response<super::ListResponse>, tonic::Status> {
626            self.inner
627                .ready()
628                .await
629                .map_err(|e| {
630                    tonic::Status::unknown(
631                        format!("Service was not ready: {}", e.into()),
632                    )
633                })?;
634            let codec = tonic::codec::ProstCodec::default();
635            let path = http::uri::PathAndQuery::from_static(
636                "/nominal.ai.v1.KnowledgeBaseService/List",
637            );
638            let mut req = request.into_request();
639            req.extensions_mut()
640                .insert(GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "List"));
641            self.inner.unary(req, path, codec).await
642        }
643        /// Delete removes a knowledge base by its RID
644        pub async fn delete(
645            &mut self,
646            request: impl tonic::IntoRequest<super::DeleteRequest>,
647        ) -> std::result::Result<tonic::Response<super::DeleteResponse>, tonic::Status> {
648            self.inner
649                .ready()
650                .await
651                .map_err(|e| {
652                    tonic::Status::unknown(
653                        format!("Service was not ready: {}", e.into()),
654                    )
655                })?;
656            let codec = tonic::codec::ProstCodec::default();
657            let path = http::uri::PathAndQuery::from_static(
658                "/nominal.ai.v1.KnowledgeBaseService/Delete",
659            );
660            let mut req = request.into_request();
661            req.extensions_mut()
662                .insert(GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "Delete"));
663            self.inner.unary(req, path, codec).await
664        }
665        /// GetBatch retrieves multiple knowledge bases by their RIDs
666        pub async fn get_batch(
667            &mut self,
668            request: impl tonic::IntoRequest<super::GetBatchRequest>,
669        ) -> std::result::Result<
670            tonic::Response<super::GetBatchResponse>,
671            tonic::Status,
672        > {
673            self.inner
674                .ready()
675                .await
676                .map_err(|e| {
677                    tonic::Status::unknown(
678                        format!("Service was not ready: {}", e.into()),
679                    )
680                })?;
681            let codec = tonic::codec::ProstCodec::default();
682            let path = http::uri::PathAndQuery::from_static(
683                "/nominal.ai.v1.KnowledgeBaseService/GetBatch",
684            );
685            let mut req = request.into_request();
686            req.extensions_mut()
687                .insert(
688                    GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "GetBatch"),
689                );
690            self.inner.unary(req, path, codec).await
691        }
692        /// GenerateSummaryDescription generates a summary description for an attachment rid
693        pub async fn generate_summary_description(
694            &mut self,
695            request: impl tonic::IntoRequest<super::GenerateSummaryDescriptionRequest>,
696        ) -> std::result::Result<
697            tonic::Response<super::GenerateSummaryDescriptionResponse>,
698            tonic::Status,
699        > {
700            self.inner
701                .ready()
702                .await
703                .map_err(|e| {
704                    tonic::Status::unknown(
705                        format!("Service was not ready: {}", e.into()),
706                    )
707                })?;
708            let codec = tonic::codec::ProstCodec::default();
709            let path = http::uri::PathAndQuery::from_static(
710                "/nominal.ai.v1.KnowledgeBaseService/GenerateSummaryDescription",
711            );
712            let mut req = request.into_request();
713            req.extensions_mut()
714                .insert(
715                    GrpcMethod::new(
716                        "nominal.ai.v1.KnowledgeBaseService",
717                        "GenerateSummaryDescription",
718                    ),
719                );
720            self.inner.unary(req, path, codec).await
721        }
722    }
723}
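// Illustrative usage sketch: registering an attachment as a knowledge base and then
// listing the knowledge bases in a workspace. The endpoint URL, the RIDs, and the
// summary text are placeholders.
#[allow(dead_code)]
async fn knowledge_base_example() -> Result<(), Box<dyn std::error::Error>> {
    use self::knowledge_base_service_client::KnowledgeBaseServiceClient;
    let mut client = KnowledgeBaseServiceClient::connect("http://localhost:8080").await?;
    let created = client
        .create_or_update_knowledge_base(CreateOrUpdateKnowledgeBaseRequest {
            attachment_rid: "ri.attachment.example".to_string(),
            summary_description: "Troubleshooting guide for sensor data uploads".to_string(),
            // Optional enum fields are carried as Option<i32>.
            r#type: Some(KnowledgeBaseType::Prompt as i32),
        })
        .await?
        .into_inner();
    println!("created knowledge base {}", created.knowledge_base_rid);
    let listed = client
        .list(ListRequest {
            workspace_rid: "ri.workspace.example".to_string(),
        })
        .await?
        .into_inner();
    for kb in listed.knowledge_bases {
        println!("{} (version {})", kb.knowledge_base_rid, kb.version);
    }
    Ok(())
}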
724#[derive(Clone, PartialEq, ::prost::Message)]
725pub struct GetSnapshotRidByUserMessageIdRequest {
726    #[prost(string, tag = "1")]
727    pub conversation_rid: ::prost::alloc::string::String,
728    #[prost(string, tag = "2")]
729    pub message_id: ::prost::alloc::string::String,
730}
731/// Will return an empty response body in the case where the message id exists but there is no associated snapshot.
732/// This occurs when a message was sent in a non-workbook context.
733#[derive(Clone, PartialEq, ::prost::Message)]
734pub struct GetSnapshotRidByUserMessageIdResponse {
735    #[prost(string, optional, tag = "1")]
736    pub snapshot_rid: ::core::option::Option<::prost::alloc::string::String>,
737}
738/// ReadOnlyMode configures read-only mode where edit tools are shadowed/disabled
739#[derive(Clone, Copy, PartialEq, ::prost::Message)]
740pub struct ReadOnlyMode {}
741/// EditMode configures edit mode where all tools are available
742#[derive(Clone, Copy, PartialEq, ::prost::Message)]
743pub struct EditMode {
744    /// when set to true, we auto-accept edits for any tools typically requiring approval
745    #[prost(bool, optional, tag = "1")]
746    pub auto_accept: ::core::option::Option<bool>,
747}
748/// ConversationMode specifies the mode of the conversation
749#[derive(Clone, Copy, PartialEq, ::prost::Message)]
750pub struct ConversationMode {
751    #[prost(oneof = "conversation_mode::Mode", tags = "1, 2")]
752    pub mode: ::core::option::Option<conversation_mode::Mode>,
753}
754/// Nested message and enum types in `ConversationMode`.
755pub mod conversation_mode {
756    #[derive(Clone, Copy, PartialEq, ::prost::Oneof)]
757    pub enum Mode {
758        #[prost(message, tag = "1")]
759        ReadOnly(super::ReadOnlyMode),
760        #[prost(message, tag = "2")]
761        Edit(super::EditMode),
762    }
763}
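// Illustrative sketch: constructing the two conversation modes. Edit mode can optionally
// auto-accept tool calls that would normally require approval.
#[allow(dead_code)]
fn conversation_mode_examples() -> (ConversationMode, ConversationMode) {
    let read_only = ConversationMode {
        mode: Some(conversation_mode::Mode::ReadOnly(ReadOnlyMode {})),
    };
    let auto_accepting_edit = ConversationMode {
        mode: Some(conversation_mode::Mode::Edit(EditMode {
            auto_accept: Some(true),
        })),
    };
    (read_only, auto_accepting_edit)
}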
764/// When the agent makes a request to use a tool, the user responds
765/// with one of these for every request, mapping a tool call id to its approval/denial
766#[derive(Clone, PartialEq, ::prost::Message)]
767pub struct ToolApprovalResult {
768    /// identifies the tool call
769    #[prost(string, tag = "1")]
770    pub tool_call_id: ::prost::alloc::string::String,
771    #[prost(oneof = "tool_approval_result::Response", tags = "2, 3")]
772    pub response: ::core::option::Option<tool_approval_result::Response>,
773}
774/// Nested message and enum types in `ToolApprovalResult`.
775pub mod tool_approval_result {
776    #[derive(Clone, PartialEq, ::prost::Oneof)]
777    pub enum Response {
778        #[prost(message, tag = "2")]
779        Approved(super::ToolApprovedResponse),
780        #[prost(message, tag = "3")]
781        Denied(super::ToolDeniedResponse),
782    }
783}
784#[derive(Clone, PartialEq, ::prost::Message)]
785pub struct ToolApprovedResponse {
786    /// JSON string representation of the override arguments, if the user
787    /// needs them to be changed in some way
788    #[prost(string, optional, tag = "1")]
789    pub override_args: ::core::option::Option<::prost::alloc::string::String>,
790}
791#[derive(Clone, PartialEq, ::prost::Message)]
792pub struct ToolDeniedResponse {
793    #[prost(string, tag = "2")]
794    pub denial_reason: ::prost::alloc::string::String,
795}
796/// RetryRequest retries the last request (e.g., if it was interrupted/failed part-way through)
797#[derive(Clone, Copy, PartialEq, ::prost::Message)]
798pub struct RetryRequest {}
799/// UserPromptRequest contains a new user message
800#[derive(Clone, PartialEq, ::prost::Message)]
801pub struct UserPromptRequest {
802    #[prost(message, optional, tag = "1")]
803    pub message: ::core::option::Option<UserModelMessage>,
804    /// Optional: image files to provide to the agent
805    #[prost(message, repeated, tag = "2")]
806    pub images: ::prost::alloc::vec::Vec<ImagePart>,
807}
808/// ToolApprovalRequest contains tool approval results
809#[derive(Clone, PartialEq, ::prost::Message)]
810pub struct ToolApprovalRequest {
811    #[prost(message, repeated, tag = "1")]
812    pub tool_approvals: ::prost::alloc::vec::Vec<ToolApprovalResult>,
813}
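// Illustrative sketch: answering two pending tool calls, approving one (optionally
// overriding its arguments with a JSON string) and denying the other with a reason.
// The tool call ids and the override JSON are placeholders.
#[allow(dead_code)]
fn tool_approval_example() -> ToolApprovalRequest {
    ToolApprovalRequest {
        tool_approvals: vec![
            ToolApprovalResult {
                tool_call_id: "tool-call-1".to_string(),
                response: Some(tool_approval_result::Response::Approved(
                    ToolApprovedResponse {
                        override_args: Some(r#"{"limit": 10}"#.to_string()),
                    },
                )),
            },
            ToolApprovalResult {
                tool_call_id: "tool-call-2".to_string(),
                response: Some(tool_approval_result::Response::Denied(
                    ToolDeniedResponse {
                        denial_reason: "not needed for this question".to_string(),
                    },
                )),
            },
        ],
    }
}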
814/// StreamChatRequest is a request to stream chat messages for AI agent.
815#[derive(Clone, PartialEq, ::prost::Message)]
816pub struct StreamChatRequest {
817    /// The conversation ID
818    #[prost(string, tag = "1")]
819    pub conversation_rid: ::prost::alloc::string::String,
820    /// DEPRECATED: use request_type.user_prompt.message instead
821    #[deprecated]
822    #[prost(message, optional, tag = "2")]
823    pub message: ::core::option::Option<UserModelMessage>,
824    /// DEPRECATED: use request_type.user_prompt.images instead
825    #[deprecated]
826    #[prost(message, repeated, tag = "3")]
827    pub images: ::prost::alloc::vec::Vec<ImagePart>,
828    /// DEPRECATED: use request_type.tool_approval instead
829    #[deprecated]
830    #[prost(message, repeated, tag = "6")]
831    pub tool_approvals: ::prost::alloc::vec::Vec<ToolApprovalResult>,
832    /// The type of request - exactly one should be set
833    #[prost(oneof = "stream_chat_request::RequestType", tags = "7, 8, 9")]
834    pub request_type: ::core::option::Option<stream_chat_request::RequestType>,
835    /// Context-specific fields; exactly one variant of the oneof should be set.
836    #[prost(oneof = "stream_chat_request::Context", tags = "4, 5")]
837    pub context: ::core::option::Option<stream_chat_request::Context>,
838}
839/// Nested message and enum types in `StreamChatRequest`.
840pub mod stream_chat_request {
841    /// The type of request - exactly one should be set
842    #[derive(Clone, PartialEq, ::prost::Oneof)]
843    pub enum RequestType {
844        #[prost(message, tag = "7")]
845        Retry(super::RetryRequest),
846        #[prost(message, tag = "8")]
847        UserPrompt(super::UserPromptRequest),
848        #[prost(message, tag = "9")]
849        ToolApproval(super::ToolApprovalRequest),
850    }
851    /// Context-specific fields; exactly one variant of the oneof should be set.
852    #[derive(Clone, PartialEq, ::prost::Oneof)]
853    pub enum Context {
854        #[prost(message, tag = "4")]
855        Workbook(super::WorkbookContext),
856        #[prost(message, tag = "5")]
857        Global(super::GlobalContext),
858    }
859}
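// Illustrative sketch: building a StreamChatRequest that sends a new user prompt with
// workbook context. The RIDs and prompt text are placeholders; the deprecated fields
// are left at their defaults via ..Default::default().
#[allow(dead_code)]
fn stream_chat_request_example() -> StreamChatRequest {
    StreamChatRequest {
        conversation_rid: "ri.conversation.example".to_string(),
        request_type: Some(stream_chat_request::RequestType::UserPrompt(
            UserPromptRequest {
                message: Some(UserModelMessage {
                    text: vec![UserContentPart {
                        part: Some(user_content_part::Part::Text(TextPart {
                            text: "Plot battery temperature for the last run".to_string(),
                        })),
                    }],
                }),
                images: vec![],
            },
        )),
        context: Some(stream_chat_request::Context::Workbook(WorkbookContext {
            workbook_rid: "ri.workbook.example".to_string(),
            user_presence: None,
        })),
        ..Default::default()
    }
}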
860/// WorkbookContext contains workbook-specific context fields
861#[derive(Clone, PartialEq, ::prost::Message)]
862pub struct WorkbookContext {
863    /// RID of the workbook to use for context
864    #[prost(string, tag = "1")]
865    pub workbook_rid: ::prost::alloc::string::String,
866    /// The user's presence in the workbook
867    #[prost(message, optional, tag = "2")]
868    pub user_presence: ::core::option::Option<WorkbookUserPresence>,
869}
870/// GlobalContext is the default context (no workbook-specific context)
871#[derive(Clone, Copy, PartialEq, ::prost::Message)]
872pub struct GlobalContext {}
873/// WorkbookUserPresence contains the user's presence in the workbook
874/// which is used to describe what the user is viewing at the time of the message.
875#[derive(Clone, Copy, PartialEq, ::prost::Message)]
876pub struct WorkbookUserPresence {
877    #[prost(int32, tag = "1")]
878    pub tab_index: i32,
879    #[prost(message, optional, tag = "2")]
880    pub range: ::core::option::Option<TimeRange>,
881}
882/// A CreateConversation request will create a new conversation thread.
883/// If old conversation id is not set, a brand new, clear chat is created.
884/// If old conversation id is set without a previous message id, the full conversation thread will be copied.
885/// If old conversation id is set with a previous message id, the conversation thread up until that message will be copied.
886/// The last case is useful for branching a conversation into a new thread.
887#[derive(Clone, PartialEq, ::prost::Message)]
888pub struct CreateConversationRequest {
889    #[prost(string, tag = "1")]
890    pub title: ::prost::alloc::string::String,
891    #[prost(string, tag = "2")]
892    pub workspace_rid: ::prost::alloc::string::String,
893    #[prost(string, optional, tag = "3")]
894    pub old_conversation_rid: ::core::option::Option<::prost::alloc::string::String>,
895    #[prost(string, optional, tag = "4")]
896    pub previous_message_id: ::core::option::Option<::prost::alloc::string::String>,
897    #[prost(message, optional, tag = "5")]
898    pub conversation_mode: ::core::option::Option<ConversationMode>,
899}
900/// CreateConversationResponse will return the conversation id for the new conversation
901#[derive(Clone, PartialEq, ::prost::Message)]
902pub struct CreateConversationResponse {
903    #[prost(string, tag = "1")]
904    pub new_conversation_rid: ::prost::alloc::string::String,
905}
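// Illustrative sketch: branching an existing conversation into a new thread by copying it
// up until a specific message, per the CreateConversationRequest doc comment above.
// The RIDs, title, and message id are placeholders.
#[allow(dead_code)]
fn branch_conversation_example() -> CreateConversationRequest {
    CreateConversationRequest {
        title: "Branched: battery investigation".to_string(),
        workspace_rid: "ri.workspace.example".to_string(),
        old_conversation_rid: Some("ri.conversation.original".to_string()),
        previous_message_id: Some("message-id-123".to_string()),
        conversation_mode: Some(ConversationMode {
            mode: Some(conversation_mode::Mode::ReadOnly(ReadOnlyMode {})),
        }),
    }
}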
906/// Updates the fields if specified (optional means no change for that field)
907#[derive(Clone, PartialEq, ::prost::Message)]
908pub struct UpdateConversationMetadataRequest {
909    #[prost(string, optional, tag = "1")]
910    pub title: ::core::option::Option<::prost::alloc::string::String>,
911    #[prost(string, tag = "2")]
912    pub conversation_rid: ::prost::alloc::string::String,
913    #[prost(message, optional, tag = "3")]
914    pub conversation_mode: ::core::option::Option<ConversationMode>,
915}
916#[derive(Clone, Copy, PartialEq, ::prost::Message)]
917pub struct UpdateConversationMetadataResponse {}
918#[derive(Clone, PartialEq, ::prost::Message)]
919pub struct DeleteConversationRequest {
920    #[prost(string, tag = "1")]
921    pub conversation_rid: ::prost::alloc::string::String,
922}
923#[derive(Clone, Copy, PartialEq, ::prost::Message)]
924pub struct DeleteConversationResponse {}
925/// A GetConversationRequest allows you to retrieve a subset of messages from the conversation thread represented
926/// by the provided rid. To start from a particular message, you can also provide a message id.
927#[derive(Clone, PartialEq, ::prost::Message)]
928pub struct GetConversationRequest {
929    #[prost(string, tag = "1")]
930    pub conversation_rid: ::prost::alloc::string::String,
931    #[prost(string, optional, tag = "2")]
932    pub page_start_message_id: ::core::option::Option<::prost::alloc::string::String>,
933    #[prost(int32, optional, tag = "3")]
934    pub max_message_count: ::core::option::Option<i32>,
935}
936/// ModelMessageWithId allows you to identify the message ID of a given message
937#[derive(Clone, PartialEq, ::prost::Message)]
938pub struct ModelMessageWithId {
939    #[prost(string, tag = "3")]
940    pub message_id: ::prost::alloc::string::String,
941    /// WB agent user messages can have snapshot rids associated with them
942    #[prost(string, optional, tag = "4")]
943    pub snapshot_rid: ::core::option::Option<::prost::alloc::string::String>,
944    #[prost(message, repeated, tag = "5")]
945    pub tool_approval_requests: ::prost::alloc::vec::Vec<ToolCallDescription>,
946    #[prost(oneof = "model_message_with_id::Content", tags = "1, 2")]
947    pub content: ::core::option::Option<model_message_with_id::Content>,
948}
949/// Nested message and enum types in `ModelMessageWithId`.
950pub mod model_message_with_id {
951    #[derive(Clone, PartialEq, ::prost::Oneof)]
952    pub enum Content {
953        #[prost(message, tag = "1")]
954        Message(super::ModelMessage),
955        #[prost(message, tag = "2")]
956        ToolAction(super::ToolAction),
957    }
958}
959#[derive(Clone, PartialEq, ::prost::Message)]
960pub struct GetConversationResponse {
961    #[prost(message, repeated, tag = "1")]
962    pub ordered_messages: ::prost::alloc::vec::Vec<ModelMessageWithId>,
963    #[prost(message, optional, tag = "2")]
964    pub conversation_metadata: ::core::option::Option<ConversationMetadata>,
965}
966/// Will list all conversation threads that this user has in this workspace
967#[derive(Clone, PartialEq, ::prost::Message)]
968pub struct ListConversationsRequest {
969    #[prost(string, tag = "1")]
970    pub workspace_rid: ::prost::alloc::string::String,
971    #[prost(string, optional, tag = "2")]
972    pub next_page_token: ::core::option::Option<::prost::alloc::string::String>,
973    #[prost(int32, optional, tag = "3")]
974    pub page_size: ::core::option::Option<i32>,
975}
976#[derive(Clone, PartialEq, ::prost::Message)]
977pub struct ConversationMetadata {
978    #[prost(string, tag = "1")]
979    pub conversation_rid: ::prost::alloc::string::String,
980    #[prost(string, tag = "2")]
981    pub title: ::prost::alloc::string::String,
982    #[prost(message, optional, tag = "3")]
983    pub created_at: ::core::option::Option<
984        super::super::super::google::protobuf::Timestamp,
985    >,
986    #[prost(message, optional, tag = "4")]
987    pub last_updated_at: ::core::option::Option<
988        super::super::super::google::protobuf::Timestamp,
989    >,
990    #[prost(message, optional, tag = "5")]
991    pub mode: ::core::option::Option<ConversationMode>,
992}
993/// ListConversationsResponse is a list of conversations that can be used in a call to GetConversationRequest
994/// to get a full conversation from storage. These are ordered by creation time.
995#[derive(Clone, PartialEq, ::prost::Message)]
996pub struct ListConversationsResponse {
997    #[prost(message, repeated, tag = "1")]
998    pub conversations: ::prost::alloc::vec::Vec<ConversationMetadata>,
999    #[prost(string, optional, tag = "2")]
1000    pub next_page_token: ::core::option::Option<::prost::alloc::string::String>,
1001}
1002#[derive(Clone, Copy, PartialEq, ::prost::Message)]
1003pub struct TimeRange {
1004    #[prost(message, optional, tag = "1")]
1005    pub range_start: ::core::option::Option<Timestamp>,
1006    #[prost(message, optional, tag = "2")]
1007    pub range_end: ::core::option::Option<Timestamp>,
1008}
1009#[derive(Clone, Copy, PartialEq, ::prost::Message)]
1010pub struct Timestamp {
1011    #[prost(int32, tag = "1")]
1012    pub seconds: i32,
1013    #[prost(int32, tag = "2")]
1014    pub nanoseconds: i32,
1015}
1016/// ModelMessage is a discriminated union of message kinds (currently user and assistant messages).
1017/// Each message kind has its own structure and content.
1018#[derive(Clone, PartialEq, ::prost::Message)]
1019pub struct ModelMessage {
1020    #[prost(oneof = "model_message::Kind", tags = "1, 2")]
1021    pub kind: ::core::option::Option<model_message::Kind>,
1022}
1023/// Nested message and enum types in `ModelMessage`.
1024pub mod model_message {
1025    #[derive(Clone, PartialEq, ::prost::Oneof)]
1026    pub enum Kind {
1027        #[prost(message, tag = "1")]
1028        User(super::UserModelMessage),
1029        #[prost(message, tag = "2")]
1030        Assistant(super::AssistantModelMessage),
1031    }
1032}
1033/// A user message containing text
1034#[derive(Clone, PartialEq, ::prost::Message)]
1035pub struct UserModelMessage {
1036    #[prost(message, repeated, tag = "1")]
1037    pub text: ::prost::alloc::vec::Vec<UserContentPart>,
1038}
1039/// An assistant message containing content parts (text and/or reasoning)
1040#[derive(Clone, PartialEq, ::prost::Message)]
1041pub struct AssistantModelMessage {
1042    #[prost(message, repeated, tag = "1")]
1043    pub content_parts: ::prost::alloc::vec::Vec<AssistantContentPart>,
1044}
1045#[derive(Clone, PartialEq, ::prost::Message)]
1046pub struct UserContentPart {
1047    #[prost(oneof = "user_content_part::Part", tags = "1")]
1048    pub part: ::core::option::Option<user_content_part::Part>,
1049}
1050/// Nested message and enum types in `UserContentPart`.
1051pub mod user_content_part {
1052    #[derive(Clone, PartialEq, ::prost::Oneof)]
1053    pub enum Part {
1054        #[prost(message, tag = "1")]
1055        Text(super::TextPart),
1056    }
1057}
1058/// Content part for assistant messages: can be text, reasoning, or mutation.
1059#[derive(Clone, PartialEq, ::prost::Message)]
1060pub struct AssistantContentPart {
1061    #[prost(oneof = "assistant_content_part::Part", tags = "1, 2")]
1062    pub part: ::core::option::Option<assistant_content_part::Part>,
1063}
1064/// Nested message and enum types in `AssistantContentPart`.
1065pub mod assistant_content_part {
1066    #[derive(Clone, PartialEq, ::prost::Oneof)]
1067    pub enum Part {
1068        #[prost(message, tag = "1")]
1069        Text(super::TextPart),
1070        #[prost(message, tag = "2")]
1071        Reasoning(super::ReasoningPart),
1072    }
1073}
1074/// Text part for user or assistant messages.
1075#[derive(Clone, PartialEq, ::prost::Message)]
1076pub struct TextPart {
1077    #[prost(string, tag = "1")]
1078    pub text: ::prost::alloc::string::String,
1079}
1080/// User-supplied image part.
1081#[derive(Clone, PartialEq, ::prost::Message)]
1082pub struct ImagePart {
1083    /// The base64-encoded image data
1084    #[prost(bytes = "vec", tag = "1")]
1085    pub data: ::prost::alloc::vec::Vec<u8>,
1086    /// The media type of the image (e.g. "image/png", "image/jpeg")
1087    #[prost(string, optional, tag = "2")]
1088    pub media_type: ::core::option::Option<::prost::alloc::string::String>,
1089    /// Optional: the filename of the image
1090    #[prost(string, optional, tag = "3")]
1091    pub filename: ::core::option::Option<::prost::alloc::string::String>,
1092}
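// Illustrative sketch: wrapping image bytes as an ImagePart for UserPromptRequest.images.
// The media type and filename are placeholders; the bytes are forwarded unchanged, so any
// encoding expected by the server must already be applied by the caller.
#[allow(dead_code)]
fn image_part_example(png_bytes: Vec<u8>) -> ImagePart {
    ImagePart {
        data: png_bytes,
        media_type: Some("image/png".to_string()),
        filename: Some("screenshot.png".to_string()),
    }
}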
1093/// Reasoning part for assistant messages.
1094#[derive(Clone, PartialEq, ::prost::Message)]
1095pub struct ReasoningPart {
1096    #[prost(string, tag = "1")]
1097    pub reasoning: ::prost::alloc::string::String,
1098}
1099/// StreamChatResponse is a discriminated union response to a StreamChatRequest
1100#[derive(Clone, PartialEq, ::prost::Message)]
1101pub struct StreamChatResponse {
1102    #[prost(
1103        oneof = "stream_chat_response::Response",
1104        tags = "1, 2, 3, 4, 5, 6, 7, 8, 10"
1105    )]
1106    pub response: ::core::option::Option<stream_chat_response::Response>,
1107}
1108/// Nested message and enum types in `StreamChatResponse`.
1109pub mod stream_chat_response {
1110    #[derive(Clone, PartialEq, ::prost::Oneof)]
1111    pub enum Response {
1112        #[prost(message, tag = "1")]
1113        Finish(super::Finish),
1114        #[prost(message, tag = "2")]
1115        Error(super::Error),
1116        #[prost(message, tag = "3")]
1117        TextStart(super::TextStart),
1118        #[prost(message, tag = "4")]
1119        TextDelta(super::TextDelta),
1120        #[prost(message, tag = "5")]
1121        TextEnd(super::TextEnd),
1122        #[prost(message, tag = "6")]
1123        ReasoningStart(super::ReasoningStart),
1124        #[prost(message, tag = "7")]
1125        ReasoningDelta(super::ReasoningDelta),
1126        #[prost(message, tag = "8")]
1127        ReasoningEnd(super::ReasoningEnd),
1128        #[prost(message, tag = "10")]
1129        ToolAction(super::ToolAction),
1130    }
1131}
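// Illustrative sketch: draining a server stream of StreamChatResponse messages, printing
// text deltas, tool actions, errors, and the final Finish event. The `stream` argument
// would typically come from AiAgentServiceClient::stream_chat(...).await?.into_inner().
#[allow(dead_code)]
async fn consume_stream_example(
    mut stream: tonic::codec::Streaming<StreamChatResponse>,
) -> Result<(), tonic::Status> {
    while let Some(msg) = stream.message().await? {
        match msg.response {
            Some(stream_chat_response::Response::TextDelta(d)) => print!("{}", d.delta),
            Some(stream_chat_response::Response::ToolAction(a)) => {
                println!("[{} {}]", a.tool_action_verb, a.tool_target.unwrap_or_default())
            }
            Some(stream_chat_response::Response::Error(e)) => {
                eprintln!("agent error: {}", e.message)
            }
            Some(stream_chat_response::Response::Finish(f)) => {
                println!();
                println!("finished with {} message(s)", f.ordered_message_ids.len());
            }
            // Reasoning and text start/end markers are ignored in this sketch.
            _ => {}
        }
    }
    Ok(())
}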
1132#[derive(Clone, PartialEq, ::prost::Message)]
1133pub struct ToolCallDescription {
1134    #[prost(string, tag = "1")]
1135    pub tool_call_id: ::prost::alloc::string::String,
1136    #[prost(string, tag = "2")]
1137    pub tool_name: ::prost::alloc::string::String,
1138    /// string representation of the proposed tool args for display
1139    #[prost(string, tag = "3")]
1140    pub tool_args_json_string: ::prost::alloc::string::String,
1141    /// used to conditionally render an approval button based on the outcome of the tool call
1142    #[prost(enumeration = "ToolCallStatus", tag = "4")]
1143    pub status: i32,
1144}
1145/// Indicates the end of a chat session
1146#[derive(Clone, PartialEq, ::prost::Message)]
1147pub struct Finish {
1148    /// The message ids in order of all generated messages for this agent run
1149    /// These ids can be used to branch a message from that specific message
1150    #[prost(string, repeated, tag = "1")]
1151    pub ordered_message_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
1152    /// In the case that this is the first agent run in a conversation thread, we also
1153    /// return the new conversation title generated
1154    #[prost(string, optional, tag = "2")]
1155    pub new_title: ::core::option::Option<::prost::alloc::string::String>,
1156    #[prost(message, repeated, tag = "3")]
1157    pub tool_approval_requests: ::prost::alloc::vec::Vec<ToolCallDescription>,
1158}
1159/// An error that occurred during the chat session
1160#[derive(Clone, PartialEq, ::prost::Message)]
1161pub struct Error {
1162    #[prost(string, tag = "1")]
1163    pub message: ::prost::alloc::string::String,
1164}
1165/// Indicates the start of a text message from the agent
1166#[derive(Clone, PartialEq, ::prost::Message)]
1167pub struct TextStart {
1168    /// uniquely identifies the text message (e.g. a uuid) so that the client can
1169    /// merge parallel message streams, should they occur.
1170    #[prost(string, tag = "1")]
1171    pub id: ::prost::alloc::string::String,
1172}
1173/// A delta (continuation) of a text message from the agent
1174#[derive(Clone, PartialEq, ::prost::Message)]
1175pub struct TextDelta {
1176    #[prost(string, tag = "1")]
1177    pub id: ::prost::alloc::string::String,
1178    /// The next chunk of text
1179    #[prost(string, tag = "2")]
1180    pub delta: ::prost::alloc::string::String,
1181}
1182/// Indicates the end of a text message from the agent
1183#[derive(Clone, PartialEq, ::prost::Message)]
1184pub struct TextEnd {
1185    #[prost(string, tag = "1")]
1186    pub id: ::prost::alloc::string::String,
1187}
1188/// Indicates the start of a reasoning message from the agent
1189#[derive(Clone, PartialEq, ::prost::Message)]
1190pub struct ReasoningStart {
1191    #[prost(string, tag = "1")]
1192    pub id: ::prost::alloc::string::String,
1193}
1194/// A delta (continuation) of a reasoning message from the agent
1195#[derive(Clone, PartialEq, ::prost::Message)]
1196pub struct ReasoningDelta {
1197    #[prost(string, tag = "1")]
1198    pub id: ::prost::alloc::string::String,
1199    /// The next chunk of reasoning
1200    #[prost(string, tag = "2")]
1201    pub delta: ::prost::alloc::string::String,
1202}
1203/// Indicates the end of a reasoning message from the agent
1204#[derive(Clone, PartialEq, ::prost::Message)]
1205pub struct ReasoningEnd {
1206    #[prost(string, tag = "1")]
1207    pub id: ::prost::alloc::string::String,
1208}
1209/// This is a concise description of a tool call that the agent is making internally.
1210/// Without revealing too much detail about the tool call, it informs the user what the agent is doing
1211/// at a high level. The format is `{tool_action_verb} {tool_target}`, for example:
1212/// "Search channels for My Datasource".
1213#[derive(Clone, PartialEq, ::prost::Message)]
1214pub struct ToolAction {
1215    #[prost(string, tag = "1")]
1216    pub id: ::prost::alloc::string::String,
1217    /// "Thought", "Read", "Find", "Look-up", etc.
1218    #[prost(string, tag = "2")]
1219    pub tool_action_verb: ::prost::alloc::string::String,
1220    /// "workbook", "channel", "variable", "panel", etc.
1221    #[prost(string, optional, tag = "3")]
1222    pub tool_target: ::core::option::Option<::prost::alloc::string::String>,
1223}
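// Illustrative sketch: rendering a ToolAction in the documented
// "{tool_action_verb} {tool_target}" form.
#[allow(dead_code)]
fn tool_action_label(action: &ToolAction) -> String {
    match &action.tool_target {
        Some(target) => format!("{} {}", action.tool_action_verb, target),
        None => action.tool_action_verb.clone(),
    }
}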
1224#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
1225#[repr(i32)]
1226pub enum ToolCallStatus {
1227    Unspecified = 0,
1228    Approved = 1,
1229    Denied = 2,
1230    AwaitingApproval = 3,
1231}
1232impl ToolCallStatus {
1233    /// String value of the enum field names used in the ProtoBuf definition.
1234    ///
1235    /// The values are not transformed in any way and thus are considered stable
1236    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
1237    pub fn as_str_name(&self) -> &'static str {
1238        match self {
1239            Self::Unspecified => "TOOL_CALL_STATUS_UNSPECIFIED",
1240            Self::Approved => "TOOL_CALL_STATUS_APPROVED",
1241            Self::Denied => "TOOL_CALL_STATUS_DENIED",
1242            Self::AwaitingApproval => "TOOL_CALL_STATUS_AWAITING_APPROVAL",
1243        }
1244    }
1245    /// Creates an enum from field names used in the ProtoBuf definition.
1246    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
1247        match value {
1248            "TOOL_CALL_STATUS_UNSPECIFIED" => Some(Self::Unspecified),
1249            "TOOL_CALL_STATUS_APPROVED" => Some(Self::Approved),
1250            "TOOL_CALL_STATUS_DENIED" => Some(Self::Denied),
1251            "TOOL_CALL_STATUS_AWAITING_APPROVAL" => Some(Self::AwaitingApproval),
1252            _ => None,
1253        }
1254    }
1255}
1256/// Generated client implementations.
1257pub mod ai_agent_service_client {
1258    #![allow(
1259        unused_variables,
1260        dead_code,
1261        missing_docs,
1262        clippy::wildcard_imports,
1263        clippy::let_unit_value,
1264    )]
1265    use tonic::codegen::*;
1266    use tonic::codegen::http::Uri;
1267    /// AIAgentService provides AI-powered assistance for general operations
1268    #[derive(Debug, Clone)]
1269    pub struct AiAgentServiceClient<T> {
1270        inner: tonic::client::Grpc<T>,
1271    }
1272    impl AiAgentServiceClient<tonic::transport::Channel> {
1273        /// Attempt to create a new client by connecting to a given endpoint.
1274        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
1275        where
1276            D: TryInto<tonic::transport::Endpoint>,
1277            D::Error: Into<StdError>,
1278        {
1279            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
1280            Ok(Self::new(conn))
1281        }
1282    }
    impl<T> AiAgentServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> AiAgentServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            AiAgentServiceClient::new(InterceptedService::new(inner, interceptor))
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it; otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// StreamChat handles streaming chat for the AI agent; it is generated here as a
        /// server-streaming call (see the usage sketch after this impl block)
        pub async fn stream_chat(
            &mut self,
            request: impl tonic::IntoRequest<super::StreamChatRequest>,
        ) -> std::result::Result<
            tonic::Response<tonic::codec::Streaming<super::StreamChatResponse>>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/StreamChat",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(GrpcMethod::new("nominal.ai.v1.AIAgentService", "StreamChat"));
            self.inner.server_streaming(req, path, codec).await
        }
        /// GetConversation retrieves a complete conversation, with an optional limit on the number of messages returned
        pub async fn get_conversation(
            &mut self,
            request: impl tonic::IntoRequest<super::GetConversationRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GetConversationResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/GetConversation",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "GetConversation"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// ListConversations returns the list of conversation ids, ordered by most recently updated (a unary usage sketch follows this module)
        pub async fn list_conversations(
            &mut self,
            request: impl tonic::IntoRequest<super::ListConversationsRequest>,
        ) -> std::result::Result<
            tonic::Response<super::ListConversationsResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/ListConversations",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "ListConversations"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// CreateConversation handles creating a conversation and assigning it a conversation rid
        pub async fn create_conversation(
            &mut self,
            request: impl tonic::IntoRequest<super::CreateConversationRequest>,
        ) -> std::result::Result<
            tonic::Response<super::CreateConversationResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/CreateConversation",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "CreateConversation"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// UpdateConversationMetadata handles updating any metadata associated with a conversation
        pub async fn update_conversation_metadata(
            &mut self,
            request: impl tonic::IntoRequest<super::UpdateConversationMetadataRequest>,
        ) -> std::result::Result<
            tonic::Response<super::UpdateConversationMetadataResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/UpdateConversationMetadata",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.AIAgentService",
                        "UpdateConversationMetadata",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// DeleteConversation handles deleting a specific conversation by conversation rid
        pub async fn delete_conversation(
            &mut self,
            request: impl tonic::IntoRequest<super::DeleteConversationRequest>,
        ) -> std::result::Result<
            tonic::Response<super::DeleteConversationResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/DeleteConversation",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "DeleteConversation"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// GetSnapshotRidByUserMessageId resolves the snapshot rid of the workbook at the time the message was sent
        pub async fn get_snapshot_rid_by_user_message_id(
            &mut self,
            request: impl tonic::IntoRequest<super::GetSnapshotRidByUserMessageIdRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GetSnapshotRidByUserMessageIdResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/GetSnapshotRidByUserMessageId",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.AIAgentService",
                        "GetSnapshotRidByUserMessageId",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
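    /// Example (sketch): configuring the client and draining the StreamChat
    /// server stream. `StreamChatRequest::default()` stands in for a real
    /// request (the required fields depend on the message definitions above),
    /// and `CompressionEncoding::Gzip` assumes tonic's `gzip` feature is enabled.
    #[allow(dead_code)]
    async fn stream_chat_example(
        client: AiAgentServiceClient<tonic::transport::Channel>,
    ) -> Result<(), tonic::Status> {
        // Optional tuning via the builder-style configuration methods.
        let mut client = client
            .send_compressed(CompressionEncoding::Gzip)
            .max_decoding_message_size(16 * 1024 * 1024);
        let mut stream = client
            .stream_chat(super::StreamChatRequest::default())
            .await?
            .into_inner();
        // Each item is one super::StreamChatResponse chunk from the agent.
        while let Some(response) = stream.message().await? {
            let _ = response;
        }
        Ok(())
    }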
}
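/// Example (sketch): a unary call through the generated client. The default
/// request stands in for a real `ListConversationsRequest`; the actual
/// required fields depend on the message definitions above.
#[allow(dead_code)]
async fn list_conversations_example(
    client: &mut ai_agent_service_client::AiAgentServiceClient<tonic::transport::Channel>,
) -> Result<ListConversationsResponse, tonic::Status> {
    let response = client
        .list_conversations(ListConversationsRequest::default())
        .await?;
    Ok(response.into_inner())
}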