// nominal_api/proto/nominal.ai.v1.rs
// This file is @generated by prost-build.
/// Request for `DataIngestionErrorClassifierService.ClassifyError`:
/// carries the raw error text that should be classified.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ClassifyErrorRequest {
    /// The error message or description to classify
    #[prost(string, tag = "1")]
    pub error_message: ::prost::alloc::string::String,
}
/// Response for `ClassifyError`: the chosen classification plus a
/// human-readable rationale.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ClassifyErrorResponse {
    /// The classification result
    /// (stored as `i32`; decode with `ErrorClassification::try_from`)
    #[prost(enumeration = "ErrorClassification", tag = "1")]
    pub classification: i32,
    /// Explanation for why this classification was chosen
    #[prost(string, tag = "2")]
    pub reason: ::prost::alloc::string::String,
}
/// Classification of a data-ingestion error.
/// Per the service doc comment below: CLIENT = customer data issue,
/// SERVER = infrastructure issue.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum ErrorClassification {
    /// Default / unknown classification.
    Unspecified = 0,
    Client = 1,
    Server = 2,
}
24impl ErrorClassification {
25    /// String value of the enum field names used in the ProtoBuf definition.
26    ///
27    /// The values are not transformed in any way and thus are considered stable
28    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
29    pub fn as_str_name(&self) -> &'static str {
30        match self {
31            Self::Unspecified => "ERROR_CLASSIFICATION_UNSPECIFIED",
32            Self::Client => "ERROR_CLASSIFICATION_CLIENT",
33            Self::Server => "ERROR_CLASSIFICATION_SERVER",
34        }
35    }
36    /// Creates an enum from field names used in the ProtoBuf definition.
37    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
38        match value {
39            "ERROR_CLASSIFICATION_UNSPECIFIED" => Some(Self::Unspecified),
40            "ERROR_CLASSIFICATION_CLIENT" => Some(Self::Client),
41            "ERROR_CLASSIFICATION_SERVER" => Some(Self::Server),
42            _ => None,
43        }
44    }
45}
/// Generated client implementations.
pub mod data_ingestion_error_classifier_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// DataIngestionErrorClassifierService classifies data ingestion errors as CLIENT or SERVER issues.
    /// CLIENT errors are customer data issues (empty files, schema conflicts, timestamp problems, etc.)
    /// SERVER errors are infrastructure issues (internal errors, timeouts, capacity limits, etc.)
    #[derive(Debug, Clone)]
    pub struct DataIngestionErrorClassifierServiceClient<T> {
        // The generic transport wrapped by tonic's gRPC client machinery.
        inner: tonic::client::Grpc<T>,
    }
    impl DataIngestionErrorClassifierServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> DataIngestionErrorClassifierServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        /// Wraps an already-connected transport in a client.
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        /// Like `new`, but requests are issued against the given origin URI.
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        /// Wraps the transport in an interceptor that runs on every request.
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> DataIngestionErrorClassifierServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            DataIngestionErrorClassifierServiceClient::new(
                InterceptedService::new(inner, interceptor),
            )
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// ClassifyError analyzes an error message and classifies it as CLIENT or SERVER.
        pub async fn classify_error(
            &mut self,
            request: impl tonic::IntoRequest<super::ClassifyErrorRequest>,
        ) -> std::result::Result<
            tonic::Response<super::ClassifyErrorResponse>,
            tonic::Status,
        > {
            // Wait for the underlying service to report readiness before encoding.
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.DataIngestionErrorClassifierService/ClassifyError",
            );
            let mut req = request.into_request();
            // Tag the request with the gRPC method for interceptors/telemetry.
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.DataIngestionErrorClassifierService",
                        "ClassifyError",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}
/// Request for `ModelProviderHealthService.GetProviderStatus`; carries no fields.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct GetProviderStatusRequest {}
/// Response for `GetProviderStatus`: the latest probe result plus aggregates.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct GetProviderStatusResponse {
    /// Timestamp when the last status was determined
    #[prost(message, optional, tag = "1")]
    pub timestamp: ::core::option::Option<
        super::super::super::google::protobuf::Timestamp,
    >,
    /// Status of the most recent health check probe
    #[prost(message, optional, tag = "2")]
    pub last_status: ::core::option::Option<ProviderStatus>,
    /// Aggregated status over the last 30 minutes (DEGRADED if any check failed or exceeded thresholds)
    /// Deprecated: Use aggregated_status instead. This field is kept for backward compatibility.
    #[deprecated]
    #[prost(message, optional, tag = "3")]
    pub aggregated_status_over_last_30m: ::core::option::Option<ProviderStatus>,
    /// Aggregated status over the last iterations, configurable in the BE (DEGRADED if any check failed or exceeded thresholds)
    #[prost(message, optional, tag = "4")]
    pub aggregated_status: ::core::option::Option<ProviderStatus>,
}
/// Health status of the model provider: exactly one of `Healthy` or `Degraded`
/// (a proto oneof; `None` if neither variant was set on the wire).
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct ProviderStatus {
    #[prost(oneof = "provider_status::Status", tags = "1, 2")]
    pub status: ::core::option::Option<provider_status::Status>,
}
/// Nested message and enum types in `ProviderStatus`.
pub mod provider_status {
    /// The oneof payload for `ProviderStatus.status`.
    #[derive(Clone, Copy, PartialEq, ::prost::Oneof)]
    pub enum Status {
        #[prost(message, tag = "1")]
        Healthy(super::Healthy),
        #[prost(message, tag = "2")]
        Degraded(super::Degraded),
    }
}
/// Marker message: the provider is healthy (no extra detail carried).
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct Healthy {}
/// The provider is degraded; `reason` says why.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct Degraded {
    /// Stored as `i32`; decode with `DegradationReason::try_from`.
    #[prost(enumeration = "DegradationReason", tag = "1")]
    pub reason: i32,
}
/// Latency metrics from a health-check probe.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct ProviderMetrics {
    /// Milliseconds until the first token was received.
    #[prost(int32, tag = "1")]
    pub time_to_first_token_ms: i32,
    /// Total probe duration in milliseconds.
    #[prost(int32, tag = "2")]
    pub total_time_ms: i32,
}
/// Why the provider was marked degraded (high latency, failures, or both).
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum DegradationReason {
    /// Default / unknown reason.
    Unspecified = 0,
    HighLatency = 1,
    Failures = 2,
    HighLatencyAndFailures = 3,
}
232impl DegradationReason {
233    /// String value of the enum field names used in the ProtoBuf definition.
234    ///
235    /// The values are not transformed in any way and thus are considered stable
236    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
237    pub fn as_str_name(&self) -> &'static str {
238        match self {
239            Self::Unspecified => "DEGRADATION_REASON_UNSPECIFIED",
240            Self::HighLatency => "DEGRADATION_REASON_HIGH_LATENCY",
241            Self::Failures => "DEGRADATION_REASON_FAILURES",
242            Self::HighLatencyAndFailures => {
243                "DEGRADATION_REASON_HIGH_LATENCY_AND_FAILURES"
244            }
245        }
246    }
247    /// Creates an enum from field names used in the ProtoBuf definition.
248    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
249        match value {
250            "DEGRADATION_REASON_UNSPECIFIED" => Some(Self::Unspecified),
251            "DEGRADATION_REASON_HIGH_LATENCY" => Some(Self::HighLatency),
252            "DEGRADATION_REASON_FAILURES" => Some(Self::Failures),
253            "DEGRADATION_REASON_HIGH_LATENCY_AND_FAILURES" => {
254                Some(Self::HighLatencyAndFailures)
255            }
256            _ => None,
257        }
258    }
259}
/// Generated client implementations.
pub mod model_provider_health_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// ModelProviderHealthService monitors the health and performance of the backing LLM model provider.
    /// It runs lightweight health checks every 5 minutes to measure provider responsiveness and reliability,
    /// independent of the complexity of user prompts.
    #[derive(Debug, Clone)]
    pub struct ModelProviderHealthServiceClient<T> {
        // The generic transport wrapped by tonic's gRPC client machinery.
        inner: tonic::client::Grpc<T>,
    }
    impl ModelProviderHealthServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> ModelProviderHealthServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        /// Wraps an already-connected transport in a client.
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        /// Like `new`, but requests are issued against the given origin URI.
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        /// Wraps the transport in an interceptor that runs on every request.
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> ModelProviderHealthServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            ModelProviderHealthServiceClient::new(
                InterceptedService::new(inner, interceptor),
            )
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// GetProviderStatus returns the current health status of the model provider based on recent health checks.
        /// The status is HEALTHY if all checks in the last 30 minutes passed latency thresholds,
        /// or DEGRADED if any checks exceeded latency thresholds or failed entirely.
        pub async fn get_provider_status(
            &mut self,
            request: impl tonic::IntoRequest<super::GetProviderStatusRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GetProviderStatusResponse>,
            tonic::Status,
        > {
            // Wait for the underlying service to report readiness before encoding.
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.ModelProviderHealthService/GetProviderStatus",
            );
            let mut req = request.into_request();
            // Tag the request with the gRPC method for interceptors/telemetry.
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.ModelProviderHealthService",
                        "GetProviderStatus",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}
/// CreateOrUpdateKnowledgeBaseRequest is a request to create a knowledge from an attachment in the attachment's
/// workspace. Subsequent calls for the same attachment will overwrite the existing knowledge base.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateOrUpdateKnowledgeBaseRequest {
    /// RID of the attachment the knowledge base is built from.
    #[prost(string, tag = "1")]
    pub attachment_rid: ::prost::alloc::string::String,
    /// summary of the knowledge base, will be used by the LLM to decide when to use it
    #[prost(string, tag = "2")]
    pub summary_description: ::prost::alloc::string::String,
    /// Optional `KnowledgeBaseType`; `None` when unset on the wire.
    #[prost(enumeration = "KnowledgeBaseType", optional, tag = "3")]
    pub r#type: ::core::option::Option<i32>,
}
/// CreateOrUpdateKnowledgeBaseResponse is the response to creating/updating a knowledge base
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateOrUpdateKnowledgeBaseResponse {
    /// RID of the created or updated knowledge base.
    #[prost(string, tag = "1")]
    pub knowledge_base_rid: ::prost::alloc::string::String,
}
/// KnowledgeBase represents a knowledge base entry
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct KnowledgeBase {
    #[prost(string, tag = "1")]
    pub knowledge_base_rid: ::prost::alloc::string::String,
    /// Attachment this knowledge base was created from.
    #[prost(string, tag = "2")]
    pub attachment_rid: ::prost::alloc::string::String,
    /// Workspace the knowledge base belongs to.
    #[prost(string, tag = "3")]
    pub workspace_rid: ::prost::alloc::string::String,
    #[prost(string, tag = "4")]
    pub summary_description: ::prost::alloc::string::String,
    /// Stored as `i32`; decode with `KnowledgeBaseType::try_from`.
    #[prost(enumeration = "KnowledgeBaseType", tag = "5")]
    pub r#type: i32,
    #[prost(int32, tag = "6")]
    pub version: i32,
}
/// Request for `KnowledgeBaseService.List`: scope listing to one workspace.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListRequest {
    #[prost(string, tag = "1")]
    pub workspace_rid: ::prost::alloc::string::String,
}
/// Response for `List`: all knowledge bases in the requested workspace.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListResponse {
    #[prost(message, repeated, tag = "1")]
    pub knowledge_bases: ::prost::alloc::vec::Vec<KnowledgeBase>,
}
/// Request for `Delete`: identifies the knowledge base to remove by RID.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteRequest {
    #[prost(string, tag = "1")]
    pub knowledge_base_rid: ::prost::alloc::string::String,
}
/// Response for `Delete`: whether the deletion succeeded.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct DeleteResponse {
    #[prost(bool, tag = "1")]
    pub success: bool,
}
/// Request for `GetBatch`: the RIDs of the knowledge bases to fetch.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetBatchRequest {
    #[prost(string, repeated, tag = "1")]
    pub knowledge_base_rids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
/// Response for `GetBatch`: the resolved knowledge bases.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetBatchResponse {
    #[prost(message, repeated, tag = "1")]
    pub knowledge_bases: ::prost::alloc::vec::Vec<KnowledgeBase>,
}
/// generate summary description is intentionally going to return the generated description to the frontend
/// rather than storing it in the knowledge base directly because the description needs to be accepted by the user
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GenerateSummaryDescriptionRequest {
    /// Attachment to generate a summary description for.
    #[prost(string, tag = "1")]
    pub attachment_rid: ::prost::alloc::string::String,
}
/// Response for `GenerateSummaryDescription`: the generated text, returned to
/// the caller for user approval (not persisted by this call).
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GenerateSummaryDescriptionResponse {
    #[prost(string, tag = "1")]
    pub summary_description: ::prost::alloc::string::String,
}
/// KnowledgeBaseType defines the types of knowledge base
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum KnowledgeBaseType {
    /// defaults to PROMPT
    Unspecified = 0,
    /// knowledge base gets added directly to prompt (needs to be small enough!)
    Prompt = 1,
    /// knowledge base gets used via vector search on embeddings
    Embedding = 2,
}
477impl KnowledgeBaseType {
478    /// String value of the enum field names used in the ProtoBuf definition.
479    ///
480    /// The values are not transformed in any way and thus are considered stable
481    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
482    pub fn as_str_name(&self) -> &'static str {
483        match self {
484            Self::Unspecified => "KNOWLEDGE_BASE_TYPE_UNSPECIFIED",
485            Self::Prompt => "KNOWLEDGE_BASE_TYPE_PROMPT",
486            Self::Embedding => "KNOWLEDGE_BASE_TYPE_EMBEDDING",
487        }
488    }
489    /// Creates an enum from field names used in the ProtoBuf definition.
490    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
491        match value {
492            "KNOWLEDGE_BASE_TYPE_UNSPECIFIED" => Some(Self::Unspecified),
493            "KNOWLEDGE_BASE_TYPE_PROMPT" => Some(Self::Prompt),
494            "KNOWLEDGE_BASE_TYPE_EMBEDDING" => Some(Self::Embedding),
495            _ => None,
496        }
497    }
498}
/// Generated client implementations.
pub mod knowledge_base_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// KnowledgeBaseService provides AI-powered knowledge base management
    #[derive(Debug, Clone)]
    pub struct KnowledgeBaseServiceClient<T> {
        // The generic transport wrapped by tonic's gRPC client machinery.
        inner: tonic::client::Grpc<T>,
    }
    impl KnowledgeBaseServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> KnowledgeBaseServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        /// Wraps an already-connected transport in a client.
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        /// Like `new`, but requests are issued against the given origin URI.
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        /// Wraps the transport in an interceptor that runs on every request.
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> KnowledgeBaseServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            KnowledgeBaseServiceClient::new(InterceptedService::new(inner, interceptor))
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// CreateOrUpdateKnowledgeBase creates a knowledge base in the workspace
        pub async fn create_or_update_knowledge_base(
            &mut self,
            request: impl tonic::IntoRequest<super::CreateOrUpdateKnowledgeBaseRequest>,
        ) -> std::result::Result<
            tonic::Response<super::CreateOrUpdateKnowledgeBaseResponse>,
            tonic::Status,
        > {
            // Wait for the underlying service to report readiness before encoding.
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/CreateOrUpdateKnowledgeBase",
            );
            let mut req = request.into_request();
            // Tag the request with the gRPC method for interceptors/telemetry.
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.KnowledgeBaseService",
                        "CreateOrUpdateKnowledgeBase",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// List returns all knowledge bases in the specified workspace
        pub async fn list(
            &mut self,
            request: impl tonic::IntoRequest<super::ListRequest>,
        ) -> std::result::Result<tonic::Response<super::ListResponse>, tonic::Status> {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/List",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "List"));
            self.inner.unary(req, path, codec).await
        }
        /// Delete removes a knowledge base by its RID
        pub async fn delete(
            &mut self,
            request: impl tonic::IntoRequest<super::DeleteRequest>,
        ) -> std::result::Result<tonic::Response<super::DeleteResponse>, tonic::Status> {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/Delete",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "Delete"));
            self.inner.unary(req, path, codec).await
        }
        /// GetBatch retrieves multiple knowledge bases by their RIDs
        pub async fn get_batch(
            &mut self,
            request: impl tonic::IntoRequest<super::GetBatchRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GetBatchResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/GetBatch",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "GetBatch"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// GenerateSummaryDescription generates a summary description for an attachment rid
        pub async fn generate_summary_description(
            &mut self,
            request: impl tonic::IntoRequest<super::GenerateSummaryDescriptionRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GenerateSummaryDescriptionResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/GenerateSummaryDescription",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.KnowledgeBaseService",
                        "GenerateSummaryDescription",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}
/// Request to look up the snapshot RID associated with a user message
/// inside a conversation.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetSnapshotRidByUserMessageIdRequest {
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    #[prost(string, tag = "2")]
    pub message_id: ::prost::alloc::string::String,
}
/// Will return an empty response body in the case where the message id exists, but there is no associated snapshot
/// This occurs in the instance where a message was sent in a non-workbook context
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetSnapshotRidByUserMessageIdResponse {
    /// `None` when the message has no associated snapshot (see above).
    #[prost(string, optional, tag = "1")]
    pub snapshot_rid: ::core::option::Option<::prost::alloc::string::String>,
}
/// ReadOnlyMode configures read-only mode where edit tools are shadowed/disabled
///
/// Empty marker message: its presence in `ConversationMode.mode` selects this mode.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct ReadOnlyMode {}
/// EditMode configures edit mode where all tools are available
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct EditMode {
    /// when set to true, we auto accept edits for any tools typically requiring approval
    /// (NOTE(review): behavior when unset is presumably "no auto-accept" — confirm server default)
    #[prost(bool, optional, tag = "1")]
    pub auto_accept: ::core::option::Option<bool>,
}
/// ConversationMode specifies the mode of the conversation
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct ConversationMode {
    /// Which mode the conversation is in; exactly one variant is expected to be set.
    #[prost(oneof = "conversation_mode::Mode", tags = "1, 2")]
    pub mode: ::core::option::Option<conversation_mode::Mode>,
}
/// Nested message and enum types in `ConversationMode`.
pub mod conversation_mode {
    #[derive(Clone, Copy, PartialEq, ::prost::Oneof)]
    pub enum Mode {
        /// Read-only: edit tools are shadowed/disabled.
        #[prost(message, tag = "1")]
        ReadOnly(super::ReadOnlyMode),
        /// Edit: all tools are available.
        #[prost(message, tag = "2")]
        Edit(super::EditMode),
    }
}
/// When the agent makes a request to use a tool, the user responds
/// with one of these for every request - mapping a tool id to its approval/denial
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ToolApprovalResult {
    /// identifies the tool call
    #[prost(string, tag = "1")]
    pub tool_call_id: ::prost::alloc::string::String,
    /// The user's decision for this tool call: approved or denied.
    #[prost(oneof = "tool_approval_result::Response", tags = "2, 3")]
    pub response: ::core::option::Option<tool_approval_result::Response>,
}
/// Nested message and enum types in `ToolApprovalResult`.
pub mod tool_approval_result {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Response {
        /// The tool call was approved (optionally with overridden arguments).
        #[prost(message, tag = "2")]
        Approved(super::ToolApprovedResponse),
        /// The tool call was denied, with a reason.
        #[prost(message, tag = "3")]
        Denied(super::ToolDeniedResponse),
    }
}
/// Approval payload for a tool call.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ToolApprovedResponse {
    /// json string representation of the override argument if the user
    /// needs it to be changed in some way
    #[prost(string, optional, tag = "1")]
    pub override_args: ::core::option::Option<::prost::alloc::string::String>,
}
/// Denial payload for a tool call.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ToolDeniedResponse {
    /// Free-form reason the user denied the tool call.
    /// NOTE(review): field numbering starts at 2 — tag 1 is presumably reserved
    /// in the proto definition; do not reuse it.
    #[prost(string, tag = "2")]
    pub denial_reason: ::prost::alloc::string::String,
}
/// RetryRequest retries the last request (e.g., if it was interrupted/failed part-way through)
///
/// Empty marker message used as a `StreamChatRequest.request_type` variant.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct RetryRequest {}
/// UserPromptRequest contains a new user message
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UserPromptRequest {
    /// The user's message content.
    #[prost(message, optional, tag = "1")]
    pub message: ::core::option::Option<UserModelMessage>,
    /// Optional: image files to provide to the agent
    #[prost(message, repeated, tag = "2")]
    pub images: ::prost::alloc::vec::Vec<ImagePart>,
}
/// ToolApprovalRequest contains tool approval results
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ToolApprovalRequest {
    /// One approval/denial result per outstanding tool call.
    #[prost(message, repeated, tag = "1")]
    pub tool_approvals: ::prost::alloc::vec::Vec<ToolApprovalResult>,
}
/// StreamChatRequest is a request to stream chat messages for AI agent.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StreamChatRequest {
    /// The conversation ID
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    /// DEPRECATED: use request_type.user_prompt.message instead
    #[deprecated]
    #[prost(message, optional, tag = "2")]
    pub message: ::core::option::Option<UserModelMessage>,
    /// DEPRECATED: use request_type.user_prompt.images instead
    #[deprecated]
    #[prost(message, repeated, tag = "3")]
    pub images: ::prost::alloc::vec::Vec<ImagePart>,
    /// DEPRECATED: use request_type.tool_approval instead
    #[deprecated]
    #[prost(message, repeated, tag = "6")]
    pub tool_approvals: ::prost::alloc::vec::Vec<ToolApprovalResult>,
    /// The type of request - exactly one should be set
    #[prost(oneof = "stream_chat_request::RequestType", tags = "7, 8, 9")]
    pub request_type: ::core::option::Option<stream_chat_request::RequestType>,
    /// Context-specific fields based on the oneofKind.
    #[prost(oneof = "stream_chat_request::Context", tags = "4, 5, 10")]
    pub context: ::core::option::Option<stream_chat_request::Context>,
}
/// Nested message and enum types in `StreamChatRequest`.
pub mod stream_chat_request {
    /// The type of request - exactly one should be set
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum RequestType {
        /// Re-run the previous request.
        #[prost(message, tag = "7")]
        Retry(super::RetryRequest),
        /// A new user prompt (message plus optional images).
        #[prost(message, tag = "8")]
        UserPrompt(super::UserPromptRequest),
        /// Approvals/denials for previously requested tool calls.
        #[prost(message, tag = "9")]
        ToolApproval(super::ToolApprovalRequest),
    }
    /// Context-specific fields based on the oneofKind.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Context {
        /// Chat in the context of a specific workbook.
        #[prost(message, tag = "4")]
        Workbook(super::WorkbookContext),
        /// Chat with no resource-specific context.
        #[prost(message, tag = "5")]
        Global(super::GlobalContext),
        /// Chat in the context of a checklist being edited.
        #[prost(message, tag = "10")]
        Checklist(super::ChecklistContext),
    }
}
/// WorkbookContext contains workbook-specific context fields
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct WorkbookContext {
    /// RID of the workbook to use for context
    #[prost(string, tag = "1")]
    pub workbook_rid: ::prost::alloc::string::String,
    /// The user's presence in the workbook
    /// (which tab and time range the user is viewing — see `WorkbookUserPresence`).
    #[prost(message, optional, tag = "2")]
    pub user_presence: ::core::option::Option<WorkbookUserPresence>,
}
/// ChecklistContext for use when the agent is being messaged in the context of a
/// checklist currently being edited
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ChecklistContext {
    /// RID of the checklist being edited
    #[prost(string, tag = "1")]
    pub checklist_rid: ::prost::alloc::string::String,
    /// Name of the branch that this edit is being done on
    /// This is equivalent to the 'draft name' for a check being edited in the UI
    #[prost(string, tag = "2")]
    pub branch_name: ::prost::alloc::string::String,
    /// A checklist opened in edit mode will always have some resource by which it
    /// is referencing for viewing. It can either be an asset or a run
    #[prost(oneof = "checklist_context::ReferenceRid", tags = "3, 4")]
    pub reference_rid: ::core::option::Option<checklist_context::ReferenceRid>,
}
/// Nested message and enum types in `ChecklistContext`.
pub mod checklist_context {
    /// A checklist opened in edit mode will always have some resource by which it
    /// is referencing for viewing. It can either be an asset or a run
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum ReferenceRid {
        /// RID of the asset being referenced for viewing.
        #[prost(string, tag = "3")]
        Asset(::prost::alloc::string::String),
        /// RID of the run being referenced for viewing.
        #[prost(string, tag = "4")]
        Run(::prost::alloc::string::String),
    }
}
/// DefaultContext (no context)
///
/// Empty marker message: selects a chat with no resource-specific context.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct GlobalContext {}
/// WorkbookUserPresence contains the user's presence in the workbook
/// which is used to describe what the user is viewing at the time of the message.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct WorkbookUserPresence {
    /// Index of the workbook tab the user currently has open.
    #[prost(int32, tag = "1")]
    pub tab_index: i32,
    /// Time range the user is currently viewing.
    #[prost(message, optional, tag = "2")]
    pub range: ::core::option::Option<TimeRange>,
}
/// CreateConversation request will create a new conversation thread
/// if old conversation id is not set, a brand new, clear chat is created
/// If old conversation id is set without a previous message id, the full conversation thread will be copied
/// if old conversation id is set together with a previous message id, the conversation thread up until that message will be copied
/// the above case is useful for branching a conversation into a new thread
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateConversationRequest {
    /// Title for the new conversation.
    #[prost(string, tag = "1")]
    pub title: ::prost::alloc::string::String,
    /// RID of the workspace the conversation belongs to.
    #[prost(string, tag = "2")]
    pub workspace_rid: ::prost::alloc::string::String,
    /// Optional: existing conversation to copy/branch from (see rules above).
    #[prost(string, optional, tag = "3")]
    pub old_conversation_rid: ::core::option::Option<::prost::alloc::string::String>,
    /// Optional: message in the old conversation at which to cut the copied thread.
    #[prost(string, optional, tag = "4")]
    pub previous_message_id: ::core::option::Option<::prost::alloc::string::String>,
    /// Optional: initial mode (read-only or edit) for the new conversation.
    #[prost(message, optional, tag = "5")]
    pub conversation_mode: ::core::option::Option<ConversationMode>,
}
/// CreateConversationResponse will return the conversation id for the new conversation
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateConversationResponse {
    /// RID of the newly created conversation.
    #[prost(string, tag = "1")]
    pub new_conversation_rid: ::prost::alloc::string::String,
}
/// Updates the fields if specified (optional means no change for that field)
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UpdateConversationMetadataRequest {
    /// New title; unset leaves the title unchanged.
    #[prost(string, optional, tag = "1")]
    pub title: ::core::option::Option<::prost::alloc::string::String>,
    /// RID of the conversation to update (required).
    #[prost(string, tag = "2")]
    pub conversation_rid: ::prost::alloc::string::String,
    /// New mode; unset leaves the mode unchanged.
    #[prost(message, optional, tag = "3")]
    pub conversation_mode: ::core::option::Option<ConversationMode>,
}
/// Empty acknowledgement for `UpdateConversationMetadata`.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct UpdateConversationMetadataResponse {}
/// Request to delete a conversation thread.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteConversationRequest {
    /// RID of the conversation to delete.
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
}
/// Empty acknowledgement for `DeleteConversation`.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct DeleteConversationResponse {}
/// a GetConversationRequest allows you to retrieve a subset of messages from a conversation thread represented
/// by provided rid. To start from a particular message - you can also provide a message id.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetConversationRequest {
    /// RID of the conversation to read.
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    /// Optional: message id to start the page from.
    #[prost(string, optional, tag = "2")]
    pub page_start_message_id: ::core::option::Option<::prost::alloc::string::String>,
    /// Optional: maximum number of messages to return.
    #[prost(int32, optional, tag = "3")]
    pub max_message_count: ::core::option::Option<i32>,
}
/// Model message with id allows you to identify the message ID of a given message
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ModelMessageWithId {
    /// Unique identifier for this message within the conversation.
    #[prost(string, tag = "3")]
    pub message_id: ::prost::alloc::string::String,
    /// WB agent user messages can have snapshot rids associated with them
    #[prost(string, optional, tag = "4")]
    pub snapshot_rid: ::core::option::Option<::prost::alloc::string::String>,
    /// Tool calls associated with this message that require (or have received) approval.
    #[prost(message, repeated, tag = "5")]
    pub tool_approval_requests: ::prost::alloc::vec::Vec<ToolCallDescription>,
    /// Payload: either a chat message or a tool-action description.
    #[prost(oneof = "model_message_with_id::Content", tags = "1, 2")]
    pub content: ::core::option::Option<model_message_with_id::Content>,
}
/// Nested message and enum types in `ModelMessageWithId`.
pub mod model_message_with_id {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Content {
        /// A user or assistant chat message.
        #[prost(message, tag = "1")]
        Message(super::ModelMessage),
        /// A high-level description of a tool call the agent performed.
        #[prost(message, tag = "2")]
        ToolAction(super::ToolAction),
    }
}
/// Response carrying a page of conversation messages plus the conversation's metadata.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetConversationResponse {
    /// Messages in conversation order.
    #[prost(message, repeated, tag = "1")]
    pub ordered_messages: ::prost::alloc::vec::Vec<ModelMessageWithId>,
    /// Metadata (title, timestamps, mode) for the conversation.
    #[prost(message, optional, tag = "2")]
    pub conversation_metadata: ::core::option::Option<ConversationMetadata>,
}
/// Will generate all conversation threads that this user has in this workspace
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListConversationsRequest {
    /// RID of the workspace to list conversations for.
    #[prost(string, tag = "1")]
    pub workspace_rid: ::prost::alloc::string::String,
    /// Pagination token from a previous `ListConversationsResponse`.
    #[prost(string, optional, tag = "2")]
    pub next_page_token: ::core::option::Option<::prost::alloc::string::String>,
    /// Optional: maximum number of conversations per page.
    #[prost(int32, optional, tag = "3")]
    pub page_size: ::core::option::Option<i32>,
}
/// Metadata describing a single conversation thread.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ConversationMetadata {
    /// RID identifying the conversation.
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    /// Human-readable conversation title.
    #[prost(string, tag = "2")]
    pub title: ::prost::alloc::string::String,
    /// When the conversation was created.
    #[prost(message, optional, tag = "3")]
    pub created_at: ::core::option::Option<
        super::super::super::google::protobuf::Timestamp,
    >,
    /// When the conversation was last updated.
    #[prost(message, optional, tag = "4")]
    pub last_updated_at: ::core::option::Option<
        super::super::super::google::protobuf::Timestamp,
    >,
    /// The conversation's mode (read-only or edit).
    #[prost(message, optional, tag = "5")]
    pub mode: ::core::option::Option<ConversationMode>,
}
/// ListConversationsResponse is a list of conversations that can be used in a call to GetConversationRequest
/// to get a full conversation from storage. These are ordered by creation time.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListConversationsResponse {
    /// One metadata entry per conversation in this page.
    #[prost(message, repeated, tag = "1")]
    pub conversations: ::prost::alloc::vec::Vec<ConversationMetadata>,
    /// Token for fetching the next page; unset when there are no more pages.
    #[prost(string, optional, tag = "2")]
    pub next_page_token: ::core::option::Option<::prost::alloc::string::String>,
}
/// A time interval bounded by optional start and end timestamps.
/// NOTE(review): endpoint inclusivity is not specified here — confirm with the proto/service docs.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct TimeRange {
    /// Start of the range.
    #[prost(message, optional, tag = "1")]
    pub range_start: ::core::option::Option<Timestamp>,
    /// End of the range.
    #[prost(message, optional, tag = "2")]
    pub range_end: ::core::option::Option<Timestamp>,
}
/// A point in time as seconds plus nanoseconds.
/// NOTE(review): both fields are i32 — unlike `google.protobuf.Timestamp` (i64 seconds),
/// which `ConversationMetadata` uses. Confirm the intended epoch and range of this type.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct Timestamp {
    /// Whole seconds component.
    #[prost(int32, tag = "1")]
    pub seconds: i32,
    /// Sub-second component, in nanoseconds.
    #[prost(int32, tag = "2")]
    pub nanoseconds: i32,
}
/// ModelMessage is a discriminated union of system, user, assistant, and tool messages.
/// Each message type has its own structure and content.
/// NOTE(review): only user and assistant variants are present in this version of the schema.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ModelMessage {
    /// Which role authored the message.
    #[prost(oneof = "model_message::Kind", tags = "1, 2")]
    pub kind: ::core::option::Option<model_message::Kind>,
}
/// Nested message and enum types in `ModelMessage`.
pub mod model_message {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Kind {
        /// A message authored by the user.
        #[prost(message, tag = "1")]
        User(super::UserModelMessage),
        /// A message authored by the assistant.
        #[prost(message, tag = "2")]
        Assistant(super::AssistantModelMessage),
    }
}
/// A user message containing text
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UserModelMessage {
    /// Ordered content parts making up the message.
    #[prost(message, repeated, tag = "1")]
    pub text: ::prost::alloc::vec::Vec<UserContentPart>,
}
/// An assistant message containing text
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AssistantModelMessage {
    /// Ordered content parts (text and/or reasoning) making up the message.
    #[prost(message, repeated, tag = "1")]
    pub content_parts: ::prost::alloc::vec::Vec<AssistantContentPart>,
}
/// A single content part of a user message; currently only text is supported.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UserContentPart {
    /// The concrete part payload.
    #[prost(oneof = "user_content_part::Part", tags = "1")]
    pub part: ::core::option::Option<user_content_part::Part>,
}
/// Nested message and enum types in `UserContentPart`.
pub mod user_content_part {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Part {
        /// Plain-text content.
        #[prost(message, tag = "1")]
        Text(super::TextPart),
    }
}
/// Content part for assistant messages: can be text, reasoning, or mutation.
/// NOTE(review): only text and reasoning variants exist in this version of the schema.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AssistantContentPart {
    /// The concrete part payload.
    #[prost(oneof = "assistant_content_part::Part", tags = "1, 2")]
    pub part: ::core::option::Option<assistant_content_part::Part>,
}
/// Nested message and enum types in `AssistantContentPart`.
pub mod assistant_content_part {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Part {
        /// Plain-text content.
        #[prost(message, tag = "1")]
        Text(super::TextPart),
        /// The assistant's reasoning trace.
        #[prost(message, tag = "2")]
        Reasoning(super::ReasoningPart),
    }
}
/// Text part for user or assistant messages.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextPart {
    /// The text content.
    #[prost(string, tag = "1")]
    pub text: ::prost::alloc::string::String,
}
/// User-supplied image part.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ImagePart {
    /// The base64-encoded image data
    /// NOTE(review): the wire type is raw bytes — confirm whether this carries base64
    /// text or raw image bytes; the comment and the field type disagree.
    #[prost(bytes = "vec", tag = "1")]
    pub data: ::prost::alloc::vec::Vec<u8>,
    /// The media type of the image (e.g. "image/png", "image/jpeg")
    #[prost(string, optional, tag = "2")]
    pub media_type: ::core::option::Option<::prost::alloc::string::String>,
    /// Optional: the filename of the image
    #[prost(string, optional, tag = "3")]
    pub filename: ::core::option::Option<::prost::alloc::string::String>,
}
/// Reasoning part for assistant messages.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningPart {
    /// The reasoning text.
    #[prost(string, tag = "1")]
    pub reasoning: ::prost::alloc::string::String,
}
/// StreamChatResponse is a discriminated union response to a StreamChatRequest
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StreamChatResponse {
    /// The concrete event for this frame of the stream.
    /// NOTE(review): tag 9 is skipped — presumably reserved/removed in the proto; do not reuse it.
    #[prost(
        oneof = "stream_chat_response::Response",
        tags = "1, 2, 3, 4, 5, 6, 7, 8, 10"
    )]
    pub response: ::core::option::Option<stream_chat_response::Response>,
}
/// Nested message and enum types in `StreamChatResponse`.
pub mod stream_chat_response {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Response {
        /// The agent run finished.
        #[prost(message, tag = "1")]
        Finish(super::Finish),
        /// An error occurred during the run.
        #[prost(message, tag = "2")]
        Error(super::Error),
        /// Start of a streamed text message.
        #[prost(message, tag = "3")]
        TextStart(super::TextStart),
        /// Next chunk of a streamed text message.
        #[prost(message, tag = "4")]
        TextDelta(super::TextDelta),
        /// End of a streamed text message.
        #[prost(message, tag = "5")]
        TextEnd(super::TextEnd),
        /// Start of a streamed reasoning message.
        #[prost(message, tag = "6")]
        ReasoningStart(super::ReasoningStart),
        /// Next chunk of a streamed reasoning message.
        #[prost(message, tag = "7")]
        ReasoningDelta(super::ReasoningDelta),
        /// End of a streamed reasoning message.
        #[prost(message, tag = "8")]
        ReasoningEnd(super::ReasoningEnd),
        /// High-level description of a tool call the agent is making.
        #[prost(message, tag = "10")]
        ToolAction(super::ToolAction),
    }
}
/// Describes a single tool call, for display and for the approval workflow.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ToolCallDescription {
    /// Identifies the tool call (matches `ToolApprovalResult.tool_call_id`).
    #[prost(string, tag = "1")]
    pub tool_call_id: ::prost::alloc::string::String,
    /// Name of the tool being invoked.
    #[prost(string, tag = "2")]
    pub tool_name: ::prost::alloc::string::String,
    /// string representation of the proposed tool args for display
    #[prost(string, tag = "3")]
    pub tool_args_json_string: ::prost::alloc::string::String,
    /// used to conditionally render an approval button based on the outcome of the tool call
    #[prost(enumeration = "ToolCallStatus", tag = "4")]
    pub status: i32,
}
/// Indicates the end of a chat session
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Finish {
    /// The message ids in order of all generated messages for this agent run
    /// These ids can be used to branch a message from that specific message
    #[prost(string, repeated, tag = "1")]
    pub ordered_message_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
    /// In the case that this is the first agent run in a conversation thread, we also
    /// return the new conversation title generated
    #[prost(string, optional, tag = "2")]
    pub new_title: ::core::option::Option<::prost::alloc::string::String>,
    /// Tool calls that still require the user's approval/denial when the run ended.
    #[prost(message, repeated, tag = "3")]
    pub tool_approval_requests: ::prost::alloc::vec::Vec<ToolCallDescription>,
}
/// An error that occurred during the chat session
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Error {
    /// Human-readable description of the error.
    #[prost(string, tag = "1")]
    pub message: ::prost::alloc::string::String,
}
/// Indicates the start of a text message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextStart {
    /// uniquely identifies the text message (e.g. uuid) so that the client can
    /// merge parallel message streams (if it happens).
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// A delta (continuation) of a text message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextDelta {
    /// Identifier of the text message this delta belongs to (see `TextStart.id`).
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// The next chunk of text
    #[prost(string, tag = "2")]
    pub delta: ::prost::alloc::string::String,
}
/// Indicates the end of a text message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextEnd {
    /// Identifier of the text message being closed (see `TextStart.id`).
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// Indicates the start of a reasoning message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningStart {
    /// Uniquely identifies the reasoning message for correlating deltas and the end marker.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// A delta (continuation) of a reasoning message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningDelta {
    /// Identifier of the reasoning message this delta belongs to (see `ReasoningStart.id`).
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// The next chunk of reasoning
    #[prost(string, tag = "2")]
    pub delta: ::prost::alloc::string::String,
}
/// Indicates the end of a reasoning message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningEnd {
    /// Identifier of the reasoning message being closed (see `ReasoningStart.id`).
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// this is a concise description of a tool call that the agent is making internally
/// without revealing too much detail about the tool call, it informs the user what the agent is doing
/// at a high level. the format is: `{tool_action_verb} {tool_target}` for example:
/// "Search channels for My Datasource"
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ToolAction {
    /// Identifier for this tool action
    /// (NOTE(review): presumably unique per stream like `TextStart.id` — confirm).
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// "Thought", "Read", "Find", "Look-up", etc.
    #[prost(string, tag = "2")]
    pub tool_action_verb: ::prost::alloc::string::String,
    /// "workbook", "channel", "variable", "panel", etc.
    #[prost(string, optional, tag = "3")]
    pub tool_target: ::core::option::Option<::prost::alloc::string::String>,
}
/// Lifecycle of a tool call in the approval workflow (see `ToolCallDescription.status`).
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum ToolCallStatus {
    /// Default/unknown status.
    Unspecified = 0,
    /// The user approved the tool call.
    Approved = 1,
    /// The user denied the tool call.
    Denied = 2,
    /// The tool call is waiting for the user's decision.
    AwaitingApproval = 3,
}
1262impl ToolCallStatus {
1263    /// String value of the enum field names used in the ProtoBuf definition.
1264    ///
1265    /// The values are not transformed in any way and thus are considered stable
1266    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
1267    pub fn as_str_name(&self) -> &'static str {
1268        match self {
1269            Self::Unspecified => "TOOL_CALL_STATUS_UNSPECIFIED",
1270            Self::Approved => "TOOL_CALL_STATUS_APPROVED",
1271            Self::Denied => "TOOL_CALL_STATUS_DENIED",
1272            Self::AwaitingApproval => "TOOL_CALL_STATUS_AWAITING_APPROVAL",
1273        }
1274    }
1275    /// Creates an enum from field names used in the ProtoBuf definition.
1276    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
1277        match value {
1278            "TOOL_CALL_STATUS_UNSPECIFIED" => Some(Self::Unspecified),
1279            "TOOL_CALL_STATUS_APPROVED" => Some(Self::Approved),
1280            "TOOL_CALL_STATUS_DENIED" => Some(Self::Denied),
1281            "TOOL_CALL_STATUS_AWAITING_APPROVAL" => Some(Self::AwaitingApproval),
1282            _ => None,
1283        }
1284    }
1285}
1286/// Generated client implementations.
1287pub mod ai_agent_service_client {
1288    #![allow(
1289        unused_variables,
1290        dead_code,
1291        missing_docs,
1292        clippy::wildcard_imports,
1293        clippy::let_unit_value,
1294    )]
1295    use tonic::codegen::*;
1296    use tonic::codegen::http::Uri;
    /// AIAgentService provides AI-powered assistance for general operations
    #[derive(Debug, Clone)]
    pub struct AiAgentServiceClient<T> {
        /// The underlying tonic gRPC client used to dispatch all RPCs.
        inner: tonic::client::Grpc<T>,
    }
1302    impl AiAgentServiceClient<tonic::transport::Channel> {
1303        /// Attempt to create a new client by connecting to a given endpoint.
1304        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
1305        where
1306            D: TryInto<tonic::transport::Endpoint>,
1307            D::Error: Into<StdError>,
1308        {
1309            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
1310            Ok(Self::new(conn))
1311        }
1312    }
1313    impl<T> AiAgentServiceClient<T>
1314    where
1315        T: tonic::client::GrpcService<tonic::body::Body>,
1316        T::Error: Into<StdError>,
1317        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
1318        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
1319    {
1320        pub fn new(inner: T) -> Self {
1321            let inner = tonic::client::Grpc::new(inner);
1322            Self { inner }
1323        }
1324        pub fn with_origin(inner: T, origin: Uri) -> Self {
1325            let inner = tonic::client::Grpc::with_origin(inner, origin);
1326            Self { inner }
1327        }
1328        pub fn with_interceptor<F>(
1329            inner: T,
1330            interceptor: F,
1331        ) -> AiAgentServiceClient<InterceptedService<T, F>>
1332        where
1333            F: tonic::service::Interceptor,
1334            T::ResponseBody: Default,
1335            T: tonic::codegen::Service<
1336                http::Request<tonic::body::Body>,
1337                Response = http::Response<
1338                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
1339                >,
1340            >,
1341            <T as tonic::codegen::Service<
1342                http::Request<tonic::body::Body>,
1343            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
1344        {
1345            AiAgentServiceClient::new(InterceptedService::new(inner, interceptor))
1346        }
1347        /// Compress requests with the given encoding.
1348        ///
1349        /// This requires the server to support it otherwise it might respond with an
1350        /// error.
1351        #[must_use]
1352        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
1353            self.inner = self.inner.send_compressed(encoding);
1354            self
1355        }
1356        /// Enable decompressing responses.
1357        #[must_use]
1358        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
1359            self.inner = self.inner.accept_compressed(encoding);
1360            self
1361        }
1362        /// Limits the maximum size of a decoded message.
1363        ///
1364        /// Default: `4MB`
1365        #[must_use]
1366        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
1367            self.inner = self.inner.max_decoding_message_size(limit);
1368            self
1369        }
1370        /// Limits the maximum size of an encoded message.
1371        ///
1372        /// Default: `usize::MAX`
1373        #[must_use]
1374        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
1375            self.inner = self.inner.max_encoding_message_size(limit);
1376            self
1377        }
1378        /// StreamChat handles bidirectional streaming chat for AI agent
1379        pub async fn stream_chat(
1380            &mut self,
1381            request: impl tonic::IntoRequest<super::StreamChatRequest>,
1382        ) -> std::result::Result<
1383            tonic::Response<tonic::codec::Streaming<super::StreamChatResponse>>,
1384            tonic::Status,
1385        > {
1386            self.inner
1387                .ready()
1388                .await
1389                .map_err(|e| {
1390                    tonic::Status::unknown(
1391                        format!("Service was not ready: {}", e.into()),
1392                    )
1393                })?;
1394            let codec = tonic::codec::ProstCodec::default();
1395            let path = http::uri::PathAndQuery::from_static(
1396                "/nominal.ai.v1.AIAgentService/StreamChat",
1397            );
1398            let mut req = request.into_request();
1399            req.extensions_mut()
1400                .insert(GrpcMethod::new("nominal.ai.v1.AIAgentService", "StreamChat"));
1401            self.inner.server_streaming(req, path, codec).await
1402        }
1403        /// GetConversation handles getting a complete conversation list, with an optional limit on number of messages returned
1404        pub async fn get_conversation(
1405            &mut self,
1406            request: impl tonic::IntoRequest<super::GetConversationRequest>,
1407        ) -> std::result::Result<
1408            tonic::Response<super::GetConversationResponse>,
1409            tonic::Status,
1410        > {
1411            self.inner
1412                .ready()
1413                .await
1414                .map_err(|e| {
1415                    tonic::Status::unknown(
1416                        format!("Service was not ready: {}", e.into()),
1417                    )
1418                })?;
1419            let codec = tonic::codec::ProstCodec::default();
1420            let path = http::uri::PathAndQuery::from_static(
1421                "/nominal.ai.v1.AIAgentService/GetConversation",
1422            );
1423            let mut req = request.into_request();
1424            req.extensions_mut()
1425                .insert(
1426                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "GetConversation"),
1427                );
1428            self.inner.unary(req, path, codec).await
1429        }
1430        /// ListConversations handles getting the list of conversation ids ordered by most recently updated
1431        pub async fn list_conversations(
1432            &mut self,
1433            request: impl tonic::IntoRequest<super::ListConversationsRequest>,
1434        ) -> std::result::Result<
1435            tonic::Response<super::ListConversationsResponse>,
1436            tonic::Status,
1437        > {
1438            self.inner
1439                .ready()
1440                .await
1441                .map_err(|e| {
1442                    tonic::Status::unknown(
1443                        format!("Service was not ready: {}", e.into()),
1444                    )
1445                })?;
1446            let codec = tonic::codec::ProstCodec::default();
1447            let path = http::uri::PathAndQuery::from_static(
1448                "/nominal.ai.v1.AIAgentService/ListConversations",
1449            );
1450            let mut req = request.into_request();
1451            req.extensions_mut()
1452                .insert(
1453                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "ListConversations"),
1454                );
1455            self.inner.unary(req, path, codec).await
1456        }
1457        /// CreateConversation handles creating a conversation and assigning it a conversation rid
1458        pub async fn create_conversation(
1459            &mut self,
1460            request: impl tonic::IntoRequest<super::CreateConversationRequest>,
1461        ) -> std::result::Result<
1462            tonic::Response<super::CreateConversationResponse>,
1463            tonic::Status,
1464        > {
1465            self.inner
1466                .ready()
1467                .await
1468                .map_err(|e| {
1469                    tonic::Status::unknown(
1470                        format!("Service was not ready: {}", e.into()),
1471                    )
1472                })?;
1473            let codec = tonic::codec::ProstCodec::default();
1474            let path = http::uri::PathAndQuery::from_static(
1475                "/nominal.ai.v1.AIAgentService/CreateConversation",
1476            );
1477            let mut req = request.into_request();
1478            req.extensions_mut()
1479                .insert(
1480                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "CreateConversation"),
1481                );
1482            self.inner.unary(req, path, codec).await
1483        }
1484        /// UpdateConversationMetadata handles updating any metadata associated with a conversation
1485        pub async fn update_conversation_metadata(
1486            &mut self,
1487            request: impl tonic::IntoRequest<super::UpdateConversationMetadataRequest>,
1488        ) -> std::result::Result<
1489            tonic::Response<super::UpdateConversationMetadataResponse>,
1490            tonic::Status,
1491        > {
1492            self.inner
1493                .ready()
1494                .await
1495                .map_err(|e| {
1496                    tonic::Status::unknown(
1497                        format!("Service was not ready: {}", e.into()),
1498                    )
1499                })?;
1500            let codec = tonic::codec::ProstCodec::default();
1501            let path = http::uri::PathAndQuery::from_static(
1502                "/nominal.ai.v1.AIAgentService/UpdateConversationMetadata",
1503            );
1504            let mut req = request.into_request();
1505            req.extensions_mut()
1506                .insert(
1507                    GrpcMethod::new(
1508                        "nominal.ai.v1.AIAgentService",
1509                        "UpdateConversationMetadata",
1510                    ),
1511                );
1512            self.inner.unary(req, path, codec).await
1513        }
1514        /// DeleteConversation handles deleting a specific conversation by conversation rid
1515        pub async fn delete_conversation(
1516            &mut self,
1517            request: impl tonic::IntoRequest<super::DeleteConversationRequest>,
1518        ) -> std::result::Result<
1519            tonic::Response<super::DeleteConversationResponse>,
1520            tonic::Status,
1521        > {
1522            self.inner
1523                .ready()
1524                .await
1525                .map_err(|e| {
1526                    tonic::Status::unknown(
1527                        format!("Service was not ready: {}", e.into()),
1528                    )
1529                })?;
1530            let codec = tonic::codec::ProstCodec::default();
1531            let path = http::uri::PathAndQuery::from_static(
1532                "/nominal.ai.v1.AIAgentService/DeleteConversation",
1533            );
1534            let mut req = request.into_request();
1535            req.extensions_mut()
1536                .insert(
1537                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "DeleteConversation"),
1538                );
1539            self.inner.unary(req, path, codec).await
1540        }
1541        /// GetSnapshotRidByUserMessageId handles resolving the snapshot rid of the workbook at the time the message is sent
1542        pub async fn get_snapshot_rid_by_user_message_id(
1543            &mut self,
1544            request: impl tonic::IntoRequest<super::GetSnapshotRidByUserMessageIdRequest>,
1545        ) -> std::result::Result<
1546            tonic::Response<super::GetSnapshotRidByUserMessageIdResponse>,
1547            tonic::Status,
1548        > {
1549            self.inner
1550                .ready()
1551                .await
1552                .map_err(|e| {
1553                    tonic::Status::unknown(
1554                        format!("Service was not ready: {}", e.into()),
1555                    )
1556                })?;
1557            let codec = tonic::codec::ProstCodec::default();
1558            let path = http::uri::PathAndQuery::from_static(
1559                "/nominal.ai.v1.AIAgentService/GetSnapshotRidByUserMessageId",
1560            );
1561            let mut req = request.into_request();
1562            req.extensions_mut()
1563                .insert(
1564                    GrpcMethod::new(
1565                        "nominal.ai.v1.AIAgentService",
1566                        "GetSnapshotRidByUserMessageId",
1567                    ),
1568                );
1569            self.inner.unary(req, path, codec).await
1570        }
1571    }
1572}