nominal_api/proto/
nominal.ai.v1.rs

1// This file is @generated by prost-build.
/// StreamChatRequest is a request to stream chat messages for the AI agent.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StreamChatRequest {
    /// The conversation ID
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    /// The user message to append to the conversation
    #[prost(message, optional, tag = "2")]
    pub message: ::core::option::Option<UserModelMessage>,
    /// Optional: image files to provide to the agent
    #[prost(message, repeated, tag = "3")]
    pub images: ::prost::alloc::vec::Vec<ImagePart>,
    /// Context-specific fields based on which oneof variant is set
    /// (workbook-scoped or global).
    #[prost(oneof = "stream_chat_request::Context", tags = "4, 5")]
    pub context: ::core::option::Option<stream_chat_request::Context>,
}
/// Nested message and enum types in `StreamChatRequest`.
pub mod stream_chat_request {
    /// Context for the chat request: either scoped to a workbook or global.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Context {
        /// Workbook-scoped context (see [`super::WorkbookContext`]).
        #[prost(message, tag = "4")]
        Workbook(super::WorkbookContext),
        /// Workbook-independent context (see [`super::GlobalContext`]).
        #[prost(message, tag = "5")]
        Global(super::GlobalContext),
    }
}
/// WorkbookContext contains workbook-specific context fields
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct WorkbookContext {
    /// RID of the workbook to use for context
    #[prost(string, tag = "1")]
    pub workbook_rid: ::prost::alloc::string::String,
    /// Optional: the user's presence in the workbook (tab / time range the
    /// user is currently viewing; see [`WorkbookUserPresence`])
    #[prost(message, optional, tag = "2")]
    pub user_presence: ::core::option::Option<WorkbookUserPresence>,
}
/// GlobalContext carries no fields: the request is not scoped to any
/// particular workbook. (The generated comment previously called this
/// "DefaultContext"; the type is the `Global` variant of the context oneof.)
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct GlobalContext {}
/// WorkbookUserPresence contains the user's presence in the workbook
/// which is used to describe what the user is viewing at the time of the message.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct WorkbookUserPresence {
    /// Index of the workbook tab the user is viewing, if any.
    #[prost(int32, optional, tag = "1")]
    pub tab_index: ::core::option::Option<i32>,
    /// Time range the user is viewing, if any (see [`TimeRange`]).
    #[prost(message, optional, tag = "2")]
    pub range: ::core::option::Option<TimeRange>,
}
/// CreateConversation request will create a new conversation thread.
/// - If old conversation id is not set, a brand new, clear chat is created.
/// - If old conversation id is set without a previous message id, the full
///   conversation thread will be copied.
/// - If old conversation id is set together with a previous message id, the
///   conversation thread up until that message will be copied; this case is
///   useful for branching a conversation into a new thread.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateConversationRequest {
    /// Title for the new conversation.
    #[prost(string, tag = "1")]
    pub title: ::prost::alloc::string::String,
    /// Workspace the new conversation belongs to.
    #[prost(string, tag = "2")]
    pub workspace_rid: ::prost::alloc::string::String,
    /// Optional: existing conversation to copy/branch from (see rules above).
    #[prost(string, optional, tag = "3")]
    pub old_conversation_rid: ::core::option::Option<::prost::alloc::string::String>,
    /// Optional: message to branch at; only meaningful when
    /// `old_conversation_rid` is also set.
    #[prost(string, optional, tag = "4")]
    pub previous_message_id: ::core::option::Option<::prost::alloc::string::String>,
}
/// CreateConversationResponse will return the conversation id for the new conversation
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateConversationResponse {
    /// RID of the newly created conversation.
    #[prost(string, tag = "1")]
    pub new_conversation_rid: ::prost::alloc::string::String,
}
/// Request to update metadata (currently just the title) of an existing
/// conversation identified by `conversation_rid`.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UpdateConversationMetadataRequest {
    /// New title to set on the conversation.
    #[prost(string, tag = "1")]
    pub title: ::prost::alloc::string::String,
    /// RID of the conversation to update.
    #[prost(string, tag = "2")]
    pub conversation_rid: ::prost::alloc::string::String,
}
/// Empty response acknowledging a successful metadata update.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct UpdateConversationMetadataResponse {}
/// Request to delete the conversation identified by `conversation_rid`.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteConversationRequest {
    /// RID of the conversation to delete.
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
}
/// Empty response acknowledging a successful conversation deletion.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct DeleteConversationResponse {}
/// A GetConversationRequest allows you to retrieve a subset of messages from a conversation thread represented
/// by provided rid. To start from a particular message - you can also provide a message id.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetConversationRequest {
    /// RID of the conversation to read.
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    /// Optional: message id to start the page from (for pagination).
    #[prost(string, optional, tag = "2")]
    pub page_start_message_id: ::core::option::Option<::prost::alloc::string::String>,
    /// Optional: upper bound on the number of messages returned.
    #[prost(int32, optional, tag = "3")]
    pub max_message_count: ::core::option::Option<i32>,
}
/// Model message with id allows you to identify the message ID of a given message
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ModelMessageWithId {
    /// Unique id of this message within the conversation.
    #[prost(string, tag = "3")]
    pub message_id: ::prost::alloc::string::String,
    /// The payload: either a chat message or a tool action (oneof).
    #[prost(oneof = "model_message_with_id::Content", tags = "1, 2")]
    pub content: ::core::option::Option<model_message_with_id::Content>,
}
/// Nested message and enum types in `ModelMessageWithId`.
pub mod model_message_with_id {
    /// Payload of a stored conversation entry.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Content {
        /// A user or assistant chat message.
        #[prost(message, tag = "1")]
        Message(super::ModelMessage),
        /// A high-level description of a tool call the agent made.
        #[prost(message, tag = "2")]
        ToolAction(super::ToolAction),
    }
}
/// Response to [`GetConversationRequest`]: a page of messages plus the
/// conversation's metadata.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetConversationResponse {
    /// Messages in conversation order.
    #[prost(message, repeated, tag = "1")]
    pub ordered_messages: ::prost::alloc::vec::Vec<ModelMessageWithId>,
    /// Metadata (title, timestamps, rid) for the conversation.
    #[prost(message, optional, tag = "2")]
    pub conversation_metadata: ::core::option::Option<ConversationMetadata>,
}
/// Will generate all conversation threads that this user has in this workspace
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListConversationsRequest {
    /// Workspace whose conversations should be listed.
    #[prost(string, tag = "1")]
    pub workspace_rid: ::prost::alloc::string::String,
}
/// Metadata describing a conversation thread (identity, title, timestamps).
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ConversationMetadata {
    /// RID uniquely identifying the conversation.
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    /// Human-readable conversation title.
    #[prost(string, tag = "2")]
    pub title: ::prost::alloc::string::String,
    /// When the conversation was created (well-known protobuf Timestamp).
    #[prost(message, optional, tag = "3")]
    pub created_at: ::core::option::Option<
        super::super::super::google::protobuf::Timestamp,
    >,
    /// When the conversation was last updated.
    #[prost(message, optional, tag = "4")]
    pub last_updated_at: ::core::option::Option<
        super::super::super::google::protobuf::Timestamp,
    >,
}
/// ListConversationsResponse is a list of conversations that can be used in a call to GetConversationRequest
/// to get a full conversation from storage. These are ordered by creation time.
/// NOTE(review): the `list_conversations` RPC doc in this file says the list is
/// ordered by most-recently-updated instead — confirm which ordering the server
/// actually applies.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListConversationsResponse {
    /// Metadata for each conversation, in server-defined order (see note above).
    #[prost(message, repeated, tag = "1")]
    pub conversations: ::prost::alloc::vec::Vec<ConversationMetadata>,
}
/// A closed time interval expressed with the package-local [`Timestamp`] type
/// (not `google.protobuf.Timestamp`).
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct TimeRange {
    /// Start of the range, if set.
    #[prost(message, optional, tag = "1")]
    pub range_start: ::core::option::Option<Timestamp>,
    /// End of the range, if set.
    #[prost(message, optional, tag = "2")]
    pub range_end: ::core::option::Option<Timestamp>,
}
/// Package-local timestamp, distinct from `google.protobuf.Timestamp`.
/// NOTE(review): `seconds` is int32 in the proto; if it carries a Unix epoch
/// it overflows in 2038 — confirm the intended epoch/range with the proto owner.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct Timestamp {
    /// Whole-seconds component.
    #[prost(int32, tag = "1")]
    pub seconds: i32,
    /// Sub-second component, in nanoseconds.
    #[prost(int32, tag = "2")]
    pub nanoseconds: i32,
}
/// ModelMessage is a discriminated union of user and assistant messages.
/// (The generated comment also mentions system and tool messages, but only
/// `User` and `Assistant` variants exist in this oneof.)
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ModelMessage {
    /// Which kind of message this is (oneof).
    #[prost(oneof = "model_message::Kind", tags = "1, 2")]
    pub kind: ::core::option::Option<model_message::Kind>,
}
/// Nested message and enum types in `ModelMessage`.
pub mod model_message {
    /// The message variant: user-authored or assistant-authored.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Kind {
        /// A message written by the user.
        #[prost(message, tag = "1")]
        User(super::UserModelMessage),
        /// A message produced by the assistant.
        #[prost(message, tag = "2")]
        Assistant(super::AssistantModelMessage),
    }
}
/// A user message containing text
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UserModelMessage {
    /// Ordered content parts. Despite the field name, each entry is a
    /// [`UserContentPart`] (currently text-only), not a raw string.
    #[prost(message, repeated, tag = "1")]
    pub text: ::prost::alloc::vec::Vec<UserContentPart>,
}
/// An assistant message containing text
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AssistantModelMessage {
    /// Ordered content parts (text and/or reasoning; see [`AssistantContentPart`]).
    #[prost(message, repeated, tag = "1")]
    pub content_parts: ::prost::alloc::vec::Vec<AssistantContentPart>,
}
/// A single content part of a user message; currently text is the only kind.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UserContentPart {
    /// The part payload (oneof; only `Text` today).
    #[prost(oneof = "user_content_part::Part", tags = "1")]
    pub part: ::core::option::Option<user_content_part::Part>,
}
/// Nested message and enum types in `UserContentPart`.
pub mod user_content_part {
    /// The kind of user content part.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Part {
        /// Plain text content.
        #[prost(message, tag = "1")]
        Text(super::TextPart),
    }
}
/// Content part for assistant messages: either text or reasoning.
/// (The generated comment previously also mentioned "mutation", but no such
/// variant exists in this oneof.)
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AssistantContentPart {
    /// The part payload (oneof).
    #[prost(oneof = "assistant_content_part::Part", tags = "1, 2")]
    pub part: ::core::option::Option<assistant_content_part::Part>,
}
/// Nested message and enum types in `AssistantContentPart`.
pub mod assistant_content_part {
    /// The kind of assistant content part.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Part {
        /// User-visible response text.
        #[prost(message, tag = "1")]
        Text(super::TextPart),
        /// Model reasoning/thinking text.
        #[prost(message, tag = "2")]
        Reasoning(super::ReasoningPart),
    }
}
/// Text part for user or assistant messages.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextPart {
    /// The text content of this part.
    #[prost(string, tag = "1")]
    pub text: ::prost::alloc::string::String,
}
/// User-supplied image part.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ImagePart {
    /// The image data. NOTE(review): the proto comment says "base64-encoded",
    /// but this is a `bytes` field, which normally carries raw image bytes —
    /// confirm whether senders actually base64-encode before filling this in.
    #[prost(bytes = "vec", tag = "1")]
    pub data: ::prost::alloc::vec::Vec<u8>,
    /// The media type of the image (e.g. "image/png", "image/jpeg")
    #[prost(string, optional, tag = "2")]
    pub media_type: ::core::option::Option<::prost::alloc::string::String>,
    /// Optional: the filename of the image
    #[prost(string, optional, tag = "3")]
    pub filename: ::core::option::Option<::prost::alloc::string::String>,
}
/// Reasoning part for assistant messages.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningPart {
    /// The reasoning/thinking text.
    #[prost(string, tag = "1")]
    pub reasoning: ::prost::alloc::string::String,
}
/// StreamChatResponse is a discriminated union response to a StreamChatRequest
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StreamChatResponse {
    /// One streamed event; the client is expected to merge start/delta/end
    /// events by their `id` fields and stop on `Finish` or `Error`.
    #[prost(
        oneof = "stream_chat_response::Response",
        tags = "1, 2, 3, 4, 5, 6, 7, 8, 9, 10"
    )]
    pub response: ::core::option::Option<stream_chat_response::Response>,
}
/// Nested message and enum types in `StreamChatResponse`.
pub mod stream_chat_response {
    /// A single streamed chat event.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Response {
        /// Terminal event: the agent run completed.
        #[prost(message, tag = "1")]
        Finish(super::Finish),
        /// Terminal event: the agent run failed.
        #[prost(message, tag = "2")]
        Error(super::Error),
        /// Begins a text message stream identified by `id`.
        #[prost(message, tag = "3")]
        TextStart(super::TextStart),
        /// Appends a chunk to an in-flight text message.
        #[prost(message, tag = "4")]
        TextDelta(super::TextDelta),
        /// Ends a text message stream.
        #[prost(message, tag = "5")]
        TextEnd(super::TextEnd),
        /// Begins a reasoning stream identified by `id`.
        #[prost(message, tag = "6")]
        ReasoningStart(super::ReasoningStart),
        /// Appends a chunk to an in-flight reasoning stream.
        #[prost(message, tag = "7")]
        ReasoningDelta(super::ReasoningDelta),
        /// Ends a reasoning stream.
        #[prost(message, tag = "8")]
        ReasoningEnd(super::ReasoningEnd),
        /// this will be deprecated in favor of MCP-based mutations
        #[prost(message, tag = "9")]
        WorkbookMutation(super::WorkbookMutation),
        /// High-level description of an internal tool call.
        #[prost(message, tag = "10")]
        ToolAction(super::ToolAction),
    }
}
/// Indicates the end of a chat session
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Finish {
    /// The message ids in order of all generated messages for this agent run
    /// These ids can be used to branch a message from that specific message
    #[prost(string, repeated, tag = "1")]
    pub ordered_message_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
    /// In the case that this is the first agent run in a conversation thread, we also
    /// return the new conversation title generated
    #[prost(string, optional, tag = "2")]
    pub new_title: ::core::option::Option<::prost::alloc::string::String>,
}
/// An error that occurred during the chat session
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Error {
    /// Human-readable description of what went wrong.
    #[prost(string, tag = "1")]
    pub message: ::prost::alloc::string::String,
}
/// Indicates the start of a text message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextStart {
    /// uniquely identifies the text message (e.g. uuid) so that the client can
    /// merge parallel message streams (if it happens).
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// A delta (continuation) of a text message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextDelta {
    /// Matches the `id` from the corresponding `TextStart`.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// The next chunk of text
    #[prost(string, tag = "2")]
    pub delta: ::prost::alloc::string::String,
}
/// Indicates the end of a text message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextEnd {
    /// Matches the `id` from the corresponding `TextStart`.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// Indicates the start of a reasoning message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningStart {
    /// Uniquely identifies this reasoning stream for merging deltas.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// A delta (continuation) of a reasoning message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningDelta {
    /// Matches the `id` from the corresponding `ReasoningStart`.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// The next chunk of reasoning
    #[prost(string, tag = "2")]
    pub delta: ::prost::alloc::string::String,
}
/// Indicates the end of a reasoning message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningEnd {
    /// Matches the `id` from the corresponding `ReasoningStart`.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// Add a new tab to the workbook
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AddTabMutation {
    /// if tab_name is not provided, we'll name it "New Tab"
    #[prost(string, optional, tag = "1")]
    pub tab_name: ::core::option::Option<::prost::alloc::string::String>,
}
/// Adds a panel to a workbook tab, or updates it if `panel_id` already exists.
/// NOTE(review): the generated comment read `this is the "result" of the
/// mutation`, which doesn't match the message's role as a mutation payload —
/// confirm intent against the source proto.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AddOrUpdatePanelMutation {
    /// JSON-serialized representation of IVizDefinition
    #[prost(string, tag = "1")]
    pub panel_as_json: ::prost::alloc::string::String,
    /// Id of the panel to add or update.
    #[prost(string, tag = "2")]
    pub panel_id: ::prost::alloc::string::String,
    /// Index of the workbook tab the panel lives on.
    #[prost(int32, tag = "3")]
    pub tab_index: i32,
}
/// Removes the panels with the given ids from the workbook.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RemovePanelsMutation {
    /// Ids of the panels to remove.
    #[prost(string, repeated, tag = "1")]
    pub panel_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
/// AddOrReplaceVariableMutation is a mutation to add or replace a variable in the workbook
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AddOrReplaceVariableMutation {
    /// scout_compute_api_ComputeSpecWithContext (this needs to be resolved)
    /// be careful: this uses the scout_compute_api version of ComputeSpecWithContext, which stores the spec as "seriesNode"
    /// and is NOT the same as ComputeSpecWithContext that is stored in INotebook.
    #[prost(string, tag = "1")]
    pub compute_spec_as_json: ::prost::alloc::string::String,
    /// if variable_name is not provided, we'll assume it's a new variable and auto-generate a unique name
    #[prost(string, optional, tag = "2")]
    pub variable_name: ::core::option::Option<::prost::alloc::string::String>,
    /// Optional: user-facing display name for the variable.
    #[prost(string, optional, tag = "3")]
    pub display_name: ::core::option::Option<::prost::alloc::string::String>,
}
/// DeleteVariablesMutation is a mutation to delete variables from the workbook
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteVariablesMutation {
    /// Names of the variables to delete.
    #[prost(string, repeated, tag = "1")]
    pub variable_names: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
/// WorkbookMutation is a mutation to the workbook
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct WorkbookMutation {
    /// Uniquely identifies this mutation event in the stream.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// The concrete mutation to apply (oneof).
    #[prost(oneof = "workbook_mutation::Mutation", tags = "2, 3, 4, 5, 6")]
    pub mutation: ::core::option::Option<workbook_mutation::Mutation>,
}
/// Nested message and enum types in `WorkbookMutation`.
pub mod workbook_mutation {
    /// The kind of workbook mutation.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Mutation {
        /// Add a new tab.
        #[prost(message, tag = "2")]
        AddTab(super::AddTabMutation),
        /// Add a panel, or update an existing one by id.
        #[prost(message, tag = "3")]
        AddOrUpdatePanel(super::AddOrUpdatePanelMutation),
        /// Remove panels by id.
        #[prost(message, tag = "4")]
        RemovePanels(super::RemovePanelsMutation),
        /// Add a variable, or replace an existing one by name.
        #[prost(message, tag = "5")]
        AddOrReplaceVariable(super::AddOrReplaceVariableMutation),
        /// Delete variables by name.
        #[prost(message, tag = "6")]
        DeleteVariables(super::DeleteVariablesMutation),
    }
}
/// This is a concise description of a tool call that the agent is making internally.
/// Without revealing too much detail about the tool call, it informs the user what the agent is doing
/// at a high level. The format is: `{tool_action_verb} {tool_target}`, for example:
/// "Search channels for My Datasource".
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ToolAction {
    /// Uniquely identifies this tool action in the stream.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// "Thought", "Read", "Find", "Look-up", etc.
    #[prost(string, tag = "2")]
    pub tool_action_verb: ::prost::alloc::string::String,
    /// "workbook", "channel", "variable", "panel", etc.
    #[prost(string, optional, tag = "3")]
    pub tool_target: ::core::option::Option<::prost::alloc::string::String>,
}
/// Generated client implementations.
///
/// Generated by tonic-build; hand-edits here will be lost on regeneration.
/// Wraps a `tonic::client::Grpc` transport and exposes one async method per
/// RPC on `nominal.ai.v1.AIAgentService`.
pub mod ai_agent_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// AIAgentService provides AI-powered assistance for general operations
    #[derive(Debug, Clone)]
    pub struct AiAgentServiceClient<T> {
        inner: tonic::client::Grpc<T>,
    }
    impl AiAgentServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> AiAgentServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        /// Wrap an already-constructed gRPC service/channel.
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        /// Like `new`, but with an explicit origin URI for requests.
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        /// Wrap the service with a tonic interceptor (e.g. for auth headers).
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> AiAgentServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            AiAgentServiceClient::new(InterceptedService::new(inner, interceptor))
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// StreamChat handles streaming chat for the AI agent.
        /// NOTE(review): the proto comment says "bidirectional streaming", but
        /// this stub is server-streaming (one request in, a stream of
        /// `StreamChatResponse` out) — the proto comment appears stale.
        pub async fn stream_chat(
            &mut self,
            request: impl tonic::IntoRequest<super::StreamChatRequest>,
        ) -> std::result::Result<
            tonic::Response<tonic::codec::Streaming<super::StreamChatResponse>>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/StreamChat",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(GrpcMethod::new("nominal.ai.v1.AIAgentService", "StreamChat"));
            self.inner.server_streaming(req, path, codec).await
        }
        /// GetConversation handles getting a complete conversation list, with an optional limit on number of messages returned
        pub async fn get_conversation(
            &mut self,
            request: impl tonic::IntoRequest<super::GetConversationRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GetConversationResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/GetConversation",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "GetConversation"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// ListConversations handles getting the list of conversation ids ordered by most recently updated
        pub async fn list_conversations(
            &mut self,
            request: impl tonic::IntoRequest<super::ListConversationsRequest>,
        ) -> std::result::Result<
            tonic::Response<super::ListConversationsResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/ListConversations",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "ListConversations"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// CreateConversation handles creating a conversation and assigning it a conversation rid
        pub async fn create_conversation(
            &mut self,
            request: impl tonic::IntoRequest<super::CreateConversationRequest>,
        ) -> std::result::Result<
            tonic::Response<super::CreateConversationResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/CreateConversation",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "CreateConversation"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// UpdateConversationMetadata handles updating any metadata associated with a conversation
        pub async fn update_conversation_metadata(
            &mut self,
            request: impl tonic::IntoRequest<super::UpdateConversationMetadataRequest>,
        ) -> std::result::Result<
            tonic::Response<super::UpdateConversationMetadataResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/UpdateConversationMetadata",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.AIAgentService",
                        "UpdateConversationMetadata",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// DeleteConversation handles deleting a specific conversation by conversation rid
        pub async fn delete_conversation(
            &mut self,
            request: impl tonic::IntoRequest<super::DeleteConversationRequest>,
        ) -> std::result::Result<
            tonic::Response<super::DeleteConversationResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/DeleteConversation",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "DeleteConversation"),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}
/// Empty request for querying the model-provider health status.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct GetProviderStatusRequest {}
/// Health-check snapshot for the model provider.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct GetProviderStatusResponse {
    /// Timestamp when the last status was determined
    #[prost(message, optional, tag = "1")]
    pub timestamp: ::core::option::Option<
        super::super::super::google::protobuf::Timestamp,
    >,
    /// Status of the most recent health check probe
    #[prost(message, optional, tag = "2")]
    pub last_status: ::core::option::Option<ProviderStatus>,
    /// Aggregated status over the last 30 minutes (DEGRADED if any check failed or exceeded thresholds)
    #[prost(message, optional, tag = "3")]
    pub aggregated_status_over_last_30m: ::core::option::Option<ProviderStatus>,
}
/// Discriminated union: the provider is either healthy or degraded.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct ProviderStatus {
    /// The status variant (oneof).
    #[prost(oneof = "provider_status::Status", tags = "1, 2")]
    pub status: ::core::option::Option<provider_status::Status>,
}
/// Nested message and enum types in `ProviderStatus`.
pub mod provider_status {
    /// Health state of the provider.
    #[derive(Clone, Copy, PartialEq, ::prost::Oneof)]
    pub enum Status {
        /// Provider is operating normally.
        #[prost(message, tag = "1")]
        Healthy(super::Healthy),
        /// Provider is degraded; see [`super::Degraded::reason`].
        #[prost(message, tag = "2")]
        Degraded(super::Degraded),
    }
}
/// Marker message: the provider passed its health checks.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct Healthy {}
/// The provider failed a health check; `reason` says why.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct Degraded {
    /// Why the provider is degraded; decode with
    /// `DegradationReason::try_from` (stored as raw i32 per prost convention).
    #[prost(enumeration = "DegradationReason", tag = "1")]
    pub reason: i32,
}
/// Latency measurements for a provider probe. Not referenced by the other
/// messages in this chunk; presumably reported elsewhere — confirm usage.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct ProviderMetrics {
    /// Milliseconds until the first token arrived.
    #[prost(int32, tag = "1")]
    pub time_to_first_token_ms: i32,
    /// Total end-to-end duration in milliseconds.
    #[prost(int32, tag = "2")]
    pub total_time_ms: i32,
}
/// DegradationReason enumerates why a provider was judged DEGRADED
/// (see `GetProviderStatus`: latency thresholds and outright failures).
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum DegradationReason {
    /// Default/unset value.
    Unspecified = 0,
    /// Latency exceeded thresholds.
    HighLatency = 1,
    /// One or more checks failed.
    Failures = 2,
    /// Both high latency and failures were observed.
    HighLatencyAndFailures = 3,
}
740impl DegradationReason {
741    /// String value of the enum field names used in the ProtoBuf definition.
742    ///
743    /// The values are not transformed in any way and thus are considered stable
744    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
745    pub fn as_str_name(&self) -> &'static str {
746        match self {
747            Self::Unspecified => "DEGRADATION_REASON_UNSPECIFIED",
748            Self::HighLatency => "DEGRADATION_REASON_HIGH_LATENCY",
749            Self::Failures => "DEGRADATION_REASON_FAILURES",
750            Self::HighLatencyAndFailures => {
751                "DEGRADATION_REASON_HIGH_LATENCY_AND_FAILURES"
752            }
753        }
754    }
755    /// Creates an enum from field names used in the ProtoBuf definition.
756    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
757        match value {
758            "DEGRADATION_REASON_UNSPECIFIED" => Some(Self::Unspecified),
759            "DEGRADATION_REASON_HIGH_LATENCY" => Some(Self::HighLatency),
760            "DEGRADATION_REASON_FAILURES" => Some(Self::Failures),
761            "DEGRADATION_REASON_HIGH_LATENCY_AND_FAILURES" => {
762                Some(Self::HighLatencyAndFailures)
763            }
764            _ => None,
765        }
766    }
767}
/// Generated client implementations.
pub mod model_provider_health_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// ModelProviderHealthService monitors the health and performance of the backing LLM model provider.
    /// It runs lightweight health checks every 5 minutes to measure provider responsiveness and reliability,
    /// independent of the complexity of user prompts.
    #[derive(Debug, Clone)]
    pub struct ModelProviderHealthServiceClient<T> {
        // The tonic gRPC machinery that actually sends requests.
        inner: tonic::client::Grpc<T>,
    }
    impl ModelProviderHealthServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> ModelProviderHealthServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        /// Wrap an already-constructed service/channel in a client.
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        /// Like `new`, but requests are issued against the provided `origin` URI.
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        /// Build a client whose requests first pass through `interceptor`.
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> ModelProviderHealthServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            ModelProviderHealthServiceClient::new(
                InterceptedService::new(inner, interceptor),
            )
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// GetProviderStatus returns the current health status of the model provider based on recent health checks.
        /// The status is HEALTHY if all checks in the last 30 minutes passed latency thresholds,
        /// or DEGRADED if any checks exceeded latency thresholds or failed entirely.
        pub async fn get_provider_status(
            &mut self,
            request: impl tonic::IntoRequest<super::GetProviderStatusRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GetProviderStatusResponse>,
            tonic::Status,
        > {
            // Wait for the underlying service to become ready; readiness
            // failures are surfaced as a gRPC `unknown` status.
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            // Prost codec (de)serializes the protobuf request/response bodies.
            let codec = tonic::codec::ProstCodec::default();
            // Fully-qualified gRPC path: /<package.Service>/<Method>.
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.ModelProviderHealthService/GetProviderStatus",
            );
            let mut req = request.into_request();
            // Record the service/method identity on the request extensions
            // (visible to interceptors and other middleware).
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.ModelProviderHealthService",
                        "GetProviderStatus",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}
/// Empty request for `AIFeaturesService.IsAIEnabledForUser`; the target user
/// is presumably derived from the caller's auth context — confirm server-side.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct IsAiEnabledForUserRequest {}
/// Response for `AIFeaturesService.IsAIEnabledForUser`.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct IsAiEnabledForUserResponse {
    /// True when AI features are enabled for the user.
    #[prost(bool, tag = "1")]
    pub is_enabled: bool,
}
/// Generated client implementations.
pub mod ai_features_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// AIFeaturesService provides information about enabled AI features
    #[derive(Debug, Clone)]
    pub struct AiFeaturesServiceClient<T> {
        // The tonic gRPC machinery that actually sends requests.
        inner: tonic::client::Grpc<T>,
    }
    impl AiFeaturesServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> AiFeaturesServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        /// Wrap an already-constructed service/channel in a client.
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        /// Like `new`, but requests are issued against the provided `origin` URI.
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        /// Build a client whose requests first pass through `interceptor`.
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> AiFeaturesServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            AiFeaturesServiceClient::new(InterceptedService::new(inner, interceptor))
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// IsAIEnabledForUser can be used to check if AI is enabled for a specific user
        /// Deprecated: This endpoint is no longer maintained and will be removed in a future version.
        #[deprecated]
        pub async fn is_ai_enabled_for_user(
            &mut self,
            request: impl tonic::IntoRequest<super::IsAiEnabledForUserRequest>,
        ) -> std::result::Result<
            tonic::Response<super::IsAiEnabledForUserResponse>,
            tonic::Status,
        > {
            // Wait for the underlying service to become ready; readiness
            // failures are surfaced as a gRPC `unknown` status.
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            // Prost codec (de)serializes the protobuf request/response bodies.
            let codec = tonic::codec::ProstCodec::default();
            // Fully-qualified gRPC path: /<package.Service>/<Method>.
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIFeaturesService/IsAIEnabledForUser",
            );
            let mut req = request.into_request();
            // Record the service/method identity on the request extensions
            // (visible to interceptors and other middleware).
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.AIFeaturesService",
                        "IsAIEnabledForUser",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}
/// CreateOrUpdateKnowledgeBaseRequest is a request to create a knowledge from an attachment in the attachment's
/// workspace. Subsequent calls for the same attachment will overwrite the existing knowledge base.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateOrUpdateKnowledgeBaseRequest {
    /// RID of the attachment to build the knowledge base from.
    #[prost(string, tag = "1")]
    pub attachment_rid: ::prost::alloc::string::String,
    /// summary of the knowledge base, will be used by the LLM to decide when to use it
    #[prost(string, tag = "2")]
    pub summary_description: ::prost::alloc::string::String,
    /// Optional `KnowledgeBaseType` (encoded as i32, per prost); the enum's
    /// UNSPECIFIED value documents PROMPT as the default.
    #[prost(enumeration = "KnowledgeBaseType", optional, tag = "3")]
    pub r#type: ::core::option::Option<i32>,
}
/// CreateOrUpdateKnowledgeBaseResponse is the response to creating/updating a knowledge base
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateOrUpdateKnowledgeBaseResponse {
    /// RID of the created (or overwritten) knowledge base.
    #[prost(string, tag = "1")]
    pub knowledge_base_rid: ::prost::alloc::string::String,
}
/// KnowledgeBase represents a knowledge base entry
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct KnowledgeBase {
    /// Unique RID of this knowledge base.
    #[prost(string, tag = "1")]
    pub knowledge_base_rid: ::prost::alloc::string::String,
    /// RID of the source attachment this entry was built from.
    #[prost(string, tag = "2")]
    pub attachment_rid: ::prost::alloc::string::String,
    /// RID of the workspace the entry lives in.
    #[prost(string, tag = "3")]
    pub workspace_rid: ::prost::alloc::string::String,
    /// Summary the LLM uses to decide when to consult this knowledge base.
    #[prost(string, tag = "4")]
    pub summary_description: ::prost::alloc::string::String,
    /// `KnowledgeBaseType` encoded as i32 (prost enum-field convention).
    #[prost(enumeration = "KnowledgeBaseType", tag = "5")]
    pub r#type: i32,
    /// Version number; update semantics are server-defined (not visible here).
    #[prost(int32, tag = "6")]
    pub version: i32,
}
/// Request for `KnowledgeBaseService.List`: all knowledge bases in a workspace.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListRequest {
    /// RID of the workspace to list.
    #[prost(string, tag = "1")]
    pub workspace_rid: ::prost::alloc::string::String,
}
/// Response for `KnowledgeBaseService.List`.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListResponse {
    /// Knowledge bases found in the requested workspace.
    #[prost(message, repeated, tag = "1")]
    pub knowledge_bases: ::prost::alloc::vec::Vec<KnowledgeBase>,
}
/// Request for `KnowledgeBaseService.Delete`.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteRequest {
    /// RID of the knowledge base to remove.
    #[prost(string, tag = "1")]
    pub knowledge_base_rid: ::prost::alloc::string::String,
}
/// Response for `KnowledgeBaseService.Delete`.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct DeleteResponse {
    /// Whether the deletion succeeded.
    #[prost(bool, tag = "1")]
    pub success: bool,
}
/// Request for `KnowledgeBaseService.GetBatch`: fetch several entries by RID.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetBatchRequest {
    /// RIDs of the knowledge bases to fetch.
    #[prost(string, repeated, tag = "1")]
    pub knowledge_base_rids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
/// Response for `KnowledgeBaseService.GetBatch`. Whether order matches the
/// request, or missing RIDs are skipped vs. errored, is server-defined.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetBatchResponse {
    /// The resolved knowledge bases.
    #[prost(message, repeated, tag = "1")]
    pub knowledge_bases: ::prost::alloc::vec::Vec<KnowledgeBase>,
}
/// generate summary description is intentionally going to return the generated description to the frontend
/// rather than storing it in the knowledge base directly because the description needs to be accepted by the user
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GenerateSummaryDescriptionRequest {
    /// RID of the attachment to summarize.
    #[prost(string, tag = "1")]
    pub attachment_rid: ::prost::alloc::string::String,
}
/// Response for `KnowledgeBaseService.GenerateSummaryDescription`; the
/// description is returned for user review, not persisted (see request docs).
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GenerateSummaryDescriptionResponse {
    /// The generated summary description.
    #[prost(string, tag = "1")]
    pub summary_description: ::prost::alloc::string::String,
}
/// KnowledgeBaseType defines the types of knowledge base
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum KnowledgeBaseType {
    /// defaults to PROMPT
    Unspecified = 0,
    /// knowledge base gets added directly to prompt (needs to be small enough!)
    Prompt = 1,
    /// knowledge base gets used via vector search on embeddings
    Embedding = 2,
}
1118impl KnowledgeBaseType {
1119    /// String value of the enum field names used in the ProtoBuf definition.
1120    ///
1121    /// The values are not transformed in any way and thus are considered stable
1122    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
1123    pub fn as_str_name(&self) -> &'static str {
1124        match self {
1125            Self::Unspecified => "KNOWLEDGE_BASE_TYPE_UNSPECIFIED",
1126            Self::Prompt => "KNOWLEDGE_BASE_TYPE_PROMPT",
1127            Self::Embedding => "KNOWLEDGE_BASE_TYPE_EMBEDDING",
1128        }
1129    }
1130    /// Creates an enum from field names used in the ProtoBuf definition.
1131    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
1132        match value {
1133            "KNOWLEDGE_BASE_TYPE_UNSPECIFIED" => Some(Self::Unspecified),
1134            "KNOWLEDGE_BASE_TYPE_PROMPT" => Some(Self::Prompt),
1135            "KNOWLEDGE_BASE_TYPE_EMBEDDING" => Some(Self::Embedding),
1136            _ => None,
1137        }
1138    }
1139}
/// Generated client implementations.
pub mod knowledge_base_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// KnowledgeBaseService provides AI-powered knowledge base management
    #[derive(Debug, Clone)]
    pub struct KnowledgeBaseServiceClient<T> {
        // The tonic gRPC machinery that actually sends requests.
        inner: tonic::client::Grpc<T>,
    }
    impl KnowledgeBaseServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> KnowledgeBaseServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        /// Wrap an already-constructed service/channel in a client.
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        /// Like `new`, but requests are issued against the provided `origin` URI.
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        /// Build a client whose requests first pass through `interceptor`.
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> KnowledgeBaseServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            KnowledgeBaseServiceClient::new(InterceptedService::new(inner, interceptor))
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// CreateOrUpdateKnowledgeBase creates a knowledge base in the workspace
        pub async fn create_or_update_knowledge_base(
            &mut self,
            request: impl tonic::IntoRequest<super::CreateOrUpdateKnowledgeBaseRequest>,
        ) -> std::result::Result<
            tonic::Response<super::CreateOrUpdateKnowledgeBaseResponse>,
            tonic::Status,
        > {
            // Wait for the underlying service to become ready; readiness
            // failures are surfaced as a gRPC `unknown` status.
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            // Prost codec (de)serializes the protobuf request/response bodies.
            let codec = tonic::codec::ProstCodec::default();
            // Fully-qualified gRPC path: /<package.Service>/<Method>.
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/CreateOrUpdateKnowledgeBase",
            );
            let mut req = request.into_request();
            // Record the service/method identity on the request extensions
            // (visible to interceptors and other middleware).
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.KnowledgeBaseService",
                        "CreateOrUpdateKnowledgeBase",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// List returns all knowledge bases in the specified workspace
        pub async fn list(
            &mut self,
            request: impl tonic::IntoRequest<super::ListRequest>,
        ) -> std::result::Result<tonic::Response<super::ListResponse>, tonic::Status> {
            // Standard unary call: await readiness, tag the method, dispatch.
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/List",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "List"));
            self.inner.unary(req, path, codec).await
        }
        /// Delete removes a knowledge base by its RID
        pub async fn delete(
            &mut self,
            request: impl tonic::IntoRequest<super::DeleteRequest>,
        ) -> std::result::Result<tonic::Response<super::DeleteResponse>, tonic::Status> {
            // Standard unary call: await readiness, tag the method, dispatch.
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/Delete",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "Delete"));
            self.inner.unary(req, path, codec).await
        }
        /// GetBatch retrieves multiple knowledge bases by their RIDs
        pub async fn get_batch(
            &mut self,
            request: impl tonic::IntoRequest<super::GetBatchRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GetBatchResponse>,
            tonic::Status,
        > {
            // Standard unary call: await readiness, tag the method, dispatch.
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/GetBatch",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "GetBatch"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// GenerateSummaryDescription generates a summary description for an attachment rid
        pub async fn generate_summary_description(
            &mut self,
            request: impl tonic::IntoRequest<super::GenerateSummaryDescriptionRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GenerateSummaryDescriptionResponse>,
            tonic::Status,
        > {
            // Standard unary call: await readiness, tag the method, dispatch.
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/GenerateSummaryDescription",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.KnowledgeBaseService",
                        "GenerateSummaryDescription",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}