// nominal_api/proto/nominal.ai.v1.rs
// This file is @generated by prost-build.
/// StreamChatRequest is a request to stream chat messages for the AI agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StreamChatRequest {
    /// The conversation ID
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    /// The user message to append to the conversation
    #[prost(message, optional, tag = "2")]
    pub message: ::core::option::Option<UserModelMessage>,
    /// Optional: image files to provide to the agent
    #[prost(message, repeated, tag = "3")]
    pub images: ::prost::alloc::vec::Vec<ImagePart>,
    /// Context-specific fields based on the oneofKind.
    #[prost(oneof = "stream_chat_request::Context", tags = "4, 5")]
    pub context: ::core::option::Option<stream_chat_request::Context>,
}
/// Nested message and enum types in `StreamChatRequest`.
pub mod stream_chat_request {
    /// Context-specific fields based on the oneofKind.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Context {
        #[prost(message, tag = "4")]
        Workbook(super::WorkbookContext),
        #[prost(message, tag = "5")]
        Global(super::GlobalContext),
    }
}
/// WorkbookContext contains workbook-specific context fields
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct WorkbookContext {
    /// RID of the workbook to use for context
    #[prost(string, tag = "1")]
    pub workbook_rid: ::prost::alloc::string::String,
    /// Optional: the user's presence in the workbook
    #[prost(message, optional, tag = "2")]
    pub user_presence: ::core::option::Option<WorkbookUserPresence>,
}
/// GlobalContext is the default context (no workbook-specific context)
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct GlobalContext {}
/// WorkbookUserPresence contains the user's presence in the workbook
/// which is used to describe what the user is viewing at the time of the message.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct WorkbookUserPresence {
    #[prost(int32, optional, tag = "1")]
    pub tab_index: ::core::option::Option<i32>,
    #[prost(message, optional, tag = "2")]
    pub range: ::core::option::Option<TimeRange>,
}
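// A minimal sketch (not part of the generated API) of assembling a StreamChatRequest
// that carries workbook context. All rids and the message text are placeholder values.
#[allow(dead_code)]
fn example_stream_chat_request() -> StreamChatRequest {
    StreamChatRequest {
        conversation_rid: "ri.ai.conversation.example".to_string(),
        // The user message is a list of content parts; here a single text part.
        message: Some(UserModelMessage {
            text: vec![UserContentPart {
                part: Some(user_content_part::Part::Text(TextPart {
                    text: "Plot engine temperature for the last run".to_string(),
                })),
            }],
        }),
        images: vec![],
        // The oneof context selects workbook-scoped behavior for the agent.
        context: Some(stream_chat_request::Context::Workbook(WorkbookContext {
            workbook_rid: "ri.workbook.example".to_string(),
            user_presence: None,
        })),
    }
}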
/// CreateConversationRequest will create a new conversation thread.
/// If old conversation id is not set, a brand new, clear chat is created.
/// If old conversation id is set without a previous message id, the full conversation thread will be copied.
/// If old conversation id is set with a previous message id, the conversation thread up until that message will be copied.
/// The last case is useful for branching a conversation into a new thread.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateConversationRequest {
    #[prost(string, tag = "1")]
    pub title: ::prost::alloc::string::String,
    #[prost(string, tag = "2")]
    pub workspace_rid: ::prost::alloc::string::String,
    #[prost(string, optional, tag = "3")]
    pub old_conversation_rid: ::core::option::Option<::prost::alloc::string::String>,
    #[prost(string, optional, tag = "4")]
    pub previous_message_id: ::core::option::Option<::prost::alloc::string::String>,
}
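// A minimal sketch (not part of the generated API) of the three ways a conversation can be
// created per the comment above: fresh, full copy, or branched at a message. The rids and
// message id are placeholder values.
#[allow(dead_code)]
fn example_create_conversation_requests() -> [CreateConversationRequest; 3] {
    let fresh = CreateConversationRequest {
        title: "New chat".to_string(),
        workspace_rid: "ri.workspace.example".to_string(),
        old_conversation_rid: None,
        previous_message_id: None,
    };
    // Copy an entire existing thread into a new conversation.
    let full_copy = CreateConversationRequest {
        old_conversation_rid: Some("ri.ai.conversation.old".to_string()),
        ..fresh.clone()
    };
    // Branch: copy the old thread only up until a specific message.
    let branched = CreateConversationRequest {
        previous_message_id: Some("message-id-42".to_string()),
        ..full_copy.clone()
    };
    [fresh, full_copy, branched]
}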
/// CreateConversationResponse will return the conversation id for the new conversation
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateConversationResponse {
    #[prost(string, tag = "1")]
    pub new_conversation_rid: ::prost::alloc::string::String,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UpdateConversationMetadataRequest {
    #[prost(string, tag = "1")]
    pub title: ::prost::alloc::string::String,
    #[prost(string, tag = "2")]
    pub conversation_rid: ::prost::alloc::string::String,
}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct UpdateConversationMetadataResponse {}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteConversationRequest {
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct DeleteConversationResponse {}
/// A GetConversationRequest allows you to retrieve a subset of messages from the conversation thread represented
/// by the provided rid. To start from a particular message, you can also provide a message id.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetConversationRequest {
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    #[prost(string, optional, tag = "2")]
    pub page_start_message_id: ::core::option::Option<::prost::alloc::string::String>,
    #[prost(int32, optional, tag = "3")]
    pub max_message_count: ::core::option::Option<i32>,
}
/// Model message with id allows you to identify the message ID of a given message
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ModelMessageWithId {
    #[prost(string, tag = "3")]
    pub message_id: ::prost::alloc::string::String,
    #[prost(oneof = "model_message_with_id::Content", tags = "1, 2")]
    pub content: ::core::option::Option<model_message_with_id::Content>,
}
/// Nested message and enum types in `ModelMessageWithId`.
pub mod model_message_with_id {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Content {
        #[prost(message, tag = "1")]
        Message(super::ModelMessage),
        #[prost(message, tag = "2")]
        ToolAction(super::ToolAction),
    }
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetConversationResponse {
    #[prost(message, repeated, tag = "1")]
    pub ordered_messages: ::prost::alloc::vec::Vec<ModelMessageWithId>,
    #[prost(message, optional, tag = "2")]
    pub conversation_metadata: ::core::option::Option<ConversationMetadata>,
}
/// Lists all conversation threads that this user has in this workspace
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListConversationsRequest {
    #[prost(string, tag = "1")]
    pub workspace_rid: ::prost::alloc::string::String,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ConversationMetadata {
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    #[prost(string, tag = "2")]
    pub title: ::prost::alloc::string::String,
    #[prost(message, optional, tag = "3")]
    pub created_at: ::core::option::Option<
        super::super::super::google::protobuf::Timestamp,
    >,
    #[prost(message, optional, tag = "4")]
    pub last_updated_at: ::core::option::Option<
        super::super::super::google::protobuf::Timestamp,
    >,
}
/// ListConversationsResponse is a list of conversations, each of which can be used in a GetConversationRequest
/// to get the full conversation from storage. These are ordered by creation time.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListConversationsResponse {
    #[prost(message, repeated, tag = "1")]
    pub conversations: ::prost::alloc::vec::Vec<ConversationMetadata>,
}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct TimeRange {
    #[prost(message, optional, tag = "1")]
    pub range_start: ::core::option::Option<Timestamp>,
    #[prost(message, optional, tag = "2")]
    pub range_end: ::core::option::Option<Timestamp>,
}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct Timestamp {
    #[prost(int32, tag = "1")]
    pub seconds: i32,
    #[prost(int32, tag = "2")]
    pub nanoseconds: i32,
}
/// ModelMessage is a discriminated union of system, user, assistant, and tool messages.
/// Each message type has its own structure and content.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ModelMessage {
    #[prost(oneof = "model_message::Kind", tags = "1, 2")]
    pub kind: ::core::option::Option<model_message::Kind>,
}
/// Nested message and enum types in `ModelMessage`.
pub mod model_message {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Kind {
        #[prost(message, tag = "1")]
        User(super::UserModelMessage),
        #[prost(message, tag = "2")]
        Assistant(super::AssistantModelMessage),
    }
}
/// A user message containing text
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UserModelMessage {
    #[prost(message, repeated, tag = "1")]
    pub text: ::prost::alloc::vec::Vec<UserContentPart>,
}
/// An assistant message containing text
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AssistantModelMessage {
    #[prost(message, repeated, tag = "1")]
    pub content_parts: ::prost::alloc::vec::Vec<AssistantContentPart>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UserContentPart {
    #[prost(oneof = "user_content_part::Part", tags = "1")]
    pub part: ::core::option::Option<user_content_part::Part>,
}
/// Nested message and enum types in `UserContentPart`.
pub mod user_content_part {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Part {
        #[prost(message, tag = "1")]
        Text(super::TextPart),
    }
}
/// Content part for assistant messages: can be text, reasoning, or mutation.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AssistantContentPart {
    #[prost(oneof = "assistant_content_part::Part", tags = "1, 2")]
    pub part: ::core::option::Option<assistant_content_part::Part>,
}
/// Nested message and enum types in `AssistantContentPart`.
pub mod assistant_content_part {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Part {
        #[prost(message, tag = "1")]
        Text(super::TextPart),
        #[prost(message, tag = "2")]
        Reasoning(super::ReasoningPart),
    }
}
/// Text part for user or assistant messages.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextPart {
    #[prost(string, tag = "1")]
    pub text: ::prost::alloc::string::String,
}
/// User-supplied image part.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ImagePart {
    /// The base64-encoded image data
    #[prost(bytes = "vec", tag = "1")]
    pub data: ::prost::alloc::vec::Vec<u8>,
    /// The media type of the image (e.g. "image/png", "image/jpeg")
    #[prost(string, optional, tag = "2")]
    pub media_type: ::core::option::Option<::prost::alloc::string::String>,
    /// Optional: the filename of the image
    #[prost(string, optional, tag = "3")]
    pub filename: ::core::option::Option<::prost::alloc::string::String>,
}
/// Reasoning part for assistant messages.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningPart {
    #[prost(string, tag = "1")]
    pub reasoning: ::prost::alloc::string::String,
}
/// StreamChatResponse is a discriminated union response to a StreamChatRequest
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StreamChatResponse {
    #[prost(
        oneof = "stream_chat_response::Response",
        tags = "1, 2, 3, 4, 5, 6, 7, 8, 9, 10"
    )]
    pub response: ::core::option::Option<stream_chat_response::Response>,
}
/// Nested message and enum types in `StreamChatResponse`.
pub mod stream_chat_response {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Response {
        #[prost(message, tag = "1")]
        Finish(super::Finish),
        #[prost(message, tag = "2")]
        Error(super::Error),
        #[prost(message, tag = "3")]
        TextStart(super::TextStart),
        #[prost(message, tag = "4")]
        TextDelta(super::TextDelta),
        #[prost(message, tag = "5")]
        TextEnd(super::TextEnd),
        #[prost(message, tag = "6")]
        ReasoningStart(super::ReasoningStart),
        #[prost(message, tag = "7")]
        ReasoningDelta(super::ReasoningDelta),
        #[prost(message, tag = "8")]
        ReasoningEnd(super::ReasoningEnd),
        /// this will be deprecated in favor of MCP-based mutations
        #[prost(message, tag = "9")]
        WorkbookMutation(super::WorkbookMutation),
        #[prost(message, tag = "10")]
        ToolAction(super::ToolAction),
    }
}
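// A minimal sketch (not part of the generated API) of draining a StreamChat response
// stream: text deltas are accumulated per message id, and Finish or Error ends the loop.
// The reasoning, mutation, and tool-action variants are intentionally ignored here.
#[allow(dead_code)]
async fn example_drain_stream(
    mut stream: tonic::Streaming<StreamChatResponse>,
) -> Result<std::collections::HashMap<String, String>, tonic::Status> {
    let mut texts: std::collections::HashMap<String, String> = std::collections::HashMap::new();
    while let Some(msg) = stream.message().await? {
        match msg.response {
            Some(stream_chat_response::Response::TextStart(start)) => {
                texts.insert(start.id, String::new());
            }
            Some(stream_chat_response::Response::TextDelta(delta)) => {
                texts.entry(delta.id).or_default().push_str(&delta.delta);
            }
            Some(stream_chat_response::Response::Error(err)) => {
                return Err(tonic::Status::internal(err.message));
            }
            Some(stream_chat_response::Response::Finish(_)) => break,
            _ => {}
        }
    }
    Ok(texts)
}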
/// Indicates the end of a chat session
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Finish {
    /// The message ids, in order, of all generated messages for this agent run.
    /// These ids can be used to branch a new conversation from that specific message.
    #[prost(string, repeated, tag = "1")]
    pub ordered_message_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
    /// In the case that this is the first agent run in a conversation thread, we also
    /// return the new conversation title generated
    #[prost(string, optional, tag = "2")]
    pub new_title: ::core::option::Option<::prost::alloc::string::String>,
}
/// An error that occurred during the chat session
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Error {
    #[prost(string, tag = "1")]
    pub message: ::prost::alloc::string::String,
}
/// Indicates the start of a text message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextStart {
    /// uniquely identifies the text message (e.g. uuid) so that the client can
    /// merge parallel message streams (if it happens).
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// A delta (continuation) of a text message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextDelta {
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// The next chunk of text
    #[prost(string, tag = "2")]
    pub delta: ::prost::alloc::string::String,
}
/// Indicates the end of a text message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextEnd {
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// Indicates the start of a reasoning message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningStart {
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// A delta (continuation) of a reasoning message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningDelta {
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// The next chunk of reasoning
    #[prost(string, tag = "2")]
    pub delta: ::prost::alloc::string::String,
}
/// Indicates the end of a reasoning message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningEnd {
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// Add a new tab to the workbook
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AddTabMutation {
    /// if tab_name is not provided, we'll name it "New Tab"
    #[prost(string, optional, tag = "1")]
    pub tab_name: ::core::option::Option<::prost::alloc::string::String>,
}
/// this is the "result" of the mutation
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AddOrUpdatePanelMutation {
    /// JSON-serialized representation of IVizDefinition
    #[prost(string, tag = "1")]
    pub panel_as_json: ::prost::alloc::string::String,
    #[prost(string, tag = "2")]
    pub panel_id: ::prost::alloc::string::String,
    #[prost(int32, tag = "3")]
    pub tab_index: i32,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RemovePanelsMutation {
    #[prost(string, repeated, tag = "1")]
    pub panel_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
/// AddOrReplaceVariableMutation is a mutation to add or replace a variable in the workbook
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AddOrReplaceVariableMutation {
    /// scout_compute_api_ComputeSpecWithContext (this needs to be resolved)
    /// be careful: this uses the scout_compute_api version of ComputeSpecWithContext, which stores the spec as "seriesNode"
    /// and is NOT the same as ComputeSpecWithContext that is stored in INotebook.
    #[prost(string, tag = "1")]
    pub compute_spec_as_json: ::prost::alloc::string::String,
    /// if variable_name is not provided, we'll assume it's a new variable and auto-generate a unique name
    #[prost(string, optional, tag = "2")]
    pub variable_name: ::core::option::Option<::prost::alloc::string::String>,
    #[prost(string, optional, tag = "3")]
    pub display_name: ::core::option::Option<::prost::alloc::string::String>,
}
/// DeleteVariablesMutation is a mutation to delete variables from the workbook
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteVariablesMutation {
    #[prost(string, repeated, tag = "1")]
    pub variable_names: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
/// WorkbookMutation is a mutation to the workbook
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct WorkbookMutation {
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    #[prost(oneof = "workbook_mutation::Mutation", tags = "2, 3, 4, 5, 6")]
    pub mutation: ::core::option::Option<workbook_mutation::Mutation>,
}
/// Nested message and enum types in `WorkbookMutation`.
pub mod workbook_mutation {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Mutation {
        #[prost(message, tag = "2")]
        AddTab(super::AddTabMutation),
        #[prost(message, tag = "3")]
        AddOrUpdatePanel(super::AddOrUpdatePanelMutation),
        #[prost(message, tag = "4")]
        RemovePanels(super::RemovePanelsMutation),
        #[prost(message, tag = "5")]
        AddOrReplaceVariable(super::AddOrReplaceVariableMutation),
        #[prost(message, tag = "6")]
        DeleteVariables(super::DeleteVariablesMutation),
    }
}
/// ToolAction is a concise description of a tool call that the agent is making internally.
/// Without revealing too much detail about the tool call, it informs the user what the agent is doing
/// at a high level. The format is `{tool_action_verb} {tool_target}`, for example:
/// "Search channels for My Datasource"
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ToolAction {
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// "Thought", "Read", "Find", "Look-up", etc.
    #[prost(string, tag = "2")]
    pub tool_action_verb: ::prost::alloc::string::String,
    /// "workbook", "channel", "variable", "panel", etc.
    #[prost(string, optional, tag = "3")]
    pub tool_target: ::core::option::Option<::prost::alloc::string::String>,
}
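// A minimal sketch (not part of the generated API) of rendering a ToolAction for display
// using the `{tool_action_verb} {tool_target}` format described above.
#[allow(dead_code)]
fn example_tool_action_label(action: &ToolAction) -> String {
    match &action.tool_target {
        Some(target) => format!("{} {}", action.tool_action_verb, target),
        None => action.tool_action_verb.clone(),
    }
}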
/// Generated client implementations.
pub mod ai_agent_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// AIAgentService provides AI-powered assistance for general operations
    #[derive(Debug, Clone)]
    pub struct AiAgentServiceClient<T> {
        inner: tonic::client::Grpc<T>,
    }
    impl AiAgentServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> AiAgentServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> AiAgentServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            AiAgentServiceClient::new(InterceptedService::new(inner, interceptor))
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// StreamChat handles server-streaming chat for the AI agent
        pub async fn stream_chat(
            &mut self,
            request: impl tonic::IntoRequest<super::StreamChatRequest>,
        ) -> std::result::Result<
            tonic::Response<tonic::codec::Streaming<super::StreamChatResponse>>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/StreamChat",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(GrpcMethod::new("nominal.ai.v1.AIAgentService", "StreamChat"));
            self.inner.server_streaming(req, path, codec).await
        }
        /// GetConversation handles getting a complete conversation, with an optional limit on the number of messages returned
        pub async fn get_conversation(
            &mut self,
            request: impl tonic::IntoRequest<super::GetConversationRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GetConversationResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/GetConversation",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "GetConversation"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// ListConversations handles getting the list of conversation ids ordered by most recently updated
        pub async fn list_conversations(
            &mut self,
            request: impl tonic::IntoRequest<super::ListConversationsRequest>,
        ) -> std::result::Result<
            tonic::Response<super::ListConversationsResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/ListConversations",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "ListConversations"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// CreateConversation handles creating a conversation and assigning it a conversation rid
        pub async fn create_conversation(
            &mut self,
            request: impl tonic::IntoRequest<super::CreateConversationRequest>,
        ) -> std::result::Result<
            tonic::Response<super::CreateConversationResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/CreateConversation",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "CreateConversation"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// UpdateConversationMetadata handles updating any metadata associated with a conversation
        pub async fn update_conversation_metadata(
            &mut self,
            request: impl tonic::IntoRequest<super::UpdateConversationMetadataRequest>,
        ) -> std::result::Result<
            tonic::Response<super::UpdateConversationMetadataResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/UpdateConversationMetadata",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.AIAgentService",
                        "UpdateConversationMetadata",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// DeleteConversation handles deleting a specific conversation by conversation rid
        pub async fn delete_conversation(
            &mut self,
            request: impl tonic::IntoRequest<super::DeleteConversationRequest>,
        ) -> std::result::Result<
            tonic::Response<super::DeleteConversationResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/DeleteConversation",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "DeleteConversation"),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}
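// A minimal sketch (not part of the generated API) of connecting the generated client and
// starting a chat stream. The endpoint URL and rids are placeholder values; a real caller
// would typically also attach authentication via an interceptor.
#[allow(dead_code)]
async fn example_stream_chat_call() -> Result<(), Box<dyn std::error::Error>> {
    let mut client =
        ai_agent_service_client::AiAgentServiceClient::connect("https://api.example.com").await?;
    let request = StreamChatRequest {
        conversation_rid: "ri.ai.conversation.example".to_string(),
        message: Some(UserModelMessage {
            text: vec![UserContentPart {
                part: Some(user_content_part::Part::Text(TextPart {
                    text: "Summarize the latest run".to_string(),
                })),
            }],
        }),
        images: vec![],
        context: Some(stream_chat_request::Context::Global(GlobalContext {})),
    };
    let mut stream = client.stream_chat(request).await?.into_inner();
    while let Some(response) = stream.message().await? {
        // Each StreamChatResponse carries exactly one oneof variant; see the enum above.
        println!("received a response variant: {}", response.response.is_some());
    }
    Ok(())
}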
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct GetProviderStatusRequest {}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct GetProviderStatusResponse {
    /// Timestamp when the last status was determined
    #[prost(message, optional, tag = "1")]
    pub timestamp: ::core::option::Option<
        super::super::super::google::protobuf::Timestamp,
    >,
    /// Status of the most recent health check probe
    #[prost(message, optional, tag = "2")]
    pub last_status: ::core::option::Option<ProviderStatus>,
    /// Aggregated status over the last 30 minutes (DEGRADED if any check failed or exceeded thresholds)
    /// Deprecated: Use aggregated_status instead. This field is kept for backward compatibility.
    #[deprecated]
    #[prost(message, optional, tag = "3")]
    pub aggregated_status_over_last_30m: ::core::option::Option<ProviderStatus>,
    /// Aggregated status over the last iterations, configurable in the BE (DEGRADED if any check failed or exceeded thresholds)
    #[prost(message, optional, tag = "4")]
    pub aggregated_status: ::core::option::Option<ProviderStatus>,
}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct ProviderStatus {
    #[prost(oneof = "provider_status::Status", tags = "1, 2")]
    pub status: ::core::option::Option<provider_status::Status>,
}
/// Nested message and enum types in `ProviderStatus`.
pub mod provider_status {
    #[derive(Clone, Copy, PartialEq, ::prost::Oneof)]
    pub enum Status {
        #[prost(message, tag = "1")]
        Healthy(super::Healthy),
        #[prost(message, tag = "2")]
        Degraded(super::Degraded),
    }
}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct Healthy {}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct Degraded {
    #[prost(enumeration = "DegradationReason", tag = "1")]
    pub reason: i32,
}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct ProviderMetrics {
    #[prost(int32, tag = "1")]
    pub time_to_first_token_ms: i32,
    #[prost(int32, tag = "2")]
    pub total_time_ms: i32,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum DegradationReason {
    Unspecified = 0,
    HighLatency = 1,
    Failures = 2,
    HighLatencyAndFailures = 3,
}
impl DegradationReason {
    /// String value of the enum field names used in the ProtoBuf definition.
    ///
    /// The values are not transformed in any way and thus are considered stable
    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
    pub fn as_str_name(&self) -> &'static str {
        match self {
            Self::Unspecified => "DEGRADATION_REASON_UNSPECIFIED",
            Self::HighLatency => "DEGRADATION_REASON_HIGH_LATENCY",
            Self::Failures => "DEGRADATION_REASON_FAILURES",
            Self::HighLatencyAndFailures => {
                "DEGRADATION_REASON_HIGH_LATENCY_AND_FAILURES"
            }
        }
    }
    /// Creates an enum from field names used in the ProtoBuf definition.
    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
        match value {
            "DEGRADATION_REASON_UNSPECIFIED" => Some(Self::Unspecified),
            "DEGRADATION_REASON_HIGH_LATENCY" => Some(Self::HighLatency),
            "DEGRADATION_REASON_FAILURES" => Some(Self::Failures),
            "DEGRADATION_REASON_HIGH_LATENCY_AND_FAILURES" => {
                Some(Self::HighLatencyAndFailures)
            }
            _ => None,
        }
    }
}
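// A minimal sketch (not part of the generated API) of turning a ProviderStatus into a
// human-readable label, including the degradation reason when one is present. The
// enumeration field is stored as an i32 per prost conventions, so it is recovered via
// try_from and falls back to Unspecified for unknown values.
#[allow(dead_code)]
fn example_provider_status_label(status: &ProviderStatus) -> String {
    match &status.status {
        Some(provider_status::Status::Healthy(_)) => "healthy".to_string(),
        Some(provider_status::Status::Degraded(degraded)) => {
            let reason = DegradationReason::try_from(degraded.reason)
                .unwrap_or(DegradationReason::Unspecified);
            format!("degraded ({})", reason.as_str_name())
        }
        None => "unknown".to_string(),
    }
}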
/// Generated client implementations.
pub mod model_provider_health_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// ModelProviderHealthService monitors the health and performance of the backing LLM model provider.
    /// It runs lightweight health checks every 5 minutes to measure provider responsiveness and reliability,
    /// independent of the complexity of user prompts.
    #[derive(Debug, Clone)]
    pub struct ModelProviderHealthServiceClient<T> {
        inner: tonic::client::Grpc<T>,
    }
    impl ModelProviderHealthServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> ModelProviderHealthServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> ModelProviderHealthServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            ModelProviderHealthServiceClient::new(
                InterceptedService::new(inner, interceptor),
            )
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// GetProviderStatus returns the current health status of the model provider based on recent health checks.
        /// The status is HEALTHY if all checks in the last 30 minutes passed latency thresholds,
        /// or DEGRADED if any checks exceeded latency thresholds or failed entirely.
        pub async fn get_provider_status(
            &mut self,
            request: impl tonic::IntoRequest<super::GetProviderStatusRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GetProviderStatusResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.ModelProviderHealthService/GetProviderStatus",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.ModelProviderHealthService",
                        "GetProviderStatus",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct IsAiEnabledForUserRequest {}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct IsAiEnabledForUserResponse {
    #[prost(bool, tag = "1")]
    pub is_enabled: bool,
}
/// Generated client implementations.
pub mod ai_features_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// AIFeaturesService provides information about enabled AI features
    #[derive(Debug, Clone)]
    pub struct AiFeaturesServiceClient<T> {
        inner: tonic::client::Grpc<T>,
    }
    impl AiFeaturesServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> AiFeaturesServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> AiFeaturesServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            AiFeaturesServiceClient::new(InterceptedService::new(inner, interceptor))
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// IsAIEnabledForUser can be used to check if AI is enabled for a specific user
        /// Deprecated: This endpoint is no longer maintained and will be removed in a future version.
        #[deprecated]
        pub async fn is_ai_enabled_for_user(
            &mut self,
            request: impl tonic::IntoRequest<super::IsAiEnabledForUserRequest>,
        ) -> std::result::Result<
            tonic::Response<super::IsAiEnabledForUserResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIFeaturesService/IsAIEnabledForUser",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.AIFeaturesService",
                        "IsAIEnabledForUser",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}
/// CreateOrUpdateKnowledgeBaseRequest is a request to create a knowledge base from an attachment in the attachment's
/// workspace. Subsequent calls for the same attachment will overwrite the existing knowledge base.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateOrUpdateKnowledgeBaseRequest {
    #[prost(string, tag = "1")]
    pub attachment_rid: ::prost::alloc::string::String,
    /// summary of the knowledge base, will be used by the LLM to decide when to use it
    #[prost(string, tag = "2")]
    pub summary_description: ::prost::alloc::string::String,
    #[prost(enumeration = "KnowledgeBaseType", optional, tag = "3")]
    pub r#type: ::core::option::Option<i32>,
}
/// CreateOrUpdateKnowledgeBaseResponse is the response to creating/updating a knowledge base
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateOrUpdateKnowledgeBaseResponse {
    #[prost(string, tag = "1")]
    pub knowledge_base_rid: ::prost::alloc::string::String,
}
/// KnowledgeBase represents a knowledge base entry
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct KnowledgeBase {
    #[prost(string, tag = "1")]
    pub knowledge_base_rid: ::prost::alloc::string::String,
    #[prost(string, tag = "2")]
    pub attachment_rid: ::prost::alloc::string::String,
    #[prost(string, tag = "3")]
    pub workspace_rid: ::prost::alloc::string::String,
    #[prost(string, tag = "4")]
    pub summary_description: ::prost::alloc::string::String,
    #[prost(enumeration = "KnowledgeBaseType", tag = "5")]
    pub r#type: i32,
    #[prost(int32, tag = "6")]
    pub version: i32,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListRequest {
    #[prost(string, tag = "1")]
    pub workspace_rid: ::prost::alloc::string::String,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListResponse {
    #[prost(message, repeated, tag = "1")]
    pub knowledge_bases: ::prost::alloc::vec::Vec<KnowledgeBase>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteRequest {
    #[prost(string, tag = "1")]
    pub knowledge_base_rid: ::prost::alloc::string::String,
}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct DeleteResponse {
    #[prost(bool, tag = "1")]
    pub success: bool,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetBatchRequest {
    #[prost(string, repeated, tag = "1")]
    pub knowledge_base_rids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetBatchResponse {
    #[prost(message, repeated, tag = "1")]
    pub knowledge_bases: ::prost::alloc::vec::Vec<KnowledgeBase>,
}
/// GenerateSummaryDescription intentionally returns the generated description to the frontend
/// rather than storing it in the knowledge base directly, because the description needs to be accepted by the user
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GenerateSummaryDescriptionRequest {
    #[prost(string, tag = "1")]
    pub attachment_rid: ::prost::alloc::string::String,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GenerateSummaryDescriptionResponse {
    #[prost(string, tag = "1")]
    pub summary_description: ::prost::alloc::string::String,
}
/// KnowledgeBaseType defines the types of knowledge base
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum KnowledgeBaseType {
    /// defaults to PROMPT
    Unspecified = 0,
    /// knowledge base gets added directly to prompt (needs to be small enough!)
    Prompt = 1,
    /// knowledge base gets used via vector search on embeddings
    Embedding = 2,
}
impl KnowledgeBaseType {
    /// String value of the enum field names used in the ProtoBuf definition.
    ///
    /// The values are not transformed in any way and thus are considered stable
    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
    pub fn as_str_name(&self) -> &'static str {
        match self {
            Self::Unspecified => "KNOWLEDGE_BASE_TYPE_UNSPECIFIED",
            Self::Prompt => "KNOWLEDGE_BASE_TYPE_PROMPT",
            Self::Embedding => "KNOWLEDGE_BASE_TYPE_EMBEDDING",
        }
    }
    /// Creates an enum from field names used in the ProtoBuf definition.
    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
        match value {
            "KNOWLEDGE_BASE_TYPE_UNSPECIFIED" => Some(Self::Unspecified),
            "KNOWLEDGE_BASE_TYPE_PROMPT" => Some(Self::Prompt),
            "KNOWLEDGE_BASE_TYPE_EMBEDDING" => Some(Self::Embedding),
            _ => None,
        }
    }
}
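// A minimal sketch (not part of the generated API) of building a knowledge base request.
// The attachment rid and summary are placeholder values; the enum field is stored as an
// i32 per prost conventions, so the variant is cast when setting it.
#[allow(dead_code)]
fn example_knowledge_base_request() -> CreateOrUpdateKnowledgeBaseRequest {
    CreateOrUpdateKnowledgeBaseRequest {
        attachment_rid: "ri.attachment.example".to_string(),
        summary_description: "Vehicle telemetry glossary used for channel lookups".to_string(),
        // PROMPT inlines the content into the prompt; EMBEDDING uses vector search instead.
        r#type: Some(KnowledgeBaseType::Prompt as i32),
    }
}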
/// Generated client implementations.
pub mod knowledge_base_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// KnowledgeBaseService provides AI-powered knowledge base management
    #[derive(Debug, Clone)]
    pub struct KnowledgeBaseServiceClient<T> {
        inner: tonic::client::Grpc<T>,
    }
    impl KnowledgeBaseServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> KnowledgeBaseServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> KnowledgeBaseServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            KnowledgeBaseServiceClient::new(InterceptedService::new(inner, interceptor))
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// CreateOrUpdateKnowledgeBase creates a knowledge base in the workspace
        pub async fn create_or_update_knowledge_base(
            &mut self,
            request: impl tonic::IntoRequest<super::CreateOrUpdateKnowledgeBaseRequest>,
        ) -> std::result::Result<
            tonic::Response<super::CreateOrUpdateKnowledgeBaseResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/CreateOrUpdateKnowledgeBase",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.KnowledgeBaseService",
                        "CreateOrUpdateKnowledgeBase",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// List returns all knowledge bases in the specified workspace
        pub async fn list(
            &mut self,
            request: impl tonic::IntoRequest<super::ListRequest>,
        ) -> std::result::Result<tonic::Response<super::ListResponse>, tonic::Status> {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/List",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "List"));
            self.inner.unary(req, path, codec).await
        }
        /// Delete removes a knowledge base by its RID
        pub async fn delete(
            &mut self,
            request: impl tonic::IntoRequest<super::DeleteRequest>,
        ) -> std::result::Result<tonic::Response<super::DeleteResponse>, tonic::Status> {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/Delete",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "Delete"));
            self.inner.unary(req, path, codec).await
        }
        /// GetBatch retrieves multiple knowledge bases by their RIDs
        pub async fn get_batch(
            &mut self,
            request: impl tonic::IntoRequest<super::GetBatchRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GetBatchResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/GetBatch",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "GetBatch"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// GenerateSummaryDescription generates a summary description for an attachment rid
        pub async fn generate_summary_description(
            &mut self,
            request: impl tonic::IntoRequest<super::GenerateSummaryDescriptionRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GenerateSummaryDescriptionResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/GenerateSummaryDescription",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.KnowledgeBaseService",
                        "GenerateSummaryDescription",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}