nominal_api/proto/nominal.ai.v1.rs

// This file is @generated by prost-build.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetSnapshotRidByUserMessageIdRequest {
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    #[prost(string, tag = "2")]
    pub message_id: ::prost::alloc::string::String,
}
/// Returns an empty response body when the message id exists but there is no associated snapshot.
/// This occurs when the message was sent in a non-workbook context.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetSnapshotRidByUserMessageIdResponse {
    #[prost(string, optional, tag = "1")]
    pub snapshot_rid: ::core::option::Option<::prost::alloc::string::String>,
}
/// StreamChatRequest is a request to stream chat messages for the AI agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StreamChatRequest {
    /// The conversation ID
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    /// The user message to append to the conversation
    #[prost(message, optional, tag = "2")]
    pub message: ::core::option::Option<UserModelMessage>,
    /// Optional: image files to provide to the agent
    #[prost(message, repeated, tag = "3")]
    pub images: ::prost::alloc::vec::Vec<ImagePart>,
    /// Context-specific fields based on the oneofKind.
    #[prost(oneof = "stream_chat_request::Context", tags = "4, 5")]
    pub context: ::core::option::Option<stream_chat_request::Context>,
}
/// Nested message and enum types in `StreamChatRequest`.
pub mod stream_chat_request {
    /// Context-specific fields based on the oneofKind.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Context {
        #[prost(message, tag = "4")]
        Workbook(super::WorkbookContext),
        #[prost(message, tag = "5")]
        Global(super::GlobalContext),
    }
}
/// WorkbookContext contains workbook-specific context fields
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct WorkbookContext {
    /// RID of the workbook to use for context
    #[prost(string, tag = "1")]
    pub workbook_rid: ::prost::alloc::string::String,
    /// Optional: the user's presence in the workbook
    #[prost(message, optional, tag = "2")]
    pub user_presence: ::core::option::Option<WorkbookUserPresence>,
}
/// GlobalContext is the default context (no workbook-specific context)
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct GlobalContext {}
/// WorkbookUserPresence contains the user's presence in the workbook
/// which is used to describe what the user is viewing at the time of the message.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct WorkbookUserPresence {
    #[prost(int32, optional, tag = "1")]
    pub tab_index: ::core::option::Option<i32>,
    #[prost(message, optional, tag = "2")]
    pub range: ::core::option::Option<TimeRange>,
}
/// CreateConversation will create a new conversation thread.
/// If old conversation rid is not set, a brand new, clear chat is created.
/// If old conversation rid is set without a previous message id, the full conversation thread will be copied.
/// If old conversation rid is set along with a previous message id, the conversation thread up until that message will be copied.
/// The last case is useful for branching a conversation into a new thread.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateConversationRequest {
    #[prost(string, tag = "1")]
    pub title: ::prost::alloc::string::String,
    #[prost(string, tag = "2")]
    pub workspace_rid: ::prost::alloc::string::String,
    #[prost(string, optional, tag = "3")]
    pub old_conversation_rid: ::core::option::Option<::prost::alloc::string::String>,
    #[prost(string, optional, tag = "4")]
    pub previous_message_id: ::core::option::Option<::prost::alloc::string::String>,
}
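// Illustrative usage sketch (hand-written, not generated by prost-build): build a
// CreateConversationRequest that branches an existing thread at a specific message,
// per the doc comment above. The title and workspace RID values are hypothetical
// placeholders.
pub fn example_branch_conversation_request(
    old_conversation_rid: &str,
    previous_message_id: &str,
) -> CreateConversationRequest {
    CreateConversationRequest {
        title: "Branched thread".to_string(),
        workspace_rid: "ri.workspace.example".to_string(),
        old_conversation_rid: Some(old_conversation_rid.to_string()),
        previous_message_id: Some(previous_message_id.to_string()),
    }
}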
/// CreateConversationResponse will return the conversation id for the new conversation
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateConversationResponse {
    #[prost(string, tag = "1")]
    pub new_conversation_rid: ::prost::alloc::string::String,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UpdateConversationMetadataRequest {
    #[prost(string, tag = "1")]
    pub title: ::prost::alloc::string::String,
    #[prost(string, tag = "2")]
    pub conversation_rid: ::prost::alloc::string::String,
}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct UpdateConversationMetadataResponse {}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteConversationRequest {
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct DeleteConversationResponse {}
/// A GetConversationRequest allows you to retrieve a subset of messages from the conversation thread represented
/// by the provided rid. To start from a particular message, you can also provide a message id.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetConversationRequest {
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    #[prost(string, optional, tag = "2")]
    pub page_start_message_id: ::core::option::Option<::prost::alloc::string::String>,
    #[prost(int32, optional, tag = "3")]
    pub max_message_count: ::core::option::Option<i32>,
}
/// ModelMessageWithId pairs a message with its message id, so a given message can be identified later.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ModelMessageWithId {
    #[prost(string, tag = "3")]
    pub message_id: ::prost::alloc::string::String,
    #[prost(oneof = "model_message_with_id::Content", tags = "1, 2")]
    pub content: ::core::option::Option<model_message_with_id::Content>,
}
/// Nested message and enum types in `ModelMessageWithId`.
pub mod model_message_with_id {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Content {
        #[prost(message, tag = "1")]
        Message(super::ModelMessage),
        #[prost(message, tag = "2")]
        ToolAction(super::ToolAction),
    }
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetConversationResponse {
    #[prost(message, repeated, tag = "1")]
    pub ordered_messages: ::prost::alloc::vec::Vec<ModelMessageWithId>,
    #[prost(message, optional, tag = "2")]
    pub conversation_metadata: ::core::option::Option<ConversationMetadata>,
}
/// Lists all conversation threads that this user has in this workspace
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListConversationsRequest {
    #[prost(string, tag = "1")]
    pub workspace_rid: ::prost::alloc::string::String,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ConversationMetadata {
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    #[prost(string, tag = "2")]
    pub title: ::prost::alloc::string::String,
    #[prost(message, optional, tag = "3")]
    pub created_at: ::core::option::Option<
        super::super::super::google::protobuf::Timestamp,
    >,
    #[prost(message, optional, tag = "4")]
    pub last_updated_at: ::core::option::Option<
        super::super::super::google::protobuf::Timestamp,
    >,
}
/// ListConversationsResponse is a list of conversations whose rids can be used in a GetConversationRequest
/// to get a full conversation from storage. These are ordered by creation time.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListConversationsResponse {
    #[prost(message, repeated, tag = "1")]
    pub conversations: ::prost::alloc::vec::Vec<ConversationMetadata>,
}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct TimeRange {
    #[prost(message, optional, tag = "1")]
    pub range_start: ::core::option::Option<Timestamp>,
    #[prost(message, optional, tag = "2")]
    pub range_end: ::core::option::Option<Timestamp>,
}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct Timestamp {
    #[prost(int32, tag = "1")]
    pub seconds: i32,
    #[prost(int32, tag = "2")]
    pub nanoseconds: i32,
}
/// ModelMessage is a discriminated union of system, user, assistant, and tool messages.
/// Each message type has its own structure and content.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ModelMessage {
    #[prost(oneof = "model_message::Kind", tags = "1, 2")]
    pub kind: ::core::option::Option<model_message::Kind>,
}
/// Nested message and enum types in `ModelMessage`.
pub mod model_message {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Kind {
        #[prost(message, tag = "1")]
        User(super::UserModelMessage),
        #[prost(message, tag = "2")]
        Assistant(super::AssistantModelMessage),
    }
}
/// A user message containing text
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UserModelMessage {
    #[prost(message, repeated, tag = "1")]
    pub text: ::prost::alloc::vec::Vec<UserContentPart>,
}
/// An assistant message containing text
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AssistantModelMessage {
    #[prost(message, repeated, tag = "1")]
    pub content_parts: ::prost::alloc::vec::Vec<AssistantContentPart>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UserContentPart {
    #[prost(oneof = "user_content_part::Part", tags = "1")]
    pub part: ::core::option::Option<user_content_part::Part>,
}
/// Nested message and enum types in `UserContentPart`.
pub mod user_content_part {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Part {
        #[prost(message, tag = "1")]
        Text(super::TextPart),
    }
}
/// Content part for assistant messages: can be text, reasoning, or mutation.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AssistantContentPart {
    #[prost(oneof = "assistant_content_part::Part", tags = "1, 2")]
    pub part: ::core::option::Option<assistant_content_part::Part>,
}
/// Nested message and enum types in `AssistantContentPart`.
pub mod assistant_content_part {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Part {
        #[prost(message, tag = "1")]
        Text(super::TextPart),
        #[prost(message, tag = "2")]
        Reasoning(super::ReasoningPart),
    }
}
/// Text part for user or assistant messages.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextPart {
    #[prost(string, tag = "1")]
    pub text: ::prost::alloc::string::String,
}
/// User-supplied image part.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ImagePart {
    /// The base64-encoded image data
    #[prost(bytes = "vec", tag = "1")]
    pub data: ::prost::alloc::vec::Vec<u8>,
    /// The media type of the image (e.g. "image/png", "image/jpeg")
    #[prost(string, optional, tag = "2")]
    pub media_type: ::core::option::Option<::prost::alloc::string::String>,
    /// Optional: the filename of the image
    #[prost(string, optional, tag = "3")]
    pub filename: ::core::option::Option<::prost::alloc::string::String>,
}
/// Reasoning part for assistant messages.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningPart {
    #[prost(string, tag = "1")]
    pub reasoning: ::prost::alloc::string::String,
}
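// Illustrative sketch (hand-written, not generated by prost-build): flatten a stored
// ModelMessageWithId into plain text, showing how the nested oneofs
// (content -> kind -> part) are matched in practice. Reasoning parts and tool
// actions are deliberately skipped in this sketch.
pub fn example_message_text(message: &ModelMessageWithId) -> String {
    match &message.content {
        Some(model_message_with_id::Content::Message(model_message)) => {
            match &model_message.kind {
                Some(model_message::Kind::User(user)) => user
                    .text
                    .iter()
                    .filter_map(|part| match &part.part {
                        Some(user_content_part::Part::Text(text)) => Some(text.text.as_str()),
                        None => None,
                    })
                    .collect::<Vec<_>>()
                    .join(""),
                Some(model_message::Kind::Assistant(assistant)) => assistant
                    .content_parts
                    .iter()
                    .filter_map(|part| match &part.part {
                        Some(assistant_content_part::Part::Text(text)) => {
                            Some(text.text.as_str())
                        }
                        _ => None,
                    })
                    .collect::<Vec<_>>()
                    .join(""),
                None => String::new(),
            }
        }
        // Tool actions carry no user-visible text in this sketch.
        Some(model_message_with_id::Content::ToolAction(_)) | None => String::new(),
    }
}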
/// StreamChatResponse is a discriminated union response to a StreamChatRequest
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StreamChatResponse {
    #[prost(
        oneof = "stream_chat_response::Response",
        tags = "1, 2, 3, 4, 5, 6, 7, 8, 9, 10"
    )]
    pub response: ::core::option::Option<stream_chat_response::Response>,
}
/// Nested message and enum types in `StreamChatResponse`.
pub mod stream_chat_response {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Response {
        #[prost(message, tag = "1")]
        Finish(super::Finish),
        #[prost(message, tag = "2")]
        Error(super::Error),
        #[prost(message, tag = "3")]
        TextStart(super::TextStart),
        #[prost(message, tag = "4")]
        TextDelta(super::TextDelta),
        #[prost(message, tag = "5")]
        TextEnd(super::TextEnd),
        #[prost(message, tag = "6")]
        ReasoningStart(super::ReasoningStart),
        #[prost(message, tag = "7")]
        ReasoningDelta(super::ReasoningDelta),
        #[prost(message, tag = "8")]
        ReasoningEnd(super::ReasoningEnd),
        /// this will be deprecated in favor of MCP-based mutations
        #[prost(message, tag = "9")]
        WorkbookMutation(super::WorkbookMutation),
        #[prost(message, tag = "10")]
        ToolAction(super::ToolAction),
    }
}
/// Indicates the end of a chat session
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Finish {
    /// The message ids, in order, of all generated messages for this agent run.
    /// These ids can be used to branch a new conversation from a specific message.
    #[prost(string, repeated, tag = "1")]
    pub ordered_message_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
    /// If this is the first agent run in a conversation thread, we also
    /// return the newly generated conversation title.
    #[prost(string, optional, tag = "2")]
    pub new_title: ::core::option::Option<::prost::alloc::string::String>,
}
/// An error that occurred during the chat session
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Error {
    #[prost(string, tag = "1")]
    pub message: ::prost::alloc::string::String,
}
/// Indicates the start of a text message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextStart {
    /// Uniquely identifies the text message (e.g. a uuid) so that the client can
    /// merge parallel message streams, should they occur.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// A delta (continuation) of a text message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextDelta {
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// The next chunk of text
    #[prost(string, tag = "2")]
    pub delta: ::prost::alloc::string::String,
}
/// Indicates the end of a text message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextEnd {
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// Indicates the start of a reasoning message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningStart {
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// A delta (continuation) of a reasoning message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningDelta {
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// The next chunk of reasoning
    #[prost(string, tag = "2")]
    pub delta: ::prost::alloc::string::String,
}
/// Indicates the end of a reasoning message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningEnd {
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// Add a new tab to the workbook
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AddTabMutation {
    /// if tab_name is not provided, we'll name it "New Tab"
    #[prost(string, optional, tag = "1")]
    pub tab_name: ::core::option::Option<::prost::alloc::string::String>,
}
/// this is the "result" of the mutation
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AddOrUpdatePanelMutation {
    /// JSON-serialized representation of IVizDefinition
    #[prost(string, tag = "1")]
    pub panel_as_json: ::prost::alloc::string::String,
    #[prost(string, tag = "2")]
    pub panel_id: ::prost::alloc::string::String,
    #[prost(int32, tag = "3")]
    pub tab_index: i32,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RemovePanelsMutation {
    #[prost(string, repeated, tag = "1")]
    pub panel_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
/// AddOrReplaceVariableMutation is a mutation to add or replace a variable in the workbook
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AddOrReplaceVariableMutation {
    /// scout_compute_api_ComputeSpecWithContext (this needs to be resolved)
    /// be careful: this uses the scout_compute_api version of ComputeSpecWithContext, which stores the spec as "seriesNode"
    /// and is NOT the same as ComputeSpecWithContext that is stored in INotebook.
    #[prost(string, tag = "1")]
    pub compute_spec_as_json: ::prost::alloc::string::String,
    /// if variable_name is not provided, we'll assume it's a new variable and auto-generate a unique name
    #[prost(string, optional, tag = "2")]
    pub variable_name: ::core::option::Option<::prost::alloc::string::String>,
    #[prost(string, optional, tag = "3")]
    pub display_name: ::core::option::Option<::prost::alloc::string::String>,
}
/// DeleteVariablesMutation is a mutation to delete variables from the workbook
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteVariablesMutation {
    #[prost(string, repeated, tag = "1")]
    pub variable_names: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
/// WorkbookMutation is a mutation to the workbook
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct WorkbookMutation {
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    #[prost(oneof = "workbook_mutation::Mutation", tags = "2, 3, 4, 5, 6")]
    pub mutation: ::core::option::Option<workbook_mutation::Mutation>,
}
/// Nested message and enum types in `WorkbookMutation`.
pub mod workbook_mutation {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Mutation {
        #[prost(message, tag = "2")]
        AddTab(super::AddTabMutation),
        #[prost(message, tag = "3")]
        AddOrUpdatePanel(super::AddOrUpdatePanelMutation),
        #[prost(message, tag = "4")]
        RemovePanels(super::RemovePanelsMutation),
        #[prost(message, tag = "5")]
        AddOrReplaceVariable(super::AddOrReplaceVariableMutation),
        #[prost(message, tag = "6")]
        DeleteVariables(super::DeleteVariablesMutation),
    }
}
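// Illustrative sketch (hand-written, not generated by prost-build): label a
// WorkbookMutation for display, e.g. in a client-side activity log, by dispatching
// on the generated oneof.
pub fn example_mutation_label(mutation: &WorkbookMutation) -> &'static str {
    match &mutation.mutation {
        Some(workbook_mutation::Mutation::AddTab(_)) => "add tab",
        Some(workbook_mutation::Mutation::AddOrUpdatePanel(_)) => "add or update panel",
        Some(workbook_mutation::Mutation::RemovePanels(_)) => "remove panels",
        Some(workbook_mutation::Mutation::AddOrReplaceVariable(_)) => "add or replace variable",
        Some(workbook_mutation::Mutation::DeleteVariables(_)) => "delete variables",
        None => "unknown mutation",
    }
}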
/// A concise description of a tool call that the agent is making internally.
/// Without revealing too much detail about the tool call, it informs the user what the agent is doing
/// at a high level. The format is `{tool_action_verb} {tool_target}`, for example:
/// "Search channels for My Datasource"
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ToolAction {
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// "Thought", "Read", "Find", "Look-up", etc.
    #[prost(string, tag = "2")]
    pub tool_action_verb: ::prost::alloc::string::String,
    /// "workbook", "channel", "variable", "panel", etc.
    #[prost(string, optional, tag = "3")]
    pub tool_target: ::core::option::Option<::prost::alloc::string::String>,
}
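// Illustrative sketch (hand-written, not generated by prost-build): render a
// ToolAction with the `{tool_action_verb} {tool_target}` convention described in
// the comment above, falling back to the verb alone when no target is set.
pub fn example_tool_action_label(action: &ToolAction) -> String {
    match &action.tool_target {
        Some(target) => format!("{} {}", action.tool_action_verb, target),
        None => action.tool_action_verb.clone(),
    }
}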
/// Generated client implementations.
pub mod ai_agent_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// AIAgentService provides AI-powered assistance for general operations
    #[derive(Debug, Clone)]
    pub struct AiAgentServiceClient<T> {
        inner: tonic::client::Grpc<T>,
    }
    impl AiAgentServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> AiAgentServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> AiAgentServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            AiAgentServiceClient::new(InterceptedService::new(inner, interceptor))
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// StreamChat handles streaming chat responses for the AI agent
        pub async fn stream_chat(
            &mut self,
            request: impl tonic::IntoRequest<super::StreamChatRequest>,
        ) -> std::result::Result<
            tonic::Response<tonic::codec::Streaming<super::StreamChatResponse>>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/StreamChat",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(GrpcMethod::new("nominal.ai.v1.AIAgentService", "StreamChat"));
            self.inner.server_streaming(req, path, codec).await
        }
        /// GetConversation returns the messages of a conversation, with an optional limit on the number of messages returned
        pub async fn get_conversation(
            &mut self,
            request: impl tonic::IntoRequest<super::GetConversationRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GetConversationResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/GetConversation",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "GetConversation"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// ListConversations handles getting the list of conversation ids ordered by most recently updated
        pub async fn list_conversations(
            &mut self,
            request: impl tonic::IntoRequest<super::ListConversationsRequest>,
        ) -> std::result::Result<
            tonic::Response<super::ListConversationsResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/ListConversations",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "ListConversations"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// CreateConversation handles creating a conversation and assigning it a conversation rid
        pub async fn create_conversation(
            &mut self,
            request: impl tonic::IntoRequest<super::CreateConversationRequest>,
        ) -> std::result::Result<
            tonic::Response<super::CreateConversationResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/CreateConversation",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "CreateConversation"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// UpdateConversationMetadata handles updating any metadata associated with a conversation
        pub async fn update_conversation_metadata(
            &mut self,
            request: impl tonic::IntoRequest<super::UpdateConversationMetadataRequest>,
        ) -> std::result::Result<
            tonic::Response<super::UpdateConversationMetadataResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/UpdateConversationMetadata",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.AIAgentService",
                        "UpdateConversationMetadata",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// DeleteConversation handles deleting a specific conversation by conversation rid
        pub async fn delete_conversation(
            &mut self,
            request: impl tonic::IntoRequest<super::DeleteConversationRequest>,
        ) -> std::result::Result<
            tonic::Response<super::DeleteConversationResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/DeleteConversation",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "DeleteConversation"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// GetSnapshotRidByUserMessageId handles resolving the snapshot rid of the workbook at the time the message is sent
        pub async fn get_snapshot_rid_by_user_message_id(
            &mut self,
            request: impl tonic::IntoRequest<super::GetSnapshotRidByUserMessageIdRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GetSnapshotRidByUserMessageIdResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/GetSnapshotRidByUserMessageId",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.AIAgentService",
                        "GetSnapshotRidByUserMessageId",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}
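// Illustrative usage sketch (hand-written, not generated by prost-build): connect the
// generated client, send one user message in a workbook context, and print streamed
// text deltas. The endpoint URL and RIDs are hypothetical placeholders; error
// handling is reduced to `?` for brevity.
pub async fn example_stream_chat(
    conversation_rid: String,
    workbook_rid: String,
    prompt: String,
) -> Result<(), Box<dyn std::error::Error>> {
    let mut client = ai_agent_service_client::AiAgentServiceClient::connect(
        "https://api.example.nominal.test",
    )
    .await?;
    let request = StreamChatRequest {
        conversation_rid,
        message: Some(UserModelMessage {
            text: vec![UserContentPart {
                part: Some(user_content_part::Part::Text(TextPart { text: prompt })),
            }],
        }),
        images: vec![],
        context: Some(stream_chat_request::Context::Workbook(WorkbookContext {
            workbook_rid,
            user_presence: None,
        })),
    };
    let mut stream = client.stream_chat(request).await?.into_inner();
    while let Some(response) = stream.message().await? {
        match response.response {
            Some(stream_chat_response::Response::TextDelta(delta)) => print!("{}", delta.delta),
            Some(stream_chat_response::Response::Error(err)) => eprintln!("error: {}", err.message),
            Some(stream_chat_response::Response::Finish(_)) => break,
            _ => {}
        }
    }
    Ok(())
}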
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct GetProviderStatusRequest {}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct GetProviderStatusResponse {
    /// Timestamp when the last status was determined
    #[prost(message, optional, tag = "1")]
    pub timestamp: ::core::option::Option<
        super::super::super::google::protobuf::Timestamp,
    >,
    /// Status of the most recent health check probe
    #[prost(message, optional, tag = "2")]
    pub last_status: ::core::option::Option<ProviderStatus>,
    /// Aggregated status over the last 30 minutes (DEGRADED if any check failed or exceeded thresholds)
    /// Deprecated: Use aggregated_status instead. This field is kept for backward compatibility.
    #[deprecated]
    #[prost(message, optional, tag = "3")]
    pub aggregated_status_over_last_30m: ::core::option::Option<ProviderStatus>,
    /// Aggregated status over the most recent health-check iterations, with the count configurable in the backend
    /// (DEGRADED if any check failed or exceeded thresholds)
    #[prost(message, optional, tag = "4")]
    pub aggregated_status: ::core::option::Option<ProviderStatus>,
}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct ProviderStatus {
    #[prost(oneof = "provider_status::Status", tags = "1, 2")]
    pub status: ::core::option::Option<provider_status::Status>,
}
/// Nested message and enum types in `ProviderStatus`.
pub mod provider_status {
    #[derive(Clone, Copy, PartialEq, ::prost::Oneof)]
    pub enum Status {
        #[prost(message, tag = "1")]
        Healthy(super::Healthy),
        #[prost(message, tag = "2")]
        Degraded(super::Degraded),
    }
}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct Healthy {}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct Degraded {
    #[prost(enumeration = "DegradationReason", tag = "1")]
    pub reason: i32,
}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct ProviderMetrics {
    #[prost(int32, tag = "1")]
    pub time_to_first_token_ms: i32,
    #[prost(int32, tag = "2")]
    pub total_time_ms: i32,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum DegradationReason {
    Unspecified = 0,
    HighLatency = 1,
    Failures = 2,
    HighLatencyAndFailures = 3,
}
impl DegradationReason {
    /// String value of the enum field names used in the ProtoBuf definition.
    ///
    /// The values are not transformed in any way and thus are considered stable
    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
    pub fn as_str_name(&self) -> &'static str {
        match self {
            Self::Unspecified => "DEGRADATION_REASON_UNSPECIFIED",
            Self::HighLatency => "DEGRADATION_REASON_HIGH_LATENCY",
            Self::Failures => "DEGRADATION_REASON_FAILURES",
            Self::HighLatencyAndFailures => {
                "DEGRADATION_REASON_HIGH_LATENCY_AND_FAILURES"
            }
        }
    }
    /// Creates an enum from field names used in the ProtoBuf definition.
    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
        match value {
            "DEGRADATION_REASON_UNSPECIFIED" => Some(Self::Unspecified),
            "DEGRADATION_REASON_HIGH_LATENCY" => Some(Self::HighLatency),
            "DEGRADATION_REASON_FAILURES" => Some(Self::Failures),
            "DEGRADATION_REASON_HIGH_LATENCY_AND_FAILURES" => {
                Some(Self::HighLatencyAndFailures)
            }
            _ => None,
        }
    }
}
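// Illustrative sketch (hand-written, not generated by prost-build): summarize a
// ProviderStatus as a display string. Assumes a prost version that derives
// `TryFrom<i32>` for enumerations when decoding the raw `reason` field.
pub fn example_status_summary(status: &ProviderStatus) -> &'static str {
    match &status.status {
        Some(provider_status::Status::Healthy(_)) => "healthy",
        Some(provider_status::Status::Degraded(degraded)) => {
            DegradationReason::try_from(degraded.reason)
                .unwrap_or(DegradationReason::Unspecified)
                .as_str_name()
        }
        None => "unknown",
    }
}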
/// Generated client implementations.
pub mod model_provider_health_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// ModelProviderHealthService monitors the health and performance of the backing LLM model provider.
    /// It runs lightweight health checks every 5 minutes to measure provider responsiveness and reliability,
    /// independent of the complexity of user prompts.
    #[derive(Debug, Clone)]
    pub struct ModelProviderHealthServiceClient<T> {
        inner: tonic::client::Grpc<T>,
    }
    impl ModelProviderHealthServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> ModelProviderHealthServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> ModelProviderHealthServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            ModelProviderHealthServiceClient::new(
                InterceptedService::new(inner, interceptor),
            )
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// GetProviderStatus returns the current health status of the model provider based on recent health checks.
        /// The status is HEALTHY if all checks in the last 30 minutes passed latency thresholds,
        /// or DEGRADED if any checks exceeded latency thresholds or failed entirely.
        pub async fn get_provider_status(
            &mut self,
            request: impl tonic::IntoRequest<super::GetProviderStatusRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GetProviderStatusResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.ModelProviderHealthService/GetProviderStatus",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.ModelProviderHealthService",
                        "GetProviderStatus",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}
/// CreateOrUpdateKnowledgeBaseRequest is a request to create a knowledge base from an attachment in the attachment's
/// workspace. Subsequent calls for the same attachment will overwrite the existing knowledge base.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateOrUpdateKnowledgeBaseRequest {
    #[prost(string, tag = "1")]
    pub attachment_rid: ::prost::alloc::string::String,
    /// Summary of the knowledge base; used by the LLM to decide when to use it
    #[prost(string, tag = "2")]
    pub summary_description: ::prost::alloc::string::String,
    #[prost(enumeration = "KnowledgeBaseType", optional, tag = "3")]
    pub r#type: ::core::option::Option<i32>,
}
/// CreateOrUpdateKnowledgeBaseResponse is the response to creating/updating a knowledge base
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateOrUpdateKnowledgeBaseResponse {
    #[prost(string, tag = "1")]
    pub knowledge_base_rid: ::prost::alloc::string::String,
}
/// KnowledgeBase represents a knowledge base entry
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct KnowledgeBase {
    #[prost(string, tag = "1")]
    pub knowledge_base_rid: ::prost::alloc::string::String,
    #[prost(string, tag = "2")]
    pub attachment_rid: ::prost::alloc::string::String,
    #[prost(string, tag = "3")]
    pub workspace_rid: ::prost::alloc::string::String,
    #[prost(string, tag = "4")]
    pub summary_description: ::prost::alloc::string::String,
    #[prost(enumeration = "KnowledgeBaseType", tag = "5")]
    pub r#type: i32,
    #[prost(int32, tag = "6")]
    pub version: i32,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListRequest {
    #[prost(string, tag = "1")]
    pub workspace_rid: ::prost::alloc::string::String,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListResponse {
    #[prost(message, repeated, tag = "1")]
    pub knowledge_bases: ::prost::alloc::vec::Vec<KnowledgeBase>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteRequest {
    #[prost(string, tag = "1")]
    pub knowledge_base_rid: ::prost::alloc::string::String,
}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct DeleteResponse {
    #[prost(bool, tag = "1")]
    pub success: bool,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetBatchRequest {
    #[prost(string, repeated, tag = "1")]
    pub knowledge_base_rids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetBatchResponse {
    #[prost(message, repeated, tag = "1")]
    pub knowledge_bases: ::prost::alloc::vec::Vec<KnowledgeBase>,
}
/// GenerateSummaryDescription intentionally returns the generated description to the frontend
/// rather than storing it in the knowledge base directly, because the description needs to be accepted by the user
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GenerateSummaryDescriptionRequest {
    #[prost(string, tag = "1")]
    pub attachment_rid: ::prost::alloc::string::String,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GenerateSummaryDescriptionResponse {
    #[prost(string, tag = "1")]
    pub summary_description: ::prost::alloc::string::String,
}
/// KnowledgeBaseType defines the types of knowledge base
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum KnowledgeBaseType {
    /// defaults to PROMPT
    Unspecified = 0,
    /// knowledge base gets added directly to prompt (needs to be small enough!)
    Prompt = 1,
    /// knowledge base gets used via vector search on embeddings
    Embedding = 2,
}
impl KnowledgeBaseType {
    /// String value of the enum field names used in the ProtoBuf definition.
    ///
    /// The values are not transformed in any way and thus are considered stable
    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
    pub fn as_str_name(&self) -> &'static str {
        match self {
            Self::Unspecified => "KNOWLEDGE_BASE_TYPE_UNSPECIFIED",
            Self::Prompt => "KNOWLEDGE_BASE_TYPE_PROMPT",
            Self::Embedding => "KNOWLEDGE_BASE_TYPE_EMBEDDING",
        }
    }
    /// Creates an enum from field names used in the ProtoBuf definition.
    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
        match value {
            "KNOWLEDGE_BASE_TYPE_UNSPECIFIED" => Some(Self::Unspecified),
            "KNOWLEDGE_BASE_TYPE_PROMPT" => Some(Self::Prompt),
            "KNOWLEDGE_BASE_TYPE_EMBEDDING" => Some(Self::Embedding),
            _ => None,
        }
    }
}
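// Illustrative sketch (hand-written, not generated by prost-build): register an
// attachment as a prompt-type knowledge base. The summary text would typically come
// from GenerateSummaryDescription after the user has accepted it.
pub fn example_prompt_knowledge_base_request(
    attachment_rid: String,
    summary_description: String,
) -> CreateOrUpdateKnowledgeBaseRequest {
    CreateOrUpdateKnowledgeBaseRequest {
        attachment_rid,
        summary_description,
        r#type: Some(KnowledgeBaseType::Prompt as i32),
    }
}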
/// Generated client implementations.
pub mod knowledge_base_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// KnowledgeBaseService provides AI-powered knowledge base management
    #[derive(Debug, Clone)]
    pub struct KnowledgeBaseServiceClient<T> {
        inner: tonic::client::Grpc<T>,
    }
    impl KnowledgeBaseServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> KnowledgeBaseServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> KnowledgeBaseServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            KnowledgeBaseServiceClient::new(InterceptedService::new(inner, interceptor))
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// CreateOrUpdateKnowledgeBase creates or updates a knowledge base in the workspace
        pub async fn create_or_update_knowledge_base(
            &mut self,
            request: impl tonic::IntoRequest<super::CreateOrUpdateKnowledgeBaseRequest>,
        ) -> std::result::Result<
            tonic::Response<super::CreateOrUpdateKnowledgeBaseResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/CreateOrUpdateKnowledgeBase",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.KnowledgeBaseService",
                        "CreateOrUpdateKnowledgeBase",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// List returns all knowledge bases in the specified workspace
        pub async fn list(
            &mut self,
            request: impl tonic::IntoRequest<super::ListRequest>,
        ) -> std::result::Result<tonic::Response<super::ListResponse>, tonic::Status> {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/List",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "List"));
            self.inner.unary(req, path, codec).await
        }
        /// Delete removes a knowledge base by its RID
        pub async fn delete(
            &mut self,
            request: impl tonic::IntoRequest<super::DeleteRequest>,
        ) -> std::result::Result<tonic::Response<super::DeleteResponse>, tonic::Status> {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/Delete",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "Delete"));
            self.inner.unary(req, path, codec).await
        }
        /// GetBatch retrieves multiple knowledge bases by their RIDs
        pub async fn get_batch(
            &mut self,
            request: impl tonic::IntoRequest<super::GetBatchRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GetBatchResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/GetBatch",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "GetBatch"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// GenerateSummaryDescription generates a summary description for an attachment rid
        pub async fn generate_summary_description(
            &mut self,
            request: impl tonic::IntoRequest<super::GenerateSummaryDescriptionRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GenerateSummaryDescriptionResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/GenerateSummaryDescription",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.KnowledgeBaseService",
                        "GenerateSummaryDescription",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}