nominal_api/proto/
nominal.ai.v1.rs

1// This file is @generated by prost-build.
/// Request for resolving the workbook snapshot RID recorded for a specific user message.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetSnapshotRidByUserMessageIdRequest {
    /// RID of the conversation the message belongs to.
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    /// ID of the user message whose snapshot is being looked up.
    #[prost(string, tag = "2")]
    pub message_id: ::prost::alloc::string::String,
}
/// Will return an empty response body in the case where the message id exists, but there is no associated snapshot
/// This occurs in the instance where a message was sent in a non-workbook context
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetSnapshotRidByUserMessageIdResponse {
    /// Snapshot RID, or `None` when the message has no associated snapshot.
    #[prost(string, optional, tag = "1")]
    pub snapshot_rid: ::core::option::Option<::prost::alloc::string::String>,
}
/// ReadOnlyMode configures read-only mode where edit tools are shadowed/disabled
/// (empty marker message; used as a variant of the `ConversationMode` oneof).
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct ReadOnlyMode {}
/// EditMode configures edit mode where all tools are available
/// (empty marker message; used as a variant of the `ConversationMode` oneof).
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct EditMode {}
/// ConversationMode specifies the mode of the conversation
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct ConversationMode {
    /// Exactly one of read-only (tag 1) or edit (tag 2); `None` if unset on the wire.
    #[prost(oneof = "conversation_mode::Mode", tags = "1, 2")]
    pub mode: ::core::option::Option<conversation_mode::Mode>,
}
/// Nested message and enum types in `ConversationMode`.
pub mod conversation_mode {
    /// Wire-level oneof for `ConversationMode.mode`.
    #[derive(Clone, Copy, PartialEq, ::prost::Oneof)]
    pub enum Mode {
        /// Edit tools are shadowed/disabled.
        #[prost(message, tag = "1")]
        ReadOnly(super::ReadOnlyMode),
        /// All tools are available.
        #[prost(message, tag = "2")]
        Edit(super::EditMode),
    }
}
/// StreamChatRequest is a request to stream chat messages for AI agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StreamChatRequest {
    /// The conversation ID
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    /// The user message to append to the conversation
    #[prost(message, optional, tag = "2")]
    pub message: ::core::option::Option<UserModelMessage>,
    /// Optional: image files to provide to the agent
    #[prost(message, repeated, tag = "3")]
    pub images: ::prost::alloc::vec::Vec<ImagePart>,
    /// Context-specific fields based on the oneofKind.
    /// Either workbook-scoped (tag 4) or global (tag 5) context.
    #[prost(oneof = "stream_chat_request::Context", tags = "4, 5")]
    pub context: ::core::option::Option<stream_chat_request::Context>,
}
/// Nested message and enum types in `StreamChatRequest`.
pub mod stream_chat_request {
    /// Context-specific fields based on the oneofKind.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Context {
        /// The chat targets a specific workbook.
        #[prost(message, tag = "4")]
        Workbook(super::WorkbookContext),
        /// The chat has no workbook context.
        #[prost(message, tag = "5")]
        Global(super::GlobalContext),
    }
}
/// WorkbookContext contains workbook-specific context fields
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct WorkbookContext {
    /// RID of the workbook to use for context
    #[prost(string, tag = "1")]
    pub workbook_rid: ::prost::alloc::string::String,
    /// Optional: the user's presence in the workbook
    /// (what tab/time range the user is looking at when the message is sent).
    #[prost(message, optional, tag = "2")]
    pub user_presence: ::core::option::Option<WorkbookUserPresence>,
}
/// DefaultContext (no context) — empty marker message for the non-workbook case
/// of the `StreamChatRequest.context` oneof.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct GlobalContext {}
/// WorkbookUserPresence contains the user's presence in the workbook
/// which is used to describe what the user is viewing at the time of the message.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct WorkbookUserPresence {
    /// Index of the workbook tab the user has open, if any.
    #[prost(int32, optional, tag = "1")]
    pub tab_index: ::core::option::Option<i32>,
    /// Time range the user is currently viewing, if any.
    #[prost(message, optional, tag = "2")]
    pub range: ::core::option::Option<TimeRange>,
}
/// CreateConversation request will create a new conversation thread
/// if old conversation id is not set, a brand new, clear chat is created
/// If old conversation id is set without a previous message id, the full conversation thread will be copied
/// if old conversation id is set with a previous message id, the conversation thread up until that message will be copied
/// the above case is useful for branching a conversation into a new thread
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateConversationRequest {
    /// Display title for the new conversation.
    #[prost(string, tag = "1")]
    pub title: ::prost::alloc::string::String,
    /// Workspace the conversation is created in.
    #[prost(string, tag = "2")]
    pub workspace_rid: ::prost::alloc::string::String,
    /// Optional: existing conversation to copy/branch from (see rules above).
    #[prost(string, optional, tag = "3")]
    pub old_conversation_rid: ::core::option::Option<::prost::alloc::string::String>,
    /// Optional: copy the old thread only up to (and including) this message.
    #[prost(string, optional, tag = "4")]
    pub previous_message_id: ::core::option::Option<::prost::alloc::string::String>,
    /// Optional: initial mode (read-only vs edit) for the new conversation.
    #[prost(message, optional, tag = "5")]
    pub conversation_mode: ::core::option::Option<ConversationMode>,
}
/// CreateConversationResponse will return the conversation id for the new conversation
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateConversationResponse {
    /// RID of the newly created conversation.
    #[prost(string, tag = "1")]
    pub new_conversation_rid: ::prost::alloc::string::String,
}
/// Updates the fields if specified (optional means no change for that field)
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UpdateConversationMetadataRequest {
    /// New title; `None` leaves the title unchanged.
    #[prost(string, optional, tag = "1")]
    pub title: ::core::option::Option<::prost::alloc::string::String>,
    /// RID of the conversation to update (required).
    #[prost(string, tag = "2")]
    pub conversation_rid: ::prost::alloc::string::String,
    /// New mode; `None` leaves the mode unchanged.
    #[prost(message, optional, tag = "3")]
    pub conversation_mode: ::core::option::Option<ConversationMode>,
}
/// Empty acknowledgement for `UpdateConversationMetadata`.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct UpdateConversationMetadataResponse {}
/// Request to delete a single conversation by RID.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteConversationRequest {
    /// RID of the conversation to delete.
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
}
/// Empty acknowledgement for `DeleteConversation`.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct DeleteConversationResponse {}
/// a GetConversationRequest allows you to retrieve a subset of messages from a conversation thread represented
/// by provided rid. To start from a particular message - you can also provide a message id.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetConversationRequest {
    /// RID of the conversation to read.
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    /// Optional: message id the returned page starts from.
    #[prost(string, optional, tag = "2")]
    pub page_start_message_id: ::core::option::Option<::prost::alloc::string::String>,
    /// Optional: cap on the number of messages returned.
    #[prost(int32, optional, tag = "3")]
    pub max_message_count: ::core::option::Option<i32>,
}
/// Model message with id allows you to identify the message ID of a given message
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ModelMessageWithId {
    /// Stable identifier for this message within its conversation.
    #[prost(string, tag = "3")]
    pub message_id: ::prost::alloc::string::String,
    /// WB agent user messages can have snapshot rids associated with them
    #[prost(string, optional, tag = "4")]
    pub snapshot_rid: ::core::option::Option<::prost::alloc::string::String>,
    /// Payload: either a model message (tag 1) or a tool action (tag 2).
    #[prost(oneof = "model_message_with_id::Content", tags = "1, 2")]
    pub content: ::core::option::Option<model_message_with_id::Content>,
}
/// Nested message and enum types in `ModelMessageWithId`.
pub mod model_message_with_id {
    /// Wire-level oneof for `ModelMessageWithId.content`.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Content {
        /// A user or assistant message.
        #[prost(message, tag = "1")]
        Message(super::ModelMessage),
        /// A high-level description of a tool call the agent made.
        #[prost(message, tag = "2")]
        ToolAction(super::ToolAction),
    }
}
/// Response to `GetConversation`: a page of messages plus the thread's metadata.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetConversationResponse {
    /// Messages in conversation order.
    #[prost(message, repeated, tag = "1")]
    pub ordered_messages: ::prost::alloc::vec::Vec<ModelMessageWithId>,
    /// Metadata (title, timestamps, mode) for the conversation.
    #[prost(message, optional, tag = "2")]
    pub conversation_metadata: ::core::option::Option<ConversationMetadata>,
}
/// Will generate all conversation threads that this user has in this workspace
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListConversationsRequest {
    /// Workspace to list conversations for.
    #[prost(string, tag = "1")]
    pub workspace_rid: ::prost::alloc::string::String,
}
/// Metadata describing a conversation thread (identity, title, timestamps, mode).
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ConversationMetadata {
    /// RID identifying the conversation.
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    /// Display title of the conversation.
    #[prost(string, tag = "2")]
    pub title: ::prost::alloc::string::String,
    /// Creation time (well-known `google.protobuf.Timestamp`).
    #[prost(message, optional, tag = "3")]
    pub created_at: ::core::option::Option<
        super::super::super::google::protobuf::Timestamp,
    >,
    /// Last-modified time (well-known `google.protobuf.Timestamp`).
    #[prost(message, optional, tag = "4")]
    pub last_updated_at: ::core::option::Option<
        super::super::super::google::protobuf::Timestamp,
    >,
    /// Current mode (read-only vs edit), if set.
    #[prost(message, optional, tag = "5")]
    pub mode: ::core::option::Option<ConversationMode>,
}
/// ListConversationsResponse is a list of conversations that can be used in a call to GetConversationRequest
/// to get a full conversation from storage. These are ordered by creation time.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListConversationsResponse {
    /// Conversation metadata entries, ordered by creation time.
    #[prost(message, repeated, tag = "1")]
    pub conversations: ::prost::alloc::vec::Vec<ConversationMetadata>,
}
/// Half-open or closed time interval — NOTE(review): inclusivity of the bounds
/// is not specified here; confirm against the producing service.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct TimeRange {
    /// Start of the range.
    #[prost(message, optional, tag = "1")]
    pub range_start: ::core::option::Option<Timestamp>,
    /// End of the range.
    #[prost(message, optional, tag = "2")]
    pub range_end: ::core::option::Option<Timestamp>,
}
/// Local timestamp type (distinct from the well-known `google.protobuf.Timestamp`
/// used elsewhere in this file).
/// NOTE(review): `seconds` is `int32`, which cannot represent epoch seconds past
/// 2038-01-19 if these are Unix-epoch based — confirm intended epoch/range.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct Timestamp {
    /// Whole seconds component.
    #[prost(int32, tag = "1")]
    pub seconds: i32,
    /// Sub-second component in nanoseconds.
    #[prost(int32, tag = "2")]
    pub nanoseconds: i32,
}
/// ModelMessage is a discriminated union of system, user, assistant, and tool messages.
/// Each message type has its own structure and content.
/// (Only user and assistant variants are currently defined in the oneof below.)
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ModelMessage {
    /// Exactly one of user (tag 1) or assistant (tag 2).
    #[prost(oneof = "model_message::Kind", tags = "1, 2")]
    pub kind: ::core::option::Option<model_message::Kind>,
}
/// Nested message and enum types in `ModelMessage`.
pub mod model_message {
    /// Wire-level oneof for `ModelMessage.kind`.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Kind {
        /// Message authored by the user.
        #[prost(message, tag = "1")]
        User(super::UserModelMessage),
        /// Message authored by the assistant/agent.
        #[prost(message, tag = "2")]
        Assistant(super::AssistantModelMessage),
    }
}
/// A user message containing text
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UserModelMessage {
    /// Ordered content parts of the user message.
    /// NOTE(review): despite the field name `text`, this is a list of
    /// `UserContentPart`s (the proto field is named `text`).
    #[prost(message, repeated, tag = "1")]
    pub text: ::prost::alloc::vec::Vec<UserContentPart>,
}
/// An assistant message containing text
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AssistantModelMessage {
    /// Ordered content parts (text and/or reasoning) of the assistant message.
    #[prost(message, repeated, tag = "1")]
    pub content_parts: ::prost::alloc::vec::Vec<AssistantContentPart>,
}
/// Content part for user messages; currently only text is supported.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UserContentPart {
    /// Single-variant oneof; kept as a oneof so new part kinds can be added.
    #[prost(oneof = "user_content_part::Part", tags = "1")]
    pub part: ::core::option::Option<user_content_part::Part>,
}
/// Nested message and enum types in `UserContentPart`.
pub mod user_content_part {
    /// Wire-level oneof for `UserContentPart.part`.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Part {
        /// Plain text content.
        #[prost(message, tag = "1")]
        Text(super::TextPart),
    }
}
/// Content part for assistant messages: can be text, reasoning, or mutation.
/// (Only text and reasoning variants are currently defined in the oneof below.)
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AssistantContentPart {
    /// Exactly one of text (tag 1) or reasoning (tag 2).
    #[prost(oneof = "assistant_content_part::Part", tags = "1, 2")]
    pub part: ::core::option::Option<assistant_content_part::Part>,
}
/// Nested message and enum types in `AssistantContentPart`.
pub mod assistant_content_part {
    /// Wire-level oneof for `AssistantContentPart.part`.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Part {
        /// Plain text content.
        #[prost(message, tag = "1")]
        Text(super::TextPart),
        /// Model reasoning content.
        #[prost(message, tag = "2")]
        Reasoning(super::ReasoningPart),
    }
}
/// Text part for user or assistant messages.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextPart {
    /// The text content.
    #[prost(string, tag = "1")]
    pub text: ::prost::alloc::string::String,
}
/// User-supplied image part.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ImagePart {
    /// The base64-encoded image data
    /// NOTE(review): the field is `bytes`; confirm whether the payload is raw
    /// image bytes or base64 text as the comment above suggests.
    #[prost(bytes = "vec", tag = "1")]
    pub data: ::prost::alloc::vec::Vec<u8>,
    /// The media type of the image (e.g. "image/png", "image/jpeg")
    #[prost(string, optional, tag = "2")]
    pub media_type: ::core::option::Option<::prost::alloc::string::String>,
    /// Optional: the filename of the image
    #[prost(string, optional, tag = "3")]
    pub filename: ::core::option::Option<::prost::alloc::string::String>,
}
/// Reasoning part for assistant messages.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningPart {
    /// The reasoning text.
    #[prost(string, tag = "1")]
    pub reasoning: ::prost::alloc::string::String,
}
/// StreamChatResponse is a discriminated union response to a StreamChatRequest
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StreamChatResponse {
    /// One stream event per message.
    /// Note: tag 9 is absent from the oneof — presumably a reserved/removed
    /// field in the proto; do not reuse it.
    #[prost(
        oneof = "stream_chat_response::Response",
        tags = "1, 2, 3, 4, 5, 6, 7, 8, 10"
    )]
    pub response: ::core::option::Option<stream_chat_response::Response>,
}
/// Nested message and enum types in `StreamChatResponse`.
pub mod stream_chat_response {
    /// Wire-level oneof for `StreamChatResponse.response`.
    /// Text and reasoning are delivered as start/delta/end triples keyed by id.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Response {
        /// Terminal event: the agent run completed.
        #[prost(message, tag = "1")]
        Finish(super::Finish),
        /// Terminal event: the agent run failed.
        #[prost(message, tag = "2")]
        Error(super::Error),
        /// Opens a text stream identified by `TextStart.id`.
        #[prost(message, tag = "3")]
        TextStart(super::TextStart),
        /// Appends a chunk to an open text stream.
        #[prost(message, tag = "4")]
        TextDelta(super::TextDelta),
        /// Closes a text stream.
        #[prost(message, tag = "5")]
        TextEnd(super::TextEnd),
        /// Opens a reasoning stream identified by `ReasoningStart.id`.
        #[prost(message, tag = "6")]
        ReasoningStart(super::ReasoningStart),
        /// Appends a chunk to an open reasoning stream.
        #[prost(message, tag = "7")]
        ReasoningDelta(super::ReasoningDelta),
        /// Closes a reasoning stream.
        #[prost(message, tag = "8")]
        ReasoningEnd(super::ReasoningEnd),
        /// High-level description of a tool call the agent is making.
        #[prost(message, tag = "10")]
        ToolAction(super::ToolAction),
    }
}
/// Indicates the end of a chat session
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Finish {
    /// The message ids in order of all generated messages for this agent run
    /// These ids can be used to branch a message from that specific message
    #[prost(string, repeated, tag = "1")]
    pub ordered_message_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
    /// In the case that this is the first agent run in a conversation thread, we also
    /// return the new conversation title generated
    #[prost(string, optional, tag = "2")]
    pub new_title: ::core::option::Option<::prost::alloc::string::String>,
}
/// An error that occurred during the chat session
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Error {
    /// Human-readable description of the failure.
    #[prost(string, tag = "1")]
    pub message: ::prost::alloc::string::String,
}
/// Indicates the start of a text message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextStart {
    /// uniquely identifies the text message (e.g. uuid) so that the client can
    /// merge parallel message streams (if it happens).
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// A delta (continuation) of a text message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextDelta {
    /// Matches the `id` from the corresponding `TextStart`.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// The next chunk of text
    #[prost(string, tag = "2")]
    pub delta: ::prost::alloc::string::String,
}
/// Indicates the end of a text message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextEnd {
    /// Matches the `id` from the corresponding `TextStart`.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// Indicates the start of a reasoning message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningStart {
    /// Uniquely identifies this reasoning stream for matching deltas/end.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// A delta (continuation) of a reasoning message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningDelta {
    /// Matches the `id` from the corresponding `ReasoningStart`.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// The next chunk of reasoning
    #[prost(string, tag = "2")]
    pub delta: ::prost::alloc::string::String,
}
/// Indicates the end of a reasoning message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningEnd {
    /// Matches the `id` from the corresponding `ReasoningStart`.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// this is a concise description of a tool call that the agent is making internally
/// without revealing too much detail about the tool call, it informs the user what the agent is doing
/// at a high level. the format is: `{tool_action_verb} {tool_target}` for example:
/// "Search channels for My Datasource"
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ToolAction {
    /// Uniquely identifies this tool action event.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// "Thought", "Read", "Find", "Look-up", etc.
    #[prost(string, tag = "2")]
    pub tool_action_verb: ::prost::alloc::string::String,
    /// "workbook", "channel", "variable", "panel", etc.
    #[prost(string, optional, tag = "3")]
    pub tool_target: ::core::option::Option<::prost::alloc::string::String>,
}
/// Generated client implementations.
///
/// Tonic-generated gRPC client for `nominal.ai.v1.AIAgentService`. Do not
/// hand-edit the logic here; regenerate from the proto instead. All RPC
/// methods follow the same shape: wait for the transport to be ready, build a
/// prost codec and static method path, attach a `GrpcMethod` extension for
/// observability, then dispatch as unary or server-streaming.
pub mod ai_agent_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// AIAgentService provides AI-powered assistance for general operations
    #[derive(Debug, Clone)]
    pub struct AiAgentServiceClient<T> {
        // Generic over the underlying transport so the client works with
        // channels, intercepted services, and test doubles alike.
        inner: tonic::client::Grpc<T>,
    }
    impl AiAgentServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> AiAgentServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        /// Wrap an existing transport service in a client.
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        /// Like `new`, but requests are issued against the given origin URI.
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        /// Wrap the transport with a tonic interceptor (e.g. for auth headers).
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> AiAgentServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            AiAgentServiceClient::new(InterceptedService::new(inner, interceptor))
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// StreamChat handles streaming chat for AI agent.
        ///
        /// NOTE(review): despite the proto comment calling this "bidirectional",
        /// at the transport level this is a server-streaming RPC — one request
        /// in, a stream of `StreamChatResponse` events out.
        pub async fn stream_chat(
            &mut self,
            request: impl tonic::IntoRequest<super::StreamChatRequest>,
        ) -> std::result::Result<
            tonic::Response<tonic::codec::Streaming<super::StreamChatResponse>>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/StreamChat",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(GrpcMethod::new("nominal.ai.v1.AIAgentService", "StreamChat"));
            self.inner.server_streaming(req, path, codec).await
        }
        /// GetConversation handles getting a complete conversation list, with an optional limit on number of messages returned
        pub async fn get_conversation(
            &mut self,
            request: impl tonic::IntoRequest<super::GetConversationRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GetConversationResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/GetConversation",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "GetConversation"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// ListConversations handles getting the list of conversation ids ordered by most recently updated
        pub async fn list_conversations(
            &mut self,
            request: impl tonic::IntoRequest<super::ListConversationsRequest>,
        ) -> std::result::Result<
            tonic::Response<super::ListConversationsResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/ListConversations",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "ListConversations"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// CreateConversation handles creating a conversation and assigning it a conversation rid
        pub async fn create_conversation(
            &mut self,
            request: impl tonic::IntoRequest<super::CreateConversationRequest>,
        ) -> std::result::Result<
            tonic::Response<super::CreateConversationResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/CreateConversation",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "CreateConversation"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// UpdateConversationMetadata handles updating any metadata associated with a conversation
        pub async fn update_conversation_metadata(
            &mut self,
            request: impl tonic::IntoRequest<super::UpdateConversationMetadataRequest>,
        ) -> std::result::Result<
            tonic::Response<super::UpdateConversationMetadataResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/UpdateConversationMetadata",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.AIAgentService",
                        "UpdateConversationMetadata",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// DeleteConversation handles deleting a specific conversation by conversation rid
        pub async fn delete_conversation(
            &mut self,
            request: impl tonic::IntoRequest<super::DeleteConversationRequest>,
        ) -> std::result::Result<
            tonic::Response<super::DeleteConversationResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/DeleteConversation",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "DeleteConversation"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// GetSnapshotRidByUserMessageId handles resolving the snapshot rid of the workbook at the time the message is sent
        pub async fn get_snapshot_rid_by_user_message_id(
            &mut self,
            request: impl tonic::IntoRequest<super::GetSnapshotRidByUserMessageIdRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GetSnapshotRidByUserMessageIdResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/GetSnapshotRidByUserMessageId",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.AIAgentService",
                        "GetSnapshotRidByUserMessageId",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}
/// Empty request for `GetProviderStatus`.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct GetProviderStatusRequest {}
/// Health/status report for the AI model provider.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct GetProviderStatusResponse {
    /// Timestamp when the last status was determined
    #[prost(message, optional, tag = "1")]
    pub timestamp: ::core::option::Option<
        super::super::super::google::protobuf::Timestamp,
    >,
    /// Status of the most recent health check probe
    #[prost(message, optional, tag = "2")]
    pub last_status: ::core::option::Option<ProviderStatus>,
    /// Aggregated status over the last 30 minutes (DEGRADED if any check failed or exceeded thresholds)
    /// Deprecated: Use aggregated_status instead. This field is kept for backward compatibility.
    #[deprecated]
    #[prost(message, optional, tag = "3")]
    pub aggregated_status_over_last_30m: ::core::option::Option<ProviderStatus>,
    /// Aggregated status over the last iterations, configurable in the BE (DEGRADED if any check failed or exceeded thresholds)
    #[prost(message, optional, tag = "4")]
    pub aggregated_status: ::core::option::Option<ProviderStatus>,
}
/// Discriminated union: provider is either healthy or degraded.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct ProviderStatus {
    /// Exactly one of healthy (tag 1) or degraded (tag 2).
    #[prost(oneof = "provider_status::Status", tags = "1, 2")]
    pub status: ::core::option::Option<provider_status::Status>,
}
/// Nested message and enum types in `ProviderStatus`.
pub mod provider_status {
    /// Oneof payload for [`super::ProviderStatus`]: the provider is either
    /// healthy or degraded (with a reason carried inside `Degraded`).
    #[derive(Clone, Copy, PartialEq, ::prost::Oneof)]
    pub enum Status {
        /// Provider passed its health check.
        #[prost(message, tag = "1")]
        Healthy(super::Healthy),
        /// Provider failed or exceeded thresholds; see `Degraded.reason`.
        #[prost(message, tag = "2")]
        Degraded(super::Degraded),
    }
}
/// Empty marker message indicating the provider is healthy.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct Healthy {}
/// Marker message indicating the provider is degraded, with the cause attached.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct Degraded {
    /// Raw wire value of [`DegradationReason`]; decode with
    /// `DegradationReason::try_from(reason)` since unknown values may appear.
    #[prost(enumeration = "DegradationReason", tag = "1")]
    pub reason: i32,
}
/// Latency measurements for a provider probe.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct ProviderMetrics {
    /// Milliseconds until the first token was received.
    #[prost(int32, tag = "1")]
    pub time_to_first_token_ms: i32,
    /// Total duration of the probe in milliseconds.
    #[prost(int32, tag = "2")]
    pub total_time_ms: i32,
}
/// Why the provider was marked degraded (see `Degraded.reason`).
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum DegradationReason {
    /// No reason was specified.
    Unspecified = 0,
    /// Latency thresholds were exceeded.
    HighLatency = 1,
    /// Health checks failed outright.
    Failures = 2,
    /// Both high latency and failures were observed.
    HighLatencyAndFailures = 3,
}
751impl DegradationReason {
752    /// String value of the enum field names used in the ProtoBuf definition.
753    ///
754    /// The values are not transformed in any way and thus are considered stable
755    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
756    pub fn as_str_name(&self) -> &'static str {
757        match self {
758            Self::Unspecified => "DEGRADATION_REASON_UNSPECIFIED",
759            Self::HighLatency => "DEGRADATION_REASON_HIGH_LATENCY",
760            Self::Failures => "DEGRADATION_REASON_FAILURES",
761            Self::HighLatencyAndFailures => {
762                "DEGRADATION_REASON_HIGH_LATENCY_AND_FAILURES"
763            }
764        }
765    }
766    /// Creates an enum from field names used in the ProtoBuf definition.
767    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
768        match value {
769            "DEGRADATION_REASON_UNSPECIFIED" => Some(Self::Unspecified),
770            "DEGRADATION_REASON_HIGH_LATENCY" => Some(Self::HighLatency),
771            "DEGRADATION_REASON_FAILURES" => Some(Self::Failures),
772            "DEGRADATION_REASON_HIGH_LATENCY_AND_FAILURES" => {
773                Some(Self::HighLatencyAndFailures)
774            }
775            _ => None,
776        }
777    }
778}
/// Generated client implementations.
pub mod model_provider_health_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// ModelProviderHealthService monitors the health and performance of the backing LLM model provider.
    /// It runs lightweight health checks every 5 minutes to measure provider responsiveness and reliability,
    /// independent of the complexity of user prompts.
    #[derive(Debug, Clone)]
    pub struct ModelProviderHealthServiceClient<T> {
        /// Underlying tonic gRPC client that performs the actual transport work.
        inner: tonic::client::Grpc<T>,
    }
    impl ModelProviderHealthServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> ModelProviderHealthServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        /// Wraps an already-constructed transport service in a client.
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        /// Like [`Self::new`], but routes requests to the given origin URI.
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        /// Wraps the transport in an [`InterceptedService`] so `interceptor` can
        /// inspect or modify each outgoing request (e.g. to attach auth metadata).
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> ModelProviderHealthServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            ModelProviderHealthServiceClient::new(
                InterceptedService::new(inner, interceptor),
            )
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// GetProviderStatus returns the current health status of the model provider based on recent health checks.
        /// The status is HEALTHY if all checks in the last 30 minutes passed latency thresholds,
        /// or DEGRADED if any checks exceeded latency thresholds or failed entirely.
        pub async fn get_provider_status(
            &mut self,
            request: impl tonic::IntoRequest<super::GetProviderStatusRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GetProviderStatusResponse>,
            tonic::Status,
        > {
            // Wait until the underlying transport is ready to accept a request;
            // surface readiness failures as a gRPC `unknown` status.
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            // Fully-qualified gRPC method path: /<package>.<service>/<method>.
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.ModelProviderHealthService/GetProviderStatus",
            );
            let mut req = request.into_request();
            // Record the service/method pair as a request extension so
            // middleware (tracing, metrics, interceptors) can identify the RPC.
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.ModelProviderHealthService",
                        "GetProviderStatus",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}
/// CreateOrUpdateKnowledgeBaseRequest is a request to create a knowledge from an attachment in the attachment's
/// workspace. Subsequent calls for the same attachment will overwrite the existing knowledge base.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateOrUpdateKnowledgeBaseRequest {
    /// RID of the attachment to build the knowledge base from.
    #[prost(string, tag = "1")]
    pub attachment_rid: ::prost::alloc::string::String,
    /// summary of the knowledge base, will be used by the LLM to decide when to use it
    #[prost(string, tag = "2")]
    pub summary_description: ::prost::alloc::string::String,
    /// Optional raw wire value of [`KnowledgeBaseType`]; per the enum docs,
    /// an unspecified type defaults to PROMPT.
    #[prost(enumeration = "KnowledgeBaseType", optional, tag = "3")]
    pub r#type: ::core::option::Option<i32>,
}
/// CreateOrUpdateKnowledgeBaseResponse is the response to creating/updating a knowledge base
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateOrUpdateKnowledgeBaseResponse {
    /// RID of the created or updated knowledge base.
    #[prost(string, tag = "1")]
    pub knowledge_base_rid: ::prost::alloc::string::String,
}
/// KnowledgeBase represents a knowledge base entry
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct KnowledgeBase {
    /// Unique RID of this knowledge base.
    #[prost(string, tag = "1")]
    pub knowledge_base_rid: ::prost::alloc::string::String,
    /// RID of the attachment this knowledge base was built from.
    #[prost(string, tag = "2")]
    pub attachment_rid: ::prost::alloc::string::String,
    /// RID of the workspace the knowledge base lives in.
    #[prost(string, tag = "3")]
    pub workspace_rid: ::prost::alloc::string::String,
    /// Summary used by the LLM to decide when to consult this knowledge base.
    #[prost(string, tag = "4")]
    pub summary_description: ::prost::alloc::string::String,
    /// Raw wire value of [`KnowledgeBaseType`]; decode with `KnowledgeBaseType::try_from`.
    #[prost(enumeration = "KnowledgeBaseType", tag = "5")]
    pub r#type: i32,
    /// Version number of this entry (semantics defined server-side — confirm with service impl).
    #[prost(int32, tag = "6")]
    pub version: i32,
}
/// Request for `KnowledgeBaseService.List`: scopes the listing to one workspace.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListRequest {
    /// RID of the workspace whose knowledge bases should be listed.
    #[prost(string, tag = "1")]
    pub workspace_rid: ::prost::alloc::string::String,
}
/// Response for `KnowledgeBaseService.List`.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListResponse {
    /// All knowledge bases found in the requested workspace.
    #[prost(message, repeated, tag = "1")]
    pub knowledge_bases: ::prost::alloc::vec::Vec<KnowledgeBase>,
}
/// Request for `KnowledgeBaseService.Delete`.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteRequest {
    /// RID of the knowledge base to delete.
    #[prost(string, tag = "1")]
    pub knowledge_base_rid: ::prost::alloc::string::String,
}
/// Response for `KnowledgeBaseService.Delete`.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct DeleteResponse {
    /// Whether the delete operation succeeded.
    #[prost(bool, tag = "1")]
    pub success: bool,
}
/// Request for `KnowledgeBaseService.GetBatch`: fetch several knowledge bases at once.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetBatchRequest {
    /// RIDs of the knowledge bases to retrieve.
    #[prost(string, repeated, tag = "1")]
    pub knowledge_base_rids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
/// Response for `KnowledgeBaseService.GetBatch`.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetBatchResponse {
    /// Knowledge bases resolved from the requested RIDs.
    #[prost(message, repeated, tag = "1")]
    pub knowledge_bases: ::prost::alloc::vec::Vec<KnowledgeBase>,
}
/// generate summary description is intentionally going to return the generated description to the frontend
/// rather than storing it in the knowledge base directly because the description needs to be accepted by the user
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GenerateSummaryDescriptionRequest {
    /// RID of the attachment to summarize.
    #[prost(string, tag = "1")]
    pub attachment_rid: ::prost::alloc::string::String,
}
/// Response for `KnowledgeBaseService.GenerateSummaryDescription`.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GenerateSummaryDescriptionResponse {
    /// Generated summary; returned to the frontend for user acceptance rather than stored directly.
    #[prost(string, tag = "1")]
    pub summary_description: ::prost::alloc::string::String,
}
/// KnowledgeBaseType defines the types of knowledge base
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum KnowledgeBaseType {
    /// defaults to PROMPT
    Unspecified = 0,
    /// knowledge base gets added directly to prompt (needs to be small enough!)
    Prompt = 1,
    /// knowledge base gets used via vector search on embeddings
    Embedding = 2,
}
996impl KnowledgeBaseType {
997    /// String value of the enum field names used in the ProtoBuf definition.
998    ///
999    /// The values are not transformed in any way and thus are considered stable
1000    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
1001    pub fn as_str_name(&self) -> &'static str {
1002        match self {
1003            Self::Unspecified => "KNOWLEDGE_BASE_TYPE_UNSPECIFIED",
1004            Self::Prompt => "KNOWLEDGE_BASE_TYPE_PROMPT",
1005            Self::Embedding => "KNOWLEDGE_BASE_TYPE_EMBEDDING",
1006        }
1007    }
1008    /// Creates an enum from field names used in the ProtoBuf definition.
1009    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
1010        match value {
1011            "KNOWLEDGE_BASE_TYPE_UNSPECIFIED" => Some(Self::Unspecified),
1012            "KNOWLEDGE_BASE_TYPE_PROMPT" => Some(Self::Prompt),
1013            "KNOWLEDGE_BASE_TYPE_EMBEDDING" => Some(Self::Embedding),
1014            _ => None,
1015        }
1016    }
1017}
/// Generated client implementations.
pub mod knowledge_base_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// KnowledgeBaseService provides AI-powered knowledge base management
    #[derive(Debug, Clone)]
    pub struct KnowledgeBaseServiceClient<T> {
        /// Underlying tonic gRPC client that performs the actual transport work.
        inner: tonic::client::Grpc<T>,
    }
    impl KnowledgeBaseServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> KnowledgeBaseServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        /// Wraps an already-constructed transport service in a client.
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        /// Like [`Self::new`], but routes requests to the given origin URI.
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        /// Wraps the transport in an [`InterceptedService`] so `interceptor` can
        /// inspect or modify each outgoing request (e.g. to attach auth metadata).
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> KnowledgeBaseServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            KnowledgeBaseServiceClient::new(InterceptedService::new(inner, interceptor))
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// CreateOrUpdateKnowledgeBase creates a knowledge base in the workspace
        pub async fn create_or_update_knowledge_base(
            &mut self,
            request: impl tonic::IntoRequest<super::CreateOrUpdateKnowledgeBaseRequest>,
        ) -> std::result::Result<
            tonic::Response<super::CreateOrUpdateKnowledgeBaseResponse>,
            tonic::Status,
        > {
            // Wait until the underlying transport is ready to accept a request;
            // surface readiness failures as a gRPC `unknown` status. The same
            // ready/path/extension/unary sequence is repeated in every RPC below.
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            // Fully-qualified gRPC method path: /<package>.<service>/<method>.
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/CreateOrUpdateKnowledgeBase",
            );
            let mut req = request.into_request();
            // Record the service/method pair as a request extension so
            // middleware (tracing, metrics, interceptors) can identify the RPC.
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.KnowledgeBaseService",
                        "CreateOrUpdateKnowledgeBase",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// List returns all knowledge bases in the specified workspace
        pub async fn list(
            &mut self,
            request: impl tonic::IntoRequest<super::ListRequest>,
        ) -> std::result::Result<tonic::Response<super::ListResponse>, tonic::Status> {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/List",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "List"));
            self.inner.unary(req, path, codec).await
        }
        /// Delete removes a knowledge base by its RID
        pub async fn delete(
            &mut self,
            request: impl tonic::IntoRequest<super::DeleteRequest>,
        ) -> std::result::Result<tonic::Response<super::DeleteResponse>, tonic::Status> {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/Delete",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "Delete"));
            self.inner.unary(req, path, codec).await
        }
        /// GetBatch retrieves multiple knowledge bases by their RIDs
        pub async fn get_batch(
            &mut self,
            request: impl tonic::IntoRequest<super::GetBatchRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GetBatchResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/GetBatch",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "GetBatch"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// GenerateSummaryDescription generates a summary description for an attachment rid
        pub async fn generate_summary_description(
            &mut self,
            request: impl tonic::IntoRequest<super::GenerateSummaryDescriptionRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GenerateSummaryDescriptionResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/GenerateSummaryDescription",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.KnowledgeBaseService",
                        "GenerateSummaryDescription",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}