nominal_api/proto/nominal.ai.v1.rs

// This file is @generated by prost-build.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetSnapshotRidByUserMessageIdRequest {
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    #[prost(string, tag = "2")]
    pub message_id: ::prost::alloc::string::String,
}
/// Returns an empty response body when the message id exists but has no associated snapshot.
/// This occurs when a message was sent in a non-workbook context.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetSnapshotRidByUserMessageIdResponse {
    #[prost(string, optional, tag = "1")]
    pub snapshot_rid: ::core::option::Option<::prost::alloc::string::String>,
}
/// StreamChatRequest is a request to stream chat messages for the AI agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StreamChatRequest {
    /// The conversation RID
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    /// The user message to append to the conversation
    #[prost(message, optional, tag = "2")]
    pub message: ::core::option::Option<UserModelMessage>,
    /// Optional: image files to provide to the agent
    #[prost(message, repeated, tag = "3")]
    pub images: ::prost::alloc::vec::Vec<ImagePart>,
    /// Context-specific fields based on the oneof kind.
    #[prost(oneof = "stream_chat_request::Context", tags = "4, 5")]
    pub context: ::core::option::Option<stream_chat_request::Context>,
}
/// Nested message and enum types in `StreamChatRequest`.
pub mod stream_chat_request {
    /// Context-specific fields based on the oneof kind.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Context {
        #[prost(message, tag = "4")]
        Workbook(super::WorkbookContext),
        #[prost(message, tag = "5")]
        Global(super::GlobalContext),
    }
}
/// WorkbookContext contains workbook-specific context fields
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct WorkbookContext {
    /// RID of the workbook to use for context
    #[prost(string, tag = "1")]
    pub workbook_rid: ::prost::alloc::string::String,
    /// Optional: the user's presence in the workbook
    #[prost(message, optional, tag = "2")]
    pub user_presence: ::core::option::Option<WorkbookUserPresence>,
}
/// GlobalContext is the default context (no workbook context)
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct GlobalContext {}
/// WorkbookUserPresence contains the user's presence in the workbook,
/// which is used to describe what the user is viewing at the time of the message.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct WorkbookUserPresence {
    #[prost(int32, optional, tag = "1")]
    pub tab_index: ::core::option::Option<i32>,
    #[prost(message, optional, tag = "2")]
    pub range: ::core::option::Option<TimeRange>,
}
/// CreateConversationRequest will create a new conversation thread.
/// If old conversation rid is not set, a brand new, empty chat is created.
/// If old conversation rid is set without a previous message id, the full conversation thread will be copied.
/// If old conversation rid is set with a previous message id, the conversation thread up until that message will be copied.
/// The latter case is useful for branching a conversation into a new thread.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateConversationRequest {
    #[prost(string, tag = "1")]
    pub title: ::prost::alloc::string::String,
    #[prost(string, tag = "2")]
    pub workspace_rid: ::prost::alloc::string::String,
    #[prost(string, optional, tag = "3")]
    pub old_conversation_rid: ::core::option::Option<::prost::alloc::string::String>,
    #[prost(string, optional, tag = "4")]
    pub previous_message_id: ::core::option::Option<::prost::alloc::string::String>,
}
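// Illustrative sketch (not generated): branching an existing conversation at a
// specific message by filling in the optional fields above. The title, RIDs, and
// message id are placeholder values.
#[allow(dead_code)]
fn example_branch_conversation_request() -> CreateConversationRequest {
    CreateConversationRequest {
        title: "Branched thread".to_string(),
        workspace_rid: "ri.workspace.example".to_string(),
        // Copy the old thread up until the given message id.
        old_conversation_rid: Some("ri.conversation.old".to_string()),
        previous_message_id: Some("message-id-to-branch-from".to_string()),
    }
}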
/// CreateConversationResponse will return the conversation id for the new conversation
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateConversationResponse {
    #[prost(string, tag = "1")]
    pub new_conversation_rid: ::prost::alloc::string::String,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UpdateConversationMetadataRequest {
    #[prost(string, tag = "1")]
    pub title: ::prost::alloc::string::String,
    #[prost(string, tag = "2")]
    pub conversation_rid: ::prost::alloc::string::String,
}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct UpdateConversationMetadataResponse {}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteConversationRequest {
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct DeleteConversationResponse {}
/// A GetConversationRequest allows you to retrieve a subset of messages from the conversation thread represented
/// by the provided rid. To start from a particular message, you can also provide a message id.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetConversationRequest {
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    #[prost(string, optional, tag = "2")]
    pub page_start_message_id: ::core::option::Option<::prost::alloc::string::String>,
    #[prost(int32, optional, tag = "3")]
    pub max_message_count: ::core::option::Option<i32>,
}
/// ModelMessageWithId pairs a message with its message ID so individual messages can be identified
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ModelMessageWithId {
    #[prost(string, tag = "3")]
    pub message_id: ::prost::alloc::string::String,
    /// Workbook agent user messages can have snapshot rids associated with them
    #[prost(string, optional, tag = "4")]
    pub snapshot_rid: ::core::option::Option<::prost::alloc::string::String>,
    #[prost(oneof = "model_message_with_id::Content", tags = "1, 2")]
    pub content: ::core::option::Option<model_message_with_id::Content>,
}
/// Nested message and enum types in `ModelMessageWithId`.
pub mod model_message_with_id {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Content {
        #[prost(message, tag = "1")]
        Message(super::ModelMessage),
        #[prost(message, tag = "2")]
        ToolAction(super::ToolAction),
    }
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetConversationResponse {
    #[prost(message, repeated, tag = "1")]
    pub ordered_messages: ::prost::alloc::vec::Vec<ModelMessageWithId>,
    #[prost(message, optional, tag = "2")]
    pub conversation_metadata: ::core::option::Option<ConversationMetadata>,
}
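// Illustrative sketch (not generated): walking a fetched conversation and tallying
// user messages, assistant messages, and tool actions by matching on the oneofs above.
#[allow(dead_code)]
fn example_tally_conversation(response: &GetConversationResponse) -> (usize, usize, usize) {
    let (mut users, mut assistants, mut tools) = (0, 0, 0);
    for entry in &response.ordered_messages {
        match &entry.content {
            Some(model_message_with_id::Content::Message(message)) => match &message.kind {
                Some(model_message::Kind::User(_)) => users += 1,
                Some(model_message::Kind::Assistant(_)) => assistants += 1,
                None => {}
            },
            Some(model_message_with_id::Content::ToolAction(_)) => tools += 1,
            None => {}
        }
    }
    (users, assistants, tools)
}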
/// Lists all conversation threads that this user has in this workspace
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListConversationsRequest {
    #[prost(string, tag = "1")]
    pub workspace_rid: ::prost::alloc::string::String,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ConversationMetadata {
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    #[prost(string, tag = "2")]
    pub title: ::prost::alloc::string::String,
    #[prost(message, optional, tag = "3")]
    pub created_at: ::core::option::Option<
        super::super::super::google::protobuf::Timestamp,
    >,
    #[prost(message, optional, tag = "4")]
    pub last_updated_at: ::core::option::Option<
        super::super::super::google::protobuf::Timestamp,
    >,
}
/// ListConversationsResponse is a list of conversations that can be used in a GetConversationRequest
/// to fetch a full conversation from storage. These are ordered by creation time.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListConversationsResponse {
    #[prost(message, repeated, tag = "1")]
    pub conversations: ::prost::alloc::vec::Vec<ConversationMetadata>,
}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct TimeRange {
    #[prost(message, optional, tag = "1")]
    pub range_start: ::core::option::Option<Timestamp>,
    #[prost(message, optional, tag = "2")]
    pub range_end: ::core::option::Option<Timestamp>,
}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct Timestamp {
    #[prost(int32, tag = "1")]
    pub seconds: i32,
    #[prost(int32, tag = "2")]
    pub nanoseconds: i32,
}
/// ModelMessage is a discriminated union of user and assistant messages.
/// Each message type has its own structure and content.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ModelMessage {
    #[prost(oneof = "model_message::Kind", tags = "1, 2")]
    pub kind: ::core::option::Option<model_message::Kind>,
}
/// Nested message and enum types in `ModelMessage`.
pub mod model_message {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Kind {
        #[prost(message, tag = "1")]
        User(super::UserModelMessage),
        #[prost(message, tag = "2")]
        Assistant(super::AssistantModelMessage),
    }
}
/// A user message containing text
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UserModelMessage {
    #[prost(message, repeated, tag = "1")]
    pub text: ::prost::alloc::vec::Vec<UserContentPart>,
}
/// An assistant message containing content parts
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AssistantModelMessage {
    #[prost(message, repeated, tag = "1")]
    pub content_parts: ::prost::alloc::vec::Vec<AssistantContentPart>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UserContentPart {
    #[prost(oneof = "user_content_part::Part", tags = "1")]
    pub part: ::core::option::Option<user_content_part::Part>,
}
/// Nested message and enum types in `UserContentPart`.
pub mod user_content_part {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Part {
        #[prost(message, tag = "1")]
        Text(super::TextPart),
    }
}
/// Content part for assistant messages: can be text or reasoning.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AssistantContentPart {
    #[prost(oneof = "assistant_content_part::Part", tags = "1, 2")]
    pub part: ::core::option::Option<assistant_content_part::Part>,
}
/// Nested message and enum types in `AssistantContentPart`.
pub mod assistant_content_part {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Part {
        #[prost(message, tag = "1")]
        Text(super::TextPart),
        #[prost(message, tag = "2")]
        Reasoning(super::ReasoningPart),
    }
}
/// Text part for user or assistant messages.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextPart {
    #[prost(string, tag = "1")]
    pub text: ::prost::alloc::string::String,
}
/// User-supplied image part.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ImagePart {
    /// The base64-encoded image data
    #[prost(bytes = "vec", tag = "1")]
    pub data: ::prost::alloc::vec::Vec<u8>,
    /// The media type of the image (e.g. "image/png", "image/jpeg")
    #[prost(string, optional, tag = "2")]
    pub media_type: ::core::option::Option<::prost::alloc::string::String>,
    /// Optional: the filename of the image
    #[prost(string, optional, tag = "3")]
    pub filename: ::core::option::Option<::prost::alloc::string::String>,
}
/// Reasoning part for assistant messages.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningPart {
    #[prost(string, tag = "1")]
    pub reasoning: ::prost::alloc::string::String,
}
/// StreamChatResponse is a discriminated union response to a StreamChatRequest
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StreamChatResponse {
    #[prost(
        oneof = "stream_chat_response::Response",
        tags = "1, 2, 3, 4, 5, 6, 7, 8, 10"
    )]
    pub response: ::core::option::Option<stream_chat_response::Response>,
}
/// Nested message and enum types in `StreamChatResponse`.
pub mod stream_chat_response {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Response {
        #[prost(message, tag = "1")]
        Finish(super::Finish),
        #[prost(message, tag = "2")]
        Error(super::Error),
        #[prost(message, tag = "3")]
        TextStart(super::TextStart),
        #[prost(message, tag = "4")]
        TextDelta(super::TextDelta),
        #[prost(message, tag = "5")]
        TextEnd(super::TextEnd),
        #[prost(message, tag = "6")]
        ReasoningStart(super::ReasoningStart),
        #[prost(message, tag = "7")]
        ReasoningDelta(super::ReasoningDelta),
        #[prost(message, tag = "8")]
        ReasoningEnd(super::ReasoningEnd),
        #[prost(message, tag = "10")]
        ToolAction(super::ToolAction),
    }
}
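// Illustrative sketch (not generated): folding streamed events into per-id text
// buffers. TextStart/TextDelta/TextEnd share an `id`, so deltas with the same id
// are concatenated; other variants are ignored here for brevity.
#[allow(dead_code)]
fn example_apply_text_event(
    buffers: &mut std::collections::HashMap<String, String>,
    event: &StreamChatResponse,
) {
    match &event.response {
        Some(stream_chat_response::Response::TextStart(start)) => {
            buffers.entry(start.id.clone()).or_default();
        }
        Some(stream_chat_response::Response::TextDelta(delta)) => {
            buffers.entry(delta.id.clone()).or_default().push_str(&delta.delta);
        }
        Some(stream_chat_response::Response::TextEnd(_end)) => {
            // The buffer for `_end.id` now holds the complete text message.
        }
        _ => {}
    }
}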
/// Indicates the end of a chat session
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Finish {
    /// The message ids, in order, of all generated messages for this agent run.
    /// These ids can be used to branch the conversation from that specific message.
    #[prost(string, repeated, tag = "1")]
    pub ordered_message_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
    /// In the case that this is the first agent run in a conversation thread, we also
    /// return the newly generated conversation title
    #[prost(string, optional, tag = "2")]
    pub new_title: ::core::option::Option<::prost::alloc::string::String>,
}
/// An error that occurred during the chat session
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Error {
    #[prost(string, tag = "1")]
    pub message: ::prost::alloc::string::String,
}
/// Indicates the start of a text message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextStart {
    /// Uniquely identifies the text message (e.g. a uuid) so that the client can
    /// merge parallel message streams (should that happen).
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// A delta (continuation) of a text message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextDelta {
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// The next chunk of text
    #[prost(string, tag = "2")]
    pub delta: ::prost::alloc::string::String,
}
/// Indicates the end of a text message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextEnd {
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// Indicates the start of a reasoning message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningStart {
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// A delta (continuation) of a reasoning message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningDelta {
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// The next chunk of reasoning
    #[prost(string, tag = "2")]
    pub delta: ::prost::alloc::string::String,
}
/// Indicates the end of a reasoning message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningEnd {
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// A concise description of a tool call that the agent is making internally.
/// Without revealing too much detail about the tool call, it informs the user what the agent is doing
/// at a high level. The format is `{tool_action_verb} {tool_target}`, for example:
/// "Search channels for My Datasource"
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ToolAction {
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// "Thought", "Read", "Find", "Look-up", etc.
    #[prost(string, tag = "2")]
    pub tool_action_verb: ::prost::alloc::string::String,
    /// "workbook", "channel", "variable", "panel", etc.
    #[prost(string, optional, tag = "3")]
    pub tool_target: ::core::option::Option<::prost::alloc::string::String>,
}
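// Illustrative sketch (not generated): rendering a ToolAction as the documented
// `{tool_action_verb} {tool_target}` display string, falling back to the verb
// alone when no target is set.
#[allow(dead_code)]
fn example_render_tool_action(action: &ToolAction) -> String {
    match &action.tool_target {
        Some(target) => format!("{} {}", action.tool_action_verb, target),
        None => action.tool_action_verb.clone(),
    }
}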
/// Generated client implementations.
pub mod ai_agent_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// AIAgentService provides AI-powered assistance for general operations
    #[derive(Debug, Clone)]
    pub struct AiAgentServiceClient<T> {
        inner: tonic::client::Grpc<T>,
    }
    impl AiAgentServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> AiAgentServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> AiAgentServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            AiAgentServiceClient::new(InterceptedService::new(inner, interceptor))
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// StreamChat handles server-streaming chat for the AI agent
        pub async fn stream_chat(
            &mut self,
            request: impl tonic::IntoRequest<super::StreamChatRequest>,
        ) -> std::result::Result<
            tonic::Response<tonic::codec::Streaming<super::StreamChatResponse>>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/StreamChat",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(GrpcMethod::new("nominal.ai.v1.AIAgentService", "StreamChat"));
            self.inner.server_streaming(req, path, codec).await
        }
        /// GetConversation handles getting a complete conversation, with an optional limit on the number of messages returned
        pub async fn get_conversation(
            &mut self,
            request: impl tonic::IntoRequest<super::GetConversationRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GetConversationResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/GetConversation",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "GetConversation"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// ListConversations handles getting the list of conversations, ordered by most recently updated
        pub async fn list_conversations(
            &mut self,
            request: impl tonic::IntoRequest<super::ListConversationsRequest>,
        ) -> std::result::Result<
            tonic::Response<super::ListConversationsResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/ListConversations",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "ListConversations"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// CreateConversation handles creating a conversation and assigning it a conversation rid
        pub async fn create_conversation(
            &mut self,
            request: impl tonic::IntoRequest<super::CreateConversationRequest>,
        ) -> std::result::Result<
            tonic::Response<super::CreateConversationResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/CreateConversation",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "CreateConversation"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// UpdateConversationMetadata handles updating any metadata associated with a conversation
        pub async fn update_conversation_metadata(
            &mut self,
            request: impl tonic::IntoRequest<super::UpdateConversationMetadataRequest>,
        ) -> std::result::Result<
            tonic::Response<super::UpdateConversationMetadataResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/UpdateConversationMetadata",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.AIAgentService",
                        "UpdateConversationMetadata",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// DeleteConversation handles deleting a specific conversation by conversation rid
        pub async fn delete_conversation(
            &mut self,
            request: impl tonic::IntoRequest<super::DeleteConversationRequest>,
        ) -> std::result::Result<
            tonic::Response<super::DeleteConversationResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/DeleteConversation",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "DeleteConversation"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// GetSnapshotRidByUserMessageId handles resolving the snapshot rid of the workbook at the time the message was sent
        pub async fn get_snapshot_rid_by_user_message_id(
            &mut self,
            request: impl tonic::IntoRequest<super::GetSnapshotRidByUserMessageIdRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GetSnapshotRidByUserMessageIdResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/GetSnapshotRidByUserMessageId",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.AIAgentService",
                        "GetSnapshotRidByUserMessageId",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}
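// Illustrative usage sketch (not generated): creating a conversation and then
// streaming one chat turn against it. The endpoint URL, workspace/workbook RIDs,
// and the message text are placeholders; error handling is collapsed into `?`.
#[allow(dead_code)]
async fn example_chat_round_trip() -> Result<(), Box<dyn std::error::Error>> {
    let mut client = ai_agent_service_client::AiAgentServiceClient::connect(
        "http://localhost:50051",
    )
    .await?;
    // Start a brand new conversation (no old_conversation_rid / previous_message_id).
    let created = client
        .create_conversation(CreateConversationRequest {
            title: "New chat".to_string(),
            workspace_rid: "ri.workspace.example".to_string(),
            old_conversation_rid: None,
            previous_message_id: None,
        })
        .await?
        .into_inner();
    // Send a user message in a workbook context and consume the event stream.
    let mut stream = client
        .stream_chat(StreamChatRequest {
            conversation_rid: created.new_conversation_rid,
            message: Some(UserModelMessage {
                text: vec![UserContentPart {
                    part: Some(user_content_part::Part::Text(TextPart {
                        text: "Summarize this workbook".to_string(),
                    })),
                }],
            }),
            images: vec![],
            context: Some(stream_chat_request::Context::Workbook(WorkbookContext {
                workbook_rid: "ri.workbook.example".to_string(),
                user_presence: None,
            })),
        })
        .await?
        .into_inner();
    while let Some(event) = stream.message().await? {
        match event.response {
            Some(stream_chat_response::Response::TextDelta(d)) => print!("{}", d.delta),
            Some(stream_chat_response::Response::Error(e)) => eprintln!("{}", e.message),
            Some(stream_chat_response::Response::Finish(_)) => break,
            _ => {}
        }
    }
    Ok(())
}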
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct GetProviderStatusRequest {}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct GetProviderStatusResponse {
    /// Timestamp when the last status was determined
    #[prost(message, optional, tag = "1")]
    pub timestamp: ::core::option::Option<
        super::super::super::google::protobuf::Timestamp,
    >,
    /// Status of the most recent health check probe
    #[prost(message, optional, tag = "2")]
    pub last_status: ::core::option::Option<ProviderStatus>,
    /// Aggregated status over the last 30 minutes (DEGRADED if any check failed or exceeded thresholds)
    /// Deprecated: Use aggregated_status instead. This field is kept for backward compatibility.
    #[deprecated]
    #[prost(message, optional, tag = "3")]
    pub aggregated_status_over_last_30m: ::core::option::Option<ProviderStatus>,
    /// Aggregated status over a backend-configurable number of recent checks (DEGRADED if any check failed or exceeded thresholds)
    #[prost(message, optional, tag = "4")]
    pub aggregated_status: ::core::option::Option<ProviderStatus>,
}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct ProviderStatus {
    #[prost(oneof = "provider_status::Status", tags = "1, 2")]
    pub status: ::core::option::Option<provider_status::Status>,
}
/// Nested message and enum types in `ProviderStatus`.
pub mod provider_status {
    #[derive(Clone, Copy, PartialEq, ::prost::Oneof)]
    pub enum Status {
        #[prost(message, tag = "1")]
        Healthy(super::Healthy),
        #[prost(message, tag = "2")]
        Degraded(super::Degraded),
    }
}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct Healthy {}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct Degraded {
    #[prost(enumeration = "DegradationReason", tag = "1")]
    pub reason: i32,
}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct ProviderMetrics {
    #[prost(int32, tag = "1")]
    pub time_to_first_token_ms: i32,
    #[prost(int32, tag = "2")]
    pub total_time_ms: i32,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum DegradationReason {
    Unspecified = 0,
    HighLatency = 1,
    Failures = 2,
    HighLatencyAndFailures = 3,
}
impl DegradationReason {
    /// String value of the enum field names used in the ProtoBuf definition.
    ///
    /// The values are not transformed in any way and thus are considered stable
    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
    pub fn as_str_name(&self) -> &'static str {
        match self {
            Self::Unspecified => "DEGRADATION_REASON_UNSPECIFIED",
            Self::HighLatency => "DEGRADATION_REASON_HIGH_LATENCY",
            Self::Failures => "DEGRADATION_REASON_FAILURES",
            Self::HighLatencyAndFailures => {
                "DEGRADATION_REASON_HIGH_LATENCY_AND_FAILURES"
            }
        }
    }
    /// Creates an enum from field names used in the ProtoBuf definition.
    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
        match value {
            "DEGRADATION_REASON_UNSPECIFIED" => Some(Self::Unspecified),
            "DEGRADATION_REASON_HIGH_LATENCY" => Some(Self::HighLatency),
            "DEGRADATION_REASON_FAILURES" => Some(Self::Failures),
            "DEGRADATION_REASON_HIGH_LATENCY_AND_FAILURES" => {
                Some(Self::HighLatencyAndFailures)
            }
            _ => None,
        }
    }
}
/// Generated client implementations.
pub mod model_provider_health_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// ModelProviderHealthService monitors the health and performance of the backing LLM model provider.
    /// It runs lightweight health checks every 5 minutes to measure provider responsiveness and reliability,
    /// independent of the complexity of user prompts.
    #[derive(Debug, Clone)]
    pub struct ModelProviderHealthServiceClient<T> {
        inner: tonic::client::Grpc<T>,
    }
    impl ModelProviderHealthServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> ModelProviderHealthServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> ModelProviderHealthServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            ModelProviderHealthServiceClient::new(
                InterceptedService::new(inner, interceptor),
            )
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// GetProviderStatus returns the current health status of the model provider based on recent health checks.
        /// The status is HEALTHY if all checks in the last 30 minutes passed latency thresholds,
        /// or DEGRADED if any checks exceeded latency thresholds or failed entirely.
        pub async fn get_provider_status(
            &mut self,
            request: impl tonic::IntoRequest<super::GetProviderStatusRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GetProviderStatusResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.ModelProviderHealthService/GetProviderStatus",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.ModelProviderHealthService",
                        "GetProviderStatus",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}
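// Illustrative sketch (not generated): polling the provider health endpoint and
// reducing the aggregated status to a log line. The endpoint URL is a placeholder.
#[allow(dead_code)]
async fn example_check_provider_health() -> Result<(), Box<dyn std::error::Error>> {
    let mut client =
        model_provider_health_service_client::ModelProviderHealthServiceClient::connect(
            "http://localhost:50051",
        )
        .await?;
    let status = client
        .get_provider_status(GetProviderStatusRequest {})
        .await?
        .into_inner();
    match status.aggregated_status.and_then(|s| s.status) {
        Some(provider_status::Status::Healthy(_)) => println!("provider healthy"),
        Some(provider_status::Status::Degraded(d)) => {
            // `reason` is stored as an i32; try_from recovers the enum variant.
            let reason = DegradationReason::try_from(d.reason)
                .unwrap_or(DegradationReason::Unspecified);
            println!("provider degraded: {}", reason.as_str_name());
        }
        None => println!("no aggregated status reported"),
    }
    Ok(())
}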
/// CreateOrUpdateKnowledgeBaseRequest is a request to create a knowledge base from an attachment in the attachment's
/// workspace. Subsequent calls for the same attachment will overwrite the existing knowledge base.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateOrUpdateKnowledgeBaseRequest {
    #[prost(string, tag = "1")]
    pub attachment_rid: ::prost::alloc::string::String,
    /// summary of the knowledge base, will be used by the LLM to decide when to use it
    #[prost(string, tag = "2")]
    pub summary_description: ::prost::alloc::string::String,
    #[prost(enumeration = "KnowledgeBaseType", optional, tag = "3")]
    pub r#type: ::core::option::Option<i32>,
}
/// CreateOrUpdateKnowledgeBaseResponse is the response to creating/updating a knowledge base
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateOrUpdateKnowledgeBaseResponse {
    #[prost(string, tag = "1")]
    pub knowledge_base_rid: ::prost::alloc::string::String,
}
/// KnowledgeBase represents a knowledge base entry
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct KnowledgeBase {
    #[prost(string, tag = "1")]
    pub knowledge_base_rid: ::prost::alloc::string::String,
    #[prost(string, tag = "2")]
    pub attachment_rid: ::prost::alloc::string::String,
    #[prost(string, tag = "3")]
    pub workspace_rid: ::prost::alloc::string::String,
    #[prost(string, tag = "4")]
    pub summary_description: ::prost::alloc::string::String,
    #[prost(enumeration = "KnowledgeBaseType", tag = "5")]
    pub r#type: i32,
    #[prost(int32, tag = "6")]
    pub version: i32,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListRequest {
    #[prost(string, tag = "1")]
    pub workspace_rid: ::prost::alloc::string::String,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListResponse {
    #[prost(message, repeated, tag = "1")]
    pub knowledge_bases: ::prost::alloc::vec::Vec<KnowledgeBase>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteRequest {
    #[prost(string, tag = "1")]
    pub knowledge_base_rid: ::prost::alloc::string::String,
}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct DeleteResponse {
    #[prost(bool, tag = "1")]
    pub success: bool,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetBatchRequest {
    #[prost(string, repeated, tag = "1")]
    pub knowledge_base_rids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetBatchResponse {
    #[prost(message, repeated, tag = "1")]
    pub knowledge_bases: ::prost::alloc::vec::Vec<KnowledgeBase>,
}
/// GenerateSummaryDescription intentionally returns the generated description to the frontend
/// rather than storing it in the knowledge base directly, because the description needs to be accepted by the user
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GenerateSummaryDescriptionRequest {
    #[prost(string, tag = "1")]
    pub attachment_rid: ::prost::alloc::string::String,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GenerateSummaryDescriptionResponse {
    #[prost(string, tag = "1")]
    pub summary_description: ::prost::alloc::string::String,
}
/// KnowledgeBaseType defines the types of knowledge base
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum KnowledgeBaseType {
    /// defaults to PROMPT
    Unspecified = 0,
    /// knowledge base gets added directly to the prompt (needs to be small enough!)
    Prompt = 1,
    /// knowledge base gets used via vector search on embeddings
    Embedding = 2,
}
impl KnowledgeBaseType {
    /// String value of the enum field names used in the ProtoBuf definition.
    ///
    /// The values are not transformed in any way and thus are considered stable
    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
    pub fn as_str_name(&self) -> &'static str {
        match self {
            Self::Unspecified => "KNOWLEDGE_BASE_TYPE_UNSPECIFIED",
            Self::Prompt => "KNOWLEDGE_BASE_TYPE_PROMPT",
            Self::Embedding => "KNOWLEDGE_BASE_TYPE_EMBEDDING",
        }
    }
    /// Creates an enum from field names used in the ProtoBuf definition.
    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
        match value {
            "KNOWLEDGE_BASE_TYPE_UNSPECIFIED" => Some(Self::Unspecified),
            "KNOWLEDGE_BASE_TYPE_PROMPT" => Some(Self::Prompt),
            "KNOWLEDGE_BASE_TYPE_EMBEDDING" => Some(Self::Embedding),
            _ => None,
        }
    }
}
/// Generated client implementations.
pub mod knowledge_base_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// KnowledgeBaseService provides AI-powered knowledge base management
    #[derive(Debug, Clone)]
    pub struct KnowledgeBaseServiceClient<T> {
        inner: tonic::client::Grpc<T>,
    }
    impl KnowledgeBaseServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> KnowledgeBaseServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> KnowledgeBaseServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            KnowledgeBaseServiceClient::new(InterceptedService::new(inner, interceptor))
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// CreateOrUpdateKnowledgeBase creates a knowledge base in the workspace
        pub async fn create_or_update_knowledge_base(
            &mut self,
            request: impl tonic::IntoRequest<super::CreateOrUpdateKnowledgeBaseRequest>,
        ) -> std::result::Result<
            tonic::Response<super::CreateOrUpdateKnowledgeBaseResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/CreateOrUpdateKnowledgeBase",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.KnowledgeBaseService",
                        "CreateOrUpdateKnowledgeBase",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// List returns all knowledge bases in the specified workspace
        pub async fn list(
            &mut self,
            request: impl tonic::IntoRequest<super::ListRequest>,
        ) -> std::result::Result<tonic::Response<super::ListResponse>, tonic::Status> {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/List",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "List"));
            self.inner.unary(req, path, codec).await
        }
        /// Delete removes a knowledge base by its RID
        pub async fn delete(
            &mut self,
            request: impl tonic::IntoRequest<super::DeleteRequest>,
        ) -> std::result::Result<tonic::Response<super::DeleteResponse>, tonic::Status> {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/Delete",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "Delete"));
            self.inner.unary(req, path, codec).await
        }
        /// GetBatch retrieves multiple knowledge bases by their RIDs
        pub async fn get_batch(
            &mut self,
            request: impl tonic::IntoRequest<super::GetBatchRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GetBatchResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/GetBatch",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "GetBatch"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// GenerateSummaryDescription generates a summary description for an attachment rid
        pub async fn generate_summary_description(
            &mut self,
            request: impl tonic::IntoRequest<super::GenerateSummaryDescriptionRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GenerateSummaryDescriptionResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/GenerateSummaryDescription",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.KnowledgeBaseService",
                        "GenerateSummaryDescription",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}
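// Illustrative sketch (not generated): generating a summary for an attachment and,
// once a user has accepted it, creating (or updating) the knowledge base entry.
// The endpoint URL and attachment RID are placeholders.
#[allow(dead_code)]
async fn example_create_knowledge_base() -> Result<(), Box<dyn std::error::Error>> {
    let mut client = knowledge_base_service_client::KnowledgeBaseServiceClient::connect(
        "http://localhost:50051",
    )
    .await?;
    // Ask the service to draft a summary; it is returned to the caller rather than stored.
    let draft = client
        .generate_summary_description(GenerateSummaryDescriptionRequest {
            attachment_rid: "ri.attachment.example".to_string(),
        })
        .await?
        .into_inner();
    // After user approval, persist the knowledge base with the accepted summary.
    let created = client
        .create_or_update_knowledge_base(CreateOrUpdateKnowledgeBaseRequest {
            attachment_rid: "ri.attachment.example".to_string(),
            summary_description: draft.summary_description,
            r#type: Some(KnowledgeBaseType::Prompt as i32),
        })
        .await?
        .into_inner();
    println!("knowledge base rid: {}", created.knowledge_base_rid);
    Ok(())
}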