nominal_api/proto/
nominal.ai.v1.rs

1// This file is @generated by prost-build.
/// StreamChatRequest is a request to stream chat messages for AI agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StreamChatRequest {
    /// The conversation ID
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    /// The user message to append to the conversation
    #[prost(message, optional, tag = "2")]
    pub message: ::core::option::Option<UserModelMessage>,
    /// Optional: image files to provide to the agent
    #[prost(message, repeated, tag = "3")]
    pub images: ::prost::alloc::vec::Vec<ImagePart>,
    /// Context-specific fields based on the oneofKind.
    #[prost(oneof = "stream_chat_request::Context", tags = "4, 5")]
    pub context: ::core::option::Option<stream_chat_request::Context>,
}
/// Nested message and enum types in `StreamChatRequest`.
pub mod stream_chat_request {
    /// Context-specific fields based on the oneofKind.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Context {
        /// Workbook-scoped context (proto field 4).
        #[prost(message, tag = "4")]
        Workbook(super::WorkbookContext),
        /// Global (no-workbook) context (proto field 5).
        #[prost(message, tag = "5")]
        Global(super::GlobalContext),
    }
}
/// WorkbookContext contains workbook-specific context fields.
/// Used as the `Workbook` variant of `stream_chat_request::Context`.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct WorkbookContext {
    /// RID of the workbook to use for context
    #[prost(string, tag = "1")]
    pub workbook_rid: ::prost::alloc::string::String,
    /// Optional: the user's presence in the workbook
    #[prost(message, optional, tag = "2")]
    pub user_presence: ::core::option::Option<WorkbookUserPresence>,
}
/// DefaultContext (no context).
/// Empty marker message used as the `Global` variant of `stream_chat_request::Context`.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct GlobalContext {}
/// WorkbookUserPresence contains the user's presence in the workbook
/// which is used to describe what the user is viewing at the time of the message.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct WorkbookUserPresence {
    /// Index of the workbook tab the user is viewing, if any.
    #[prost(int32, optional, tag = "1")]
    pub tab_index: ::core::option::Option<i32>,
    /// Time range the user is viewing, if any.
    #[prost(message, optional, tag = "2")]
    pub range: ::core::option::Option<TimeRange>,
}
/// CreateConversation request will create a new conversation thread.
/// If old conversation id is not set, a brand new, clear chat is created.
/// If old conversation id is set without a previous message id, the full conversation thread will be copied.
/// If old conversation id is set with a previous message id, the conversation thread up until that message will be copied.
/// The above case is useful for branching a conversation into a new thread.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateConversationRequest {
    /// Title for the new conversation.
    #[prost(string, tag = "1")]
    pub title: ::prost::alloc::string::String,
    /// Workspace the conversation belongs to.
    #[prost(string, tag = "2")]
    pub workspace_rid: ::prost::alloc::string::String,
    /// Optional: existing conversation to copy/branch from (see message doc above).
    #[prost(string, optional, tag = "3")]
    pub old_conversation_rid: ::core::option::Option<::prost::alloc::string::String>,
    /// Optional: message in the old conversation to branch at.
    #[prost(string, optional, tag = "4")]
    pub previous_message_id: ::core::option::Option<::prost::alloc::string::String>,
}
/// CreateConversationResponse will return the conversation id for the new conversation
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateConversationResponse {
    /// RID assigned to the newly created conversation.
    #[prost(string, tag = "1")]
    pub new_conversation_rid: ::prost::alloc::string::String,
}
/// Request to update metadata (currently the title) of an existing conversation.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UpdateConversationMetadataRequest {
    /// New title for the conversation.
    #[prost(string, tag = "1")]
    pub title: ::prost::alloc::string::String,
    /// RID of the conversation to update.
    #[prost(string, tag = "2")]
    pub conversation_rid: ::prost::alloc::string::String,
}
/// Empty acknowledgement for `UpdateConversationMetadata`.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct UpdateConversationMetadataResponse {}
/// Request to delete a conversation by its RID.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteConversationRequest {
    /// RID of the conversation to delete.
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
}
/// Empty acknowledgement for `DeleteConversation`.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct DeleteConversationResponse {}
/// a GetConversationRequest allows you to retrieve a subset of messages from a conversation thread represented
/// by provided rid. To start from a particular message - you can also provide a message id.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetConversationRequest {
    /// RID of the conversation to fetch.
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    /// Optional: message id to start the page from.
    #[prost(string, optional, tag = "2")]
    pub page_start_message_id: ::core::option::Option<::prost::alloc::string::String>,
    /// Optional: maximum number of messages to return.
    #[prost(int32, optional, tag = "3")]
    pub max_message_count: ::core::option::Option<i32>,
}
/// Model message with id allows you to identify the message ID of a given message
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ModelMessageWithId {
    /// Unique id of the message within the conversation.
    #[prost(string, tag = "3")]
    pub message_id: ::prost::alloc::string::String,
    /// The payload: either a model message or a tool action.
    #[prost(oneof = "model_message_with_id::Content", tags = "1, 2")]
    pub content: ::core::option::Option<model_message_with_id::Content>,
}
/// Nested message and enum types in `ModelMessageWithId`.
pub mod model_message_with_id {
    /// Payload of a `ModelMessageWithId`.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Content {
        /// A user or assistant message.
        #[prost(message, tag = "1")]
        Message(super::ModelMessage),
        /// A high-level description of a tool call the agent made.
        #[prost(message, tag = "2")]
        ToolAction(super::ToolAction),
    }
}
/// Response containing a page of conversation messages plus the conversation's metadata.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetConversationResponse {
    /// Messages in conversation order.
    #[prost(message, repeated, tag = "1")]
    pub ordered_messages: ::prost::alloc::vec::Vec<ModelMessageWithId>,
    /// Metadata (rid, title, timestamps) for the conversation.
    #[prost(message, optional, tag = "2")]
    pub conversation_metadata: ::core::option::Option<ConversationMetadata>,
}
/// Will generate all conversation threads that this user has in this workspace
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListConversationsRequest {
    /// Workspace to list conversations for.
    #[prost(string, tag = "1")]
    pub workspace_rid: ::prost::alloc::string::String,
}
/// Metadata describing a single conversation thread.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ConversationMetadata {
    /// RID identifying the conversation.
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    /// Human-readable conversation title.
    #[prost(string, tag = "2")]
    pub title: ::prost::alloc::string::String,
    /// When the conversation was created.
    #[prost(message, optional, tag = "3")]
    pub created_at: ::core::option::Option<
        super::super::super::google::protobuf::Timestamp,
    >,
    /// When the conversation was last updated.
    #[prost(message, optional, tag = "4")]
    pub last_updated_at: ::core::option::Option<
        super::super::super::google::protobuf::Timestamp,
    >,
}
/// ListConversationsResponse is a list of conversations that can be used in a call to GetConversationRequest
/// to get a full conversation from storage. These are ordered by creation time.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListConversationsResponse {
    /// Conversation metadata entries, ordered by creation time.
    #[prost(message, repeated, tag = "1")]
    pub conversations: ::prost::alloc::vec::Vec<ConversationMetadata>,
}
/// A time range bounded by optional start and end timestamps.
/// NOTE(review): inclusivity of the bounds is not specified here — confirm with the service.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct TimeRange {
    /// Start of the range, if bounded.
    #[prost(message, optional, tag = "1")]
    pub range_start: ::core::option::Option<Timestamp>,
    /// End of the range, if bounded.
    #[prost(message, optional, tag = "2")]
    pub range_end: ::core::option::Option<Timestamp>,
}
/// A timestamp as a seconds + nanoseconds pair.
///
/// NOTE(review): both fields are `int32`, unlike `google.protobuf.Timestamp`
/// (which uses `int64` seconds). If `seconds` is seconds-since-epoch it will
/// overflow in 2038 — confirm the intended epoch/range with the proto authors.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct Timestamp {
    /// Seconds component.
    #[prost(int32, tag = "1")]
    pub seconds: i32,
    /// Nanoseconds component.
    #[prost(int32, tag = "2")]
    pub nanoseconds: i32,
}
/// ModelMessage is a discriminated union of system, user, assistant, and tool messages.
/// Each message type has its own structure and content.
///
/// NOTE(review): only user and assistant variants are present (tags 1-2),
/// despite the mention of system/tool messages above.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ModelMessage {
    /// The concrete message kind.
    #[prost(oneof = "model_message::Kind", tags = "1, 2")]
    pub kind: ::core::option::Option<model_message::Kind>,
}
/// Nested message and enum types in `ModelMessage`.
pub mod model_message {
    /// The concrete kind of a `ModelMessage`.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Kind {
        /// A message authored by the user.
        #[prost(message, tag = "1")]
        User(super::UserModelMessage),
        /// A message authored by the assistant.
        #[prost(message, tag = "2")]
        Assistant(super::AssistantModelMessage),
    }
}
/// A user message containing text
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UserModelMessage {
    /// Content parts of the user message.
    /// NOTE(review): field is named `text` but holds structured content parts.
    #[prost(message, repeated, tag = "1")]
    pub text: ::prost::alloc::vec::Vec<UserContentPart>,
}
/// An assistant message containing text
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AssistantModelMessage {
    /// Content parts (text and/or reasoning) of the assistant message.
    #[prost(message, repeated, tag = "1")]
    pub content_parts: ::prost::alloc::vec::Vec<AssistantContentPart>,
}
/// A single content part of a user message.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UserContentPart {
    /// The concrete part; currently only text (tag 1).
    #[prost(oneof = "user_content_part::Part", tags = "1")]
    pub part: ::core::option::Option<user_content_part::Part>,
}
/// Nested message and enum types in `UserContentPart`.
pub mod user_content_part {
    /// Kinds of user content parts.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Part {
        /// Plain text content.
        #[prost(message, tag = "1")]
        Text(super::TextPart),
    }
}
/// Content part for assistant messages: text or reasoning.
/// NOTE(review): the original doc also mentioned "mutation", but no such
/// variant exists in this oneof (tags 1-2 only).
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AssistantContentPart {
    /// The concrete part.
    #[prost(oneof = "assistant_content_part::Part", tags = "1, 2")]
    pub part: ::core::option::Option<assistant_content_part::Part>,
}
/// Nested message and enum types in `AssistantContentPart`.
pub mod assistant_content_part {
    /// Kinds of assistant content parts.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Part {
        /// Plain text content.
        #[prost(message, tag = "1")]
        Text(super::TextPart),
        /// Model reasoning content.
        #[prost(message, tag = "2")]
        Reasoning(super::ReasoningPart),
    }
}
/// Text part for user or assistant messages.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextPart {
    /// The text content.
    #[prost(string, tag = "1")]
    pub text: ::prost::alloc::string::String,
}
/// User-supplied image part.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ImagePart {
    /// The base64-encoded image data
    /// NOTE(review): the field type is raw protobuf `bytes`; confirm whether the
    /// payload is base64 text (as the comment above says) or raw image bytes.
    #[prost(bytes = "vec", tag = "1")]
    pub data: ::prost::alloc::vec::Vec<u8>,
    /// The media type of the image (e.g. "image/png", "image/jpeg")
    #[prost(string, optional, tag = "2")]
    pub media_type: ::core::option::Option<::prost::alloc::string::String>,
    /// Optional: the filename of the image
    #[prost(string, optional, tag = "3")]
    pub filename: ::core::option::Option<::prost::alloc::string::String>,
}
/// Reasoning part for assistant messages.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningPart {
    /// The reasoning text.
    #[prost(string, tag = "1")]
    pub reasoning: ::prost::alloc::string::String,
}
/// StreamChatResponse is a discriminated union response to a StreamChatRequest
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StreamChatResponse {
    /// One streamed event from the agent run.
    #[prost(
        oneof = "stream_chat_response::Response",
        tags = "1, 2, 3, 4, 5, 6, 7, 8, 9, 10"
    )]
    pub response: ::core::option::Option<stream_chat_response::Response>,
}
/// Nested message and enum types in `StreamChatResponse`.
pub mod stream_chat_response {
    /// Event kinds emitted over the stream. Text and reasoning events carry an
    /// `id` so the client can correlate start/delta/end chunks.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Response {
        /// The chat session ended; carries generated message ids (and possibly a title).
        #[prost(message, tag = "1")]
        Finish(super::Finish),
        /// An error occurred during the chat session.
        #[prost(message, tag = "2")]
        Error(super::Error),
        /// Start of a text message.
        #[prost(message, tag = "3")]
        TextStart(super::TextStart),
        /// Incremental chunk of a text message.
        #[prost(message, tag = "4")]
        TextDelta(super::TextDelta),
        /// End of a text message.
        #[prost(message, tag = "5")]
        TextEnd(super::TextEnd),
        /// Start of a reasoning message.
        #[prost(message, tag = "6")]
        ReasoningStart(super::ReasoningStart),
        /// Incremental chunk of a reasoning message.
        #[prost(message, tag = "7")]
        ReasoningDelta(super::ReasoningDelta),
        /// End of a reasoning message.
        #[prost(message, tag = "8")]
        ReasoningEnd(super::ReasoningEnd),
        /// this will be deprecated in favor of MCP-based mutations
        #[prost(message, tag = "9")]
        WorkbookMutation(super::WorkbookMutation),
        /// High-level description of a tool call the agent is making.
        #[prost(message, tag = "10")]
        ToolAction(super::ToolAction),
    }
}
/// Indicates the end of a chat session
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Finish {
    /// The message ids in order of all generated messages for this agent run
    /// These ids can be used to branch a message from that specific message
    /// (see `CreateConversationRequest.previous_message_id`).
    #[prost(string, repeated, tag = "1")]
    pub ordered_message_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
    /// In the case that this is the first agent run in a conversation thread, we also
    /// return the new conversation title generated
    #[prost(string, optional, tag = "2")]
    pub new_title: ::core::option::Option<::prost::alloc::string::String>,
}
/// An error that occurred during the chat session
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Error {
    /// Human-readable error description.
    #[prost(string, tag = "1")]
    pub message: ::prost::alloc::string::String,
}
/// Indicates the start of a text message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextStart {
    /// uniquely identifies the text message (e.g. uuid) so that the client can
    /// merge parallel message streams (if it happens).
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// A delta (continuation) of a text message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextDelta {
    /// Matches the `id` of the corresponding `TextStart`.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// The next chunk of text
    #[prost(string, tag = "2")]
    pub delta: ::prost::alloc::string::String,
}
/// Indicates the end of a text message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextEnd {
    /// Matches the `id` of the corresponding `TextStart`.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// Indicates the start of a reasoning message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningStart {
    /// Uniquely identifies the reasoning message stream.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// A delta (continuation) of a reasoning message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningDelta {
    /// Matches the `id` of the corresponding `ReasoningStart`.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// The next chunk of reasoning
    #[prost(string, tag = "2")]
    pub delta: ::prost::alloc::string::String,
}
/// Indicates the end of a reasoning message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningEnd {
    /// Matches the `id` of the corresponding `ReasoningStart`.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// Add a new tab to the workbook.
/// Part of the `workbook_mutation::Mutation` oneof.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AddTabMutation {
    /// if tab_name is not provided, we'll name it "New Tab"
    #[prost(string, optional, tag = "1")]
    pub tab_name: ::core::option::Option<::prost::alloc::string::String>,
}
/// this is the "result" of the mutation
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AddOrUpdatePanelMutation {
    /// JSON-serialized representation of IVizDefinition
    #[prost(string, tag = "1")]
    pub panel_as_json: ::prost::alloc::string::String,
    /// Identifier of the panel to add or update.
    #[prost(string, tag = "2")]
    pub panel_id: ::prost::alloc::string::String,
    /// Index of the tab the panel lives on.
    #[prost(int32, tag = "3")]
    pub tab_index: i32,
}
/// Removes the panels with the given ids from the workbook.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RemovePanelsMutation {
    /// Ids of the panels to remove.
    #[prost(string, repeated, tag = "1")]
    pub panel_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
/// AddOrReplaceVariableMutation is a mutation to add or replace a variable in the workbook
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AddOrReplaceVariableMutation {
    /// scout_compute_api_ComputeSpecWithContext (this needs to be resolved)
    /// be careful: this uses the scout_compute_api version of ComputeSpecWithContext, which stores the spec as "seriesNode"
    /// and is NOT the same as ComputeSpecWithContext that is stored in INotebook.
    #[prost(string, tag = "1")]
    pub compute_spec_as_json: ::prost::alloc::string::String,
    /// if variable_name is not provided, we'll assume it's a new variable and auto-generate a unique name
    #[prost(string, optional, tag = "2")]
    pub variable_name: ::core::option::Option<::prost::alloc::string::String>,
    /// Optional: human-readable display name for the variable.
    #[prost(string, optional, tag = "3")]
    pub display_name: ::core::option::Option<::prost::alloc::string::String>,
}
/// DeleteVariablesMutation is a mutation to delete variables from the workbook
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteVariablesMutation {
    /// Names of the variables to delete.
    #[prost(string, repeated, tag = "1")]
    pub variable_names: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
/// WorkbookMutation is a mutation to the workbook
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct WorkbookMutation {
    /// Uniquely identifies this mutation event.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// The concrete mutation to apply.
    #[prost(oneof = "workbook_mutation::Mutation", tags = "2, 3, 4, 5, 6")]
    pub mutation: ::core::option::Option<workbook_mutation::Mutation>,
}
/// Nested message and enum types in `WorkbookMutation`.
pub mod workbook_mutation {
    /// Kinds of workbook mutations.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Mutation {
        /// Add a new tab to the workbook.
        #[prost(message, tag = "2")]
        AddTab(super::AddTabMutation),
        /// Add a panel, or update an existing one.
        #[prost(message, tag = "3")]
        AddOrUpdatePanel(super::AddOrUpdatePanelMutation),
        /// Remove panels by id.
        #[prost(message, tag = "4")]
        RemovePanels(super::RemovePanelsMutation),
        /// Add a variable, or replace an existing one.
        #[prost(message, tag = "5")]
        AddOrReplaceVariable(super::AddOrReplaceVariableMutation),
        /// Delete variables by name.
        #[prost(message, tag = "6")]
        DeleteVariables(super::DeleteVariablesMutation),
    }
}
/// this is a concise description of a tool call that the agent is making internally
/// without revealing too much detail about the tool call, it informs the user what the agent is doing
/// at a high level. the format is: `{tool_action_verb} {tool_target}` for example:
/// "Search channels for My Datasource"
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ToolAction {
    /// Unique id for this tool action.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// "Thought", "Read", "Find", "Look-up", etc.
    #[prost(string, tag = "2")]
    pub tool_action_verb: ::prost::alloc::string::String,
    /// "workbook", "channel", "variable", "panel", etc.
    #[prost(string, optional, tag = "3")]
    pub tool_target: ::core::option::Option<::prost::alloc::string::String>,
}
/// Generated client implementations.
pub mod ai_agent_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// AIAgentService provides AI-powered assistance for general operations
    #[derive(Debug, Clone)]
    pub struct AiAgentServiceClient<T> {
        // Generic tonic gRPC client over transport `T`.
        inner: tonic::client::Grpc<T>,
    }
    impl AiAgentServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> AiAgentServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        /// Wrap an already-constructed transport service.
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        /// Like `new`, but routes requests to the given origin URI.
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        /// Wrap the transport with a request interceptor (e.g. for auth headers).
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> AiAgentServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            AiAgentServiceClient::new(InterceptedService::new(inner, interceptor))
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// StreamChat handles bidirectional streaming chat for AI agent
        ///
        /// NOTE(review): this is wired as a *server-streaming* RPC (single request,
        /// streamed responses), not bidirectional — confirm doc vs proto definition.
        pub async fn stream_chat(
            &mut self,
            request: impl tonic::IntoRequest<super::StreamChatRequest>,
        ) -> std::result::Result<
            tonic::Response<tonic::codec::Streaming<super::StreamChatResponse>>,
            tonic::Status,
        > {
            // Wait for the underlying service to be ready before sending.
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/StreamChat",
            );
            let mut req = request.into_request();
            // Attach method metadata so interceptors/telemetry can identify the RPC.
            req.extensions_mut()
                .insert(GrpcMethod::new("nominal.ai.v1.AIAgentService", "StreamChat"));
            self.inner.server_streaming(req, path, codec).await
        }
        /// GetConversation handles getting a complete conversation list, with an optional limit on number of messages returned
        pub async fn get_conversation(
            &mut self,
            request: impl tonic::IntoRequest<super::GetConversationRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GetConversationResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/GetConversation",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "GetConversation"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// ListConversations handles getting the list of conversation ids ordered by most recently updated
        pub async fn list_conversations(
            &mut self,
            request: impl tonic::IntoRequest<super::ListConversationsRequest>,
        ) -> std::result::Result<
            tonic::Response<super::ListConversationsResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/ListConversations",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "ListConversations"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// CreateConversation handles creating a conversation and assigning it a conversation rid
        pub async fn create_conversation(
            &mut self,
            request: impl tonic::IntoRequest<super::CreateConversationRequest>,
        ) -> std::result::Result<
            tonic::Response<super::CreateConversationResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/CreateConversation",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "CreateConversation"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// UpdateConversationMetadata handles updating any metadata associated with a conversation
        pub async fn update_conversation_metadata(
            &mut self,
            request: impl tonic::IntoRequest<super::UpdateConversationMetadataRequest>,
        ) -> std::result::Result<
            tonic::Response<super::UpdateConversationMetadataResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/UpdateConversationMetadata",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.AIAgentService",
                        "UpdateConversationMetadata",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// DeleteConversation handles deleting a specific conversation by conversation rid
        pub async fn delete_conversation(
            &mut self,
            request: impl tonic::IntoRequest<super::DeleteConversationRequest>,
        ) -> std::result::Result<
            tonic::Response<super::DeleteConversationResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/DeleteConversation",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "DeleteConversation"),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}
/// Empty request for the provider status query.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct GetProviderStatusRequest {}
/// Health status of the provider: instantaneous and 30-minute aggregated views.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct GetProviderStatusResponse {
    /// Timestamp when the last status was determined
    #[prost(message, optional, tag = "1")]
    pub timestamp: ::core::option::Option<
        super::super::super::google::protobuf::Timestamp,
    >,
    /// Status of the most recent health check probe
    #[prost(message, optional, tag = "2")]
    pub last_status: ::core::option::Option<ProviderStatus>,
    /// Aggregated status over the last 30 minutes (DEGRADED if any check failed or exceeded thresholds)
    #[prost(message, optional, tag = "3")]
    pub aggregated_status_over_last_30m: ::core::option::Option<ProviderStatus>,
}
/// Discriminated union: the provider is either healthy or degraded.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct ProviderStatus {
    /// The health state, when known.
    #[prost(oneof = "provider_status::Status", tags = "1, 2")]
    pub status: ::core::option::Option<provider_status::Status>,
}
/// Nested message and enum types in `ProviderStatus`.
pub mod provider_status {
    /// Health state of the provider.
    #[derive(Clone, Copy, PartialEq, ::prost::Oneof)]
    pub enum Status {
        /// Provider is operating normally.
        #[prost(message, tag = "1")]
        Healthy(super::Healthy),
        /// Provider is degraded; see `Degraded::reason`.
        #[prost(message, tag = "2")]
        Degraded(super::Degraded),
    }
}
/// Empty marker message: the provider is healthy.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct Healthy {}
/// Indicates the provider is degraded, with a reason code.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct Degraded {
    /// Raw enum value; interpret via `DegradationReason` (prost stores enums as i32).
    #[prost(enumeration = "DegradationReason", tag = "1")]
    pub reason: i32,
}
/// ProviderMetrics captures timing measurements for a provider request.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct ProviderMetrics {
    /// Milliseconds until the first token was received.
    #[prost(int32, tag = "1")]
    pub time_to_first_token_ms: i32,
    /// Total request duration in milliseconds.
    #[prost(int32, tag = "2")]
    pub total_time_ms: i32,
}
/// DegradationReason explains why a health check reported a degraded status.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum DegradationReason {
    /// Default/unset value (proto3 zero variant).
    Unspecified = 0,
    /// Checks exceeded latency thresholds.
    HighLatency = 1,
    /// Checks failed outright.
    Failures = 2,
    /// Both high latency and failures were observed.
    HighLatencyAndFailures = 3,
}
740impl DegradationReason {
741    /// String value of the enum field names used in the ProtoBuf definition.
742    ///
743    /// The values are not transformed in any way and thus are considered stable
744    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
745    pub fn as_str_name(&self) -> &'static str {
746        match self {
747            Self::Unspecified => "DEGRADATION_REASON_UNSPECIFIED",
748            Self::HighLatency => "DEGRADATION_REASON_HIGH_LATENCY",
749            Self::Failures => "DEGRADATION_REASON_FAILURES",
750            Self::HighLatencyAndFailures => {
751                "DEGRADATION_REASON_HIGH_LATENCY_AND_FAILURES"
752            }
753        }
754    }
755    /// Creates an enum from field names used in the ProtoBuf definition.
756    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
757        match value {
758            "DEGRADATION_REASON_UNSPECIFIED" => Some(Self::Unspecified),
759            "DEGRADATION_REASON_HIGH_LATENCY" => Some(Self::HighLatency),
760            "DEGRADATION_REASON_FAILURES" => Some(Self::Failures),
761            "DEGRADATION_REASON_HIGH_LATENCY_AND_FAILURES" => {
762                Some(Self::HighLatencyAndFailures)
763            }
764            _ => None,
765        }
766    }
767}
/// Generated client implementations.
///
/// NOTE: this file is @generated by prost-build/tonic-build; hand edits will
/// be lost on regeneration — change the .proto definition instead.
pub mod model_provider_health_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// ModelProviderHealthService monitors the health and performance of the backing LLM model provider.
    /// It runs lightweight health checks every 5 minutes to measure provider responsiveness and reliability,
    /// independent of the complexity of user prompts.
    #[derive(Debug, Clone)]
    pub struct ModelProviderHealthServiceClient<T> {
        inner: tonic::client::Grpc<T>,
    }
    impl ModelProviderHealthServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> ModelProviderHealthServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        /// Wrap an already-constructed transport/service in a client.
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        /// Like `new`, but requests are sent with the given origin `Uri`.
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        /// Wrap the transport with a tonic `Interceptor` that runs on every request.
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> ModelProviderHealthServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            ModelProviderHealthServiceClient::new(
                InterceptedService::new(inner, interceptor),
            )
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// GetProviderStatus returns the current health status of the model provider based on recent health checks.
        /// The status is HEALTHY if all checks in the last 30 minutes passed latency thresholds,
        /// or DEGRADED if any checks exceeded latency thresholds or failed entirely.
        pub async fn get_provider_status(
            &mut self,
            request: impl tonic::IntoRequest<super::GetProviderStatusRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GetProviderStatusResponse>,
            tonic::Status,
        > {
            // Wait for the underlying service to be ready before encoding the
            // request; a not-ready transport is surfaced as Status::unknown.
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.ModelProviderHealthService/GetProviderStatus",
            );
            let mut req = request.into_request();
            // Record the service/method name in the request extensions so
            // middleware (e.g. interceptors, tracing) can identify the RPC.
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.ModelProviderHealthService",
                        "GetProviderStatus",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}
/// IsAiEnabledForUserRequest is the (empty) request for `IsAIEnabledForUser`;
/// the target user presumably comes from call credentials — TODO confirm.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct IsAiEnabledForUserRequest {}
/// IsAiEnabledForUserResponse answers the `IsAIEnabledForUser` check.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct IsAiEnabledForUserResponse {
    /// True when AI features are enabled for the user.
    #[prost(bool, tag = "1")]
    pub is_enabled: bool,
}
/// Generated client implementations.
///
/// NOTE: this file is @generated by prost-build/tonic-build; hand edits will
/// be lost on regeneration — change the .proto definition instead.
pub mod ai_features_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// AIFeaturesService provides information about enabled AI features
    #[derive(Debug, Clone)]
    pub struct AiFeaturesServiceClient<T> {
        inner: tonic::client::Grpc<T>,
    }
    impl AiFeaturesServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> AiFeaturesServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        /// Wrap an already-constructed transport/service in a client.
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        /// Like `new`, but requests are sent with the given origin `Uri`.
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        /// Wrap the transport with a tonic `Interceptor` that runs on every request.
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> AiFeaturesServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            AiFeaturesServiceClient::new(InterceptedService::new(inner, interceptor))
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// IsAIEnabledForUser can be used to check if AI is enabled for a specific user
        pub async fn is_ai_enabled_for_user(
            &mut self,
            request: impl tonic::IntoRequest<super::IsAiEnabledForUserRequest>,
        ) -> std::result::Result<
            tonic::Response<super::IsAiEnabledForUserResponse>,
            tonic::Status,
        > {
            // Wait for the underlying service to be ready before encoding the
            // request; a not-ready transport is surfaced as Status::unknown.
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIFeaturesService/IsAIEnabledForUser",
            );
            let mut req = request.into_request();
            // Record the service/method name in the request extensions so
            // middleware (e.g. interceptors, tracing) can identify the RPC.
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.AIFeaturesService",
                        "IsAIEnabledForUser",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}
/// CreateOrUpdateKnowledgeBaseRequest is a request to create a knowledge from an attachment in the attachment's
/// workspace. Subsequent calls for the same attachment will overwrite the existing knowledge base.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateOrUpdateKnowledgeBaseRequest {
    /// RID of the attachment to build the knowledge base from.
    #[prost(string, tag = "1")]
    pub attachment_rid: ::prost::alloc::string::String,
    /// summary of the knowledge base, will be used by the LLM to decide when to use it
    #[prost(string, tag = "2")]
    pub summary_description: ::prost::alloc::string::String,
    /// Optional i32 wire value of [`KnowledgeBaseType`]; per the enum docs,
    /// UNSPECIFIED defaults to PROMPT.
    #[prost(enumeration = "KnowledgeBaseType", optional, tag = "3")]
    pub r#type: ::core::option::Option<i32>,
}
/// CreateOrUpdateKnowledgeBaseResponse is the response to creating/updating a knowledge base
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateOrUpdateKnowledgeBaseResponse {
    /// RID of the created or updated knowledge base.
    #[prost(string, tag = "1")]
    pub knowledge_base_rid: ::prost::alloc::string::String,
}
/// KnowledgeBase represents a knowledge base entry
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct KnowledgeBase {
    /// RID identifying this knowledge base.
    #[prost(string, tag = "1")]
    pub knowledge_base_rid: ::prost::alloc::string::String,
    /// RID of the attachment this knowledge base was built from.
    #[prost(string, tag = "2")]
    pub attachment_rid: ::prost::alloc::string::String,
    /// RID of the workspace the knowledge base lives in.
    #[prost(string, tag = "3")]
    pub workspace_rid: ::prost::alloc::string::String,
    /// Summary used by the LLM to decide when to use this knowledge base.
    #[prost(string, tag = "4")]
    pub summary_description: ::prost::alloc::string::String,
    /// i32 wire value of [`KnowledgeBaseType`] (prost's enum-field convention).
    #[prost(enumeration = "KnowledgeBaseType", tag = "5")]
    pub r#type: i32,
    /// Version number of this entry.
    #[prost(int32, tag = "6")]
    pub version: i32,
}
/// ListRequest asks for all knowledge bases in a workspace.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListRequest {
    /// RID of the workspace to list.
    #[prost(string, tag = "1")]
    pub workspace_rid: ::prost::alloc::string::String,
}
/// ListResponse carries the knowledge bases found in the workspace.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListResponse {
    #[prost(message, repeated, tag = "1")]
    pub knowledge_bases: ::prost::alloc::vec::Vec<KnowledgeBase>,
}
/// DeleteRequest identifies the knowledge base to remove.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteRequest {
    /// RID of the knowledge base to delete.
    #[prost(string, tag = "1")]
    pub knowledge_base_rid: ::prost::alloc::string::String,
}
/// DeleteResponse reports the outcome of a `Delete` call.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct DeleteResponse {
    /// Whether the delete succeeded.
    #[prost(bool, tag = "1")]
    pub success: bool,
}
/// GetBatchRequest fetches multiple knowledge bases by RID in one call.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetBatchRequest {
    /// RIDs of the knowledge bases to fetch.
    #[prost(string, repeated, tag = "1")]
    pub knowledge_base_rids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
/// GetBatchResponse carries the knowledge bases resolved by `GetBatch`.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetBatchResponse {
    #[prost(message, repeated, tag = "1")]
    pub knowledge_bases: ::prost::alloc::vec::Vec<KnowledgeBase>,
}
/// generate summary description is intentionally going to return the generated description to the frontend
/// rather than storing it in the knowledge base directly because the description needs to be accepted by the user
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GenerateSummaryDescriptionRequest {
    /// RID of the attachment to summarize.
    #[prost(string, tag = "1")]
    pub attachment_rid: ::prost::alloc::string::String,
}
/// GenerateSummaryDescriptionResponse carries the generated summary, which the
/// user accepts before it is stored (see the request's doc comment).
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GenerateSummaryDescriptionResponse {
    #[prost(string, tag = "1")]
    pub summary_description: ::prost::alloc::string::String,
}
/// KnowledgeBaseType defines the types of knowledge base
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum KnowledgeBaseType {
    /// Unset; treated as PROMPT by default.
    Unspecified = 0,
    /// Knowledge base content is added directly to the prompt
    /// (so it needs to be small enough to fit!).
    Prompt = 1,
    /// Knowledge base is used via vector search on embeddings.
    Embedding = 2,
}
1116impl KnowledgeBaseType {
1117    /// String value of the enum field names used in the ProtoBuf definition.
1118    ///
1119    /// The values are not transformed in any way and thus are considered stable
1120    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
1121    pub fn as_str_name(&self) -> &'static str {
1122        match self {
1123            Self::Unspecified => "KNOWLEDGE_BASE_TYPE_UNSPECIFIED",
1124            Self::Prompt => "KNOWLEDGE_BASE_TYPE_PROMPT",
1125            Self::Embedding => "KNOWLEDGE_BASE_TYPE_EMBEDDING",
1126        }
1127    }
1128    /// Creates an enum from field names used in the ProtoBuf definition.
1129    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
1130        match value {
1131            "KNOWLEDGE_BASE_TYPE_UNSPECIFIED" => Some(Self::Unspecified),
1132            "KNOWLEDGE_BASE_TYPE_PROMPT" => Some(Self::Prompt),
1133            "KNOWLEDGE_BASE_TYPE_EMBEDDING" => Some(Self::Embedding),
1134            _ => None,
1135        }
1136    }
1137}
/// Generated client implementations.
///
/// NOTE: this file is @generated by prost-build/tonic-build; hand edits will
/// be lost on regeneration — change the .proto definition instead.
pub mod knowledge_base_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// KnowledgeBaseService provides AI-powered knowledge base management
    #[derive(Debug, Clone)]
    pub struct KnowledgeBaseServiceClient<T> {
        inner: tonic::client::Grpc<T>,
    }
    impl KnowledgeBaseServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> KnowledgeBaseServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        /// Wrap an already-constructed transport/service in a client.
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        /// Like `new`, but requests are sent with the given origin `Uri`.
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        /// Wrap the transport with a tonic `Interceptor` that runs on every request.
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> KnowledgeBaseServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            KnowledgeBaseServiceClient::new(InterceptedService::new(inner, interceptor))
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// CreateOrUpdateKnowledgeBase creates a knowledge base in the workspace
        pub async fn create_or_update_knowledge_base(
            &mut self,
            request: impl tonic::IntoRequest<super::CreateOrUpdateKnowledgeBaseRequest>,
        ) -> std::result::Result<
            tonic::Response<super::CreateOrUpdateKnowledgeBaseResponse>,
            tonic::Status,
        > {
            // Each unary RPC below follows the same shape: wait for the
            // transport to be ready (not-ready surfaces as Status::unknown),
            // then record the service/method name in the request extensions so
            // middleware (e.g. interceptors, tracing) can identify the RPC.
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/CreateOrUpdateKnowledgeBase",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.KnowledgeBaseService",
                        "CreateOrUpdateKnowledgeBase",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// List returns all knowledge bases in the specified workspace
        pub async fn list(
            &mut self,
            request: impl tonic::IntoRequest<super::ListRequest>,
        ) -> std::result::Result<tonic::Response<super::ListResponse>, tonic::Status> {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/List",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "List"));
            self.inner.unary(req, path, codec).await
        }
        /// Delete removes a knowledge base by its RID
        pub async fn delete(
            &mut self,
            request: impl tonic::IntoRequest<super::DeleteRequest>,
        ) -> std::result::Result<tonic::Response<super::DeleteResponse>, tonic::Status> {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/Delete",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "Delete"));
            self.inner.unary(req, path, codec).await
        }
        /// GetBatch retrieves multiple knowledge bases by their RIDs
        pub async fn get_batch(
            &mut self,
            request: impl tonic::IntoRequest<super::GetBatchRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GetBatchResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/GetBatch",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "GetBatch"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// GenerateSummaryDescription generates a summary description for an attachment rid
        pub async fn generate_summary_description(
            &mut self,
            request: impl tonic::IntoRequest<super::GenerateSummaryDescriptionRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GenerateSummaryDescriptionResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/GenerateSummaryDescription",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.KnowledgeBaseService",
                        "GenerateSummaryDescription",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}