// nominal_api/proto/nominal.ai.v1.rs
// This file is @generated by prost-build.
/// StreamChatRequest is a request to stream chat messages for the AI agent.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StreamChatRequest {
    /// The conversation ID
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    /// The user message to append to the conversation
    #[prost(message, optional, tag = "2")]
    pub message: ::core::option::Option<UserModelMessage>,
    /// Optional: image files to provide to the agent
    #[prost(message, repeated, tag = "3")]
    pub images: ::prost::alloc::vec::Vec<ImagePart>,
    /// Context for the request; at most one variant is set (see `stream_chat_request::Context`).
    #[prost(oneof = "stream_chat_request::Context", tags = "4, 5")]
    pub context: ::core::option::Option<stream_chat_request::Context>,
}
/// Nested message and enum types in `StreamChatRequest`.
pub mod stream_chat_request {
    /// Context for the request; exactly one variant may be set.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Context {
        /// Workbook-scoped context.
        #[prost(message, tag = "4")]
        Workbook(super::WorkbookContext),
        /// Global (no-workbook) context.
        #[prost(message, tag = "5")]
        Global(super::GlobalContext),
    }
}
/// WorkbookContext contains workbook-specific context fields
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct WorkbookContext {
    /// RID of the workbook to use for context
    #[prost(string, tag = "1")]
    pub workbook_rid: ::prost::alloc::string::String,
    /// Optional: the user's presence in the workbook
    #[prost(message, optional, tag = "2")]
    pub user_presence: ::core::option::Option<WorkbookUserPresence>,
}
/// GlobalContext carries no fields; it selects the "no workbook" arm of
/// `stream_chat_request::Context`.
/// NOTE(review): the original comment called this "DefaultContext", which does not
/// match the generated type name — consider aligning the .proto comment.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct GlobalContext {}
/// WorkbookUserPresence contains the user's presence in the workbook
/// which is used to describe what the user is viewing at the time of the message.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct WorkbookUserPresence {
    /// Index of the tab the user currently has visible, if known.
    #[prost(int32, optional, tag = "1")]
    pub tab_index: ::core::option::Option<i32>,
    /// Time range currently visible to the user, if known.
    #[prost(message, optional, tag = "2")]
    pub range: ::core::option::Option<TimeRange>,
}
/// CreateConversation request will create a new conversation thread:
/// - if `old_conversation_rid` is not set, a brand new, clear chat is created;
/// - if `old_conversation_rid` is set without a `previous_message_id`, the full
///   conversation thread is copied;
/// - if `old_conversation_rid` is set together with a `previous_message_id`, the
///   conversation thread up until that message is copied — useful for branching a
///   conversation into a new thread.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateConversationRequest {
    /// Display title for the new conversation.
    #[prost(string, tag = "1")]
    pub title: ::prost::alloc::string::String,
    /// Workspace in which to create the conversation.
    #[prost(string, tag = "2")]
    pub workspace_rid: ::prost::alloc::string::String,
    /// Existing conversation to copy/branch from, if any (see rules above).
    #[prost(string, optional, tag = "3")]
    pub old_conversation_rid: ::core::option::Option<::prost::alloc::string::String>,
    /// Message in the old conversation at which to cut the copied thread.
    #[prost(string, optional, tag = "4")]
    pub previous_message_id: ::core::option::Option<::prost::alloc::string::String>,
}
/// CreateConversationResponse will return the conversation id for the new conversation
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateConversationResponse {
    /// RID of the newly created conversation.
    #[prost(string, tag = "1")]
    pub new_conversation_rid: ::prost::alloc::string::String,
}
/// Request to update metadata (currently the title) of an existing conversation.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UpdateConversationMetadataRequest {
    /// New title for the conversation.
    #[prost(string, tag = "1")]
    pub title: ::prost::alloc::string::String,
    /// RID of the conversation to update.
    #[prost(string, tag = "2")]
    pub conversation_rid: ::prost::alloc::string::String,
}
/// Empty acknowledgement for UpdateConversationMetadata.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct UpdateConversationMetadataResponse {}
/// Request to delete a conversation by RID.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteConversationRequest {
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
}
/// Empty acknowledgement for DeleteConversation.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct DeleteConversationResponse {}
/// A GetConversationRequest allows you to retrieve a subset of messages from a conversation thread represented
/// by the provided rid. To start from a particular message you can also provide a message id.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetConversationRequest {
    /// RID of the conversation to read.
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    /// Message id to start the page from, if paginating.
    #[prost(string, optional, tag = "2")]
    pub page_start_message_id: ::core::option::Option<::prost::alloc::string::String>,
    /// Maximum number of messages to return, if limiting.
    #[prost(int32, optional, tag = "3")]
    pub max_message_count: ::core::option::Option<i32>,
}
/// Model message with id allows you to identify the message ID of a given message
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ModelMessageWithId {
    /// Stable identifier of this message (usable for branching via CreateConversation).
    #[prost(string, tag = "3")]
    pub message_id: ::prost::alloc::string::String,
    /// Either a chat message or a tool action; at most one is set.
    #[prost(oneof = "model_message_with_id::Content", tags = "1, 2")]
    pub content: ::core::option::Option<model_message_with_id::Content>,
}
/// Nested message and enum types in `ModelMessageWithId`.
pub mod model_message_with_id {
    /// Payload of a stored message: either a chat message or a tool action.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Content {
        /// A user or assistant chat message.
        #[prost(message, tag = "1")]
        Message(super::ModelMessage),
        /// A recorded tool action taken by the agent.
        #[prost(message, tag = "2")]
        ToolAction(super::ToolAction),
    }
}
/// Messages of the requested conversation, in conversation order.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetConversationResponse {
    #[prost(message, repeated, tag = "1")]
    pub ordered_messages: ::prost::alloc::vec::Vec<ModelMessageWithId>,
}
/// Will generate all conversation threads that this user has in this workspace
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListConversationsRequest {
    /// Workspace whose conversations should be listed.
    #[prost(string, tag = "1")]
    pub workspace_rid: ::prost::alloc::string::String,
}
/// Summary metadata for one conversation thread.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ConversationMetadata {
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    #[prost(string, tag = "2")]
    pub title: ::prost::alloc::string::String,
    /// When the conversation was created.
    #[prost(message, optional, tag = "3")]
    pub created_at: ::core::option::Option<
        super::super::super::google::protobuf::Timestamp,
    >,
    /// When the conversation was last updated.
    #[prost(message, optional, tag = "4")]
    pub last_updated_at: ::core::option::Option<
        super::super::super::google::protobuf::Timestamp,
    >,
}
/// ListConversationsResponse is a list of conversations that can be used in a call to GetConversationRequest
/// to get a full conversation from storage. These are ordered by creation time.
/// NOTE(review): the `list_conversations` client method's doc says "ordered by most
/// recently updated", which contradicts "ordered by creation time" here — confirm
/// the actual server ordering and align the .proto comments.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListConversationsResponse {
    #[prost(message, repeated, tag = "1")]
    pub conversations: ::prost::alloc::vec::Vec<ConversationMetadata>,
}
/// A time range bounded by two (optional) timestamps.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct TimeRange {
    /// Start of the range.
    #[prost(message, optional, tag = "1")]
    pub range_start: ::core::option::Option<Timestamp>,
    /// End of the range.
    #[prost(message, optional, tag = "2")]
    pub range_end: ::core::option::Option<Timestamp>,
}
/// A custom seconds + nanoseconds timestamp local to this proto package.
/// NOTE(review): `seconds` is int32, which cannot represent Unix epoch seconds past
/// 2038; `google.protobuf.Timestamp` (already used by `ConversationMetadata` in this
/// file) uses int64 — consider switching in the .proto. Also unclear from here
/// whether this is an absolute epoch time or a relative offset — confirm with the
/// server implementation.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct Timestamp {
    #[prost(int32, tag = "1")]
    pub seconds: i32,
    /// Sub-second component, in nanoseconds.
    #[prost(int32, tag = "2")]
    pub nanoseconds: i32,
}
/// ModelMessage is a discriminated union of system, user, assistant, and tool messages.
/// Each message type has its own structure and content.
/// NOTE(review): only user and assistant variants are generated here (tags 1, 2);
/// the "system" and "tool" kinds mentioned above do not exist in this version.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ModelMessage {
    #[prost(oneof = "model_message::Kind", tags = "1, 2")]
    pub kind: ::core::option::Option<model_message::Kind>,
}
/// Nested message and enum types in `ModelMessage`.
pub mod model_message {
    /// The author/kind of a chat message.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Kind {
        /// Message authored by the user.
        #[prost(message, tag = "1")]
        User(super::UserModelMessage),
        /// Message authored by the assistant.
        #[prost(message, tag = "2")]
        Assistant(super::AssistantModelMessage),
    }
}
/// A user message containing text
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UserModelMessage {
    /// NOTE(review): despite the field name `text`, this holds structured content
    /// parts (mirroring `AssistantModelMessage::content_parts`) — consider renaming
    /// the field in the .proto.
    #[prost(message, repeated, tag = "1")]
    pub text: ::prost::alloc::vec::Vec<UserContentPart>,
}
/// An assistant message containing content parts.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AssistantModelMessage {
    #[prost(message, repeated, tag = "1")]
    pub content_parts: ::prost::alloc::vec::Vec<AssistantContentPart>,
}
/// A single content part of a user message (currently text only).
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UserContentPart {
    #[prost(oneof = "user_content_part::Part", tags = "1")]
    pub part: ::core::option::Option<user_content_part::Part>,
}
/// Nested message and enum types in `UserContentPart`.
pub mod user_content_part {
    /// The concrete part payload.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Part {
        /// Plain text content.
        #[prost(message, tag = "1")]
        Text(super::TextPart),
    }
}
/// Content part for assistant messages: can be text or reasoning.
/// NOTE(review): the original comment also mentioned "mutation", but no mutation
/// variant is generated here — `WorkbookMutation` is delivered through
/// `StreamChatResponse` instead.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AssistantContentPart {
    #[prost(oneof = "assistant_content_part::Part", tags = "1, 2")]
    pub part: ::core::option::Option<assistant_content_part::Part>,
}
/// Nested message and enum types in `AssistantContentPart`.
pub mod assistant_content_part {
    /// The concrete part payload.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Part {
        /// Visible text produced by the assistant.
        #[prost(message, tag = "1")]
        Text(super::TextPart),
        /// Reasoning produced by the assistant.
        #[prost(message, tag = "2")]
        Reasoning(super::ReasoningPart),
    }
}
/// Text part for user or assistant messages.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextPart {
    #[prost(string, tag = "1")]
    pub text: ::prost::alloc::string::String,
}
/// User-supplied image part.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ImagePart {
    /// The base64-encoded image data
    /// NOTE(review): the wire type is `bytes`; confirm whether the payload is raw
    /// image bytes or a base64 string encoded as bytes, as the comment suggests.
    #[prost(bytes = "vec", tag = "1")]
    pub data: ::prost::alloc::vec::Vec<u8>,
    /// The media type of the image (e.g. "image/png", "image/jpeg")
    #[prost(string, optional, tag = "2")]
    pub media_type: ::core::option::Option<::prost::alloc::string::String>,
    /// Optional: the filename of the image
    #[prost(string, optional, tag = "3")]
    pub filename: ::core::option::Option<::prost::alloc::string::String>,
}
/// Reasoning part for assistant messages.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningPart {
    #[prost(string, tag = "1")]
    pub reasoning: ::prost::alloc::string::String,
}
/// StreamChatResponse is a discriminated union response to a StreamChatRequest
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StreamChatResponse {
    #[prost(
        oneof = "stream_chat_response::Response",
        tags = "1, 2, 3, 4, 5, 6, 7, 8, 9, 10"
    )]
    pub response: ::core::option::Option<stream_chat_response::Response>,
}
/// Nested message and enum types in `StreamChatResponse`.
pub mod stream_chat_response {
    /// One event in the chat stream. Text and reasoning are delivered as
    /// start/delta/end triples keyed by a shared `id`.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Response {
        /// Terminal event: the agent run completed.
        #[prost(message, tag = "1")]
        Finish(super::Finish),
        /// Terminal event: the agent run failed.
        #[prost(message, tag = "2")]
        Error(super::Error),
        #[prost(message, tag = "3")]
        TextStart(super::TextStart),
        #[prost(message, tag = "4")]
        TextDelta(super::TextDelta),
        #[prost(message, tag = "5")]
        TextEnd(super::TextEnd),
        #[prost(message, tag = "6")]
        ReasoningStart(super::ReasoningStart),
        #[prost(message, tag = "7")]
        ReasoningDelta(super::ReasoningDelta),
        #[prost(message, tag = "8")]
        ReasoningEnd(super::ReasoningEnd),
        /// this will be deprecated in favor of MCP-based mutations
        #[prost(message, tag = "9")]
        WorkbookMutation(super::WorkbookMutation),
        /// High-level description of a tool call the agent is making.
        #[prost(message, tag = "10")]
        ToolAction(super::ToolAction),
    }
}
/// Indicates the end of a chat session
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Finish {
    /// The message ids in order of all generated messages for this agent run.
    /// These ids can be used to branch a conversation from a specific message.
    #[prost(string, repeated, tag = "1")]
    pub ordered_message_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
/// An error that occurred during the chat session
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Error {
    /// Human-readable error description.
    #[prost(string, tag = "1")]
    pub message: ::prost::alloc::string::String,
}
/// Indicates the start of a text message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextStart {
    /// uniquely identifies the text message (e.g. uuid) so that the client can
    /// merge parallel message streams (if it happens).
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// A delta (continuation) of a text message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextDelta {
    /// Matches the `id` of the corresponding `TextStart`.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// The next chunk of text
    #[prost(string, tag = "2")]
    pub delta: ::prost::alloc::string::String,
}
/// Indicates the end of a text message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextEnd {
    /// Matches the `id` of the corresponding `TextStart`.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// Indicates the start of a reasoning message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningStart {
    /// Uniquely identifies this reasoning message stream.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// A delta (continuation) of a reasoning message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningDelta {
    /// Matches the `id` of the corresponding `ReasoningStart`.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// The next chunk of reasoning
    #[prost(string, tag = "2")]
    pub delta: ::prost::alloc::string::String,
}
/// Indicates the end of a reasoning message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningEnd {
    /// Matches the `id` of the corresponding `ReasoningStart`.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// Add a new tab to the workbook
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AddTabMutation {
    /// if tab_name is not provided, we'll name it "New Tab"
    #[prost(string, optional, tag = "1")]
    pub tab_name: ::core::option::Option<::prost::alloc::string::String>,
}
/// Add a panel, or update it if `panel_id` already exists.
/// NOTE(review): the original comment read `this is the "result" of the mutation`,
/// which does not describe this message — clarify intent in the .proto.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AddOrUpdatePanelMutation {
    /// JSON-serialized representation of IVizDefinition
    #[prost(string, tag = "1")]
    pub panel_as_json: ::prost::alloc::string::String,
    /// Identifier of the panel to add or update.
    #[prost(string, tag = "2")]
    pub panel_id: ::prost::alloc::string::String,
    /// Tab that the panel belongs to.
    #[prost(int32, tag = "3")]
    pub tab_index: i32,
}
/// Remove the panels with the given ids from the workbook.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RemovePanelsMutation {
    #[prost(string, repeated, tag = "1")]
    pub panel_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
/// AddOrReplaceVariableMutation is a mutation to add or replace a variable in the workbook
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AddOrReplaceVariableMutation {
    /// scout_compute_api_ComputeSpecWithContext (this needs to be resolved).
    /// Be careful: this uses the scout_compute_api version of ComputeSpecWithContext,
    /// which stores the spec as "seriesNode" and is NOT the same as the
    /// ComputeSpecWithContext that is stored in INotebook.
    #[prost(string, tag = "1")]
    pub compute_spec_as_json: ::prost::alloc::string::String,
    /// if variable_name is not provided, we'll assume it's a new variable and auto-generate a unique name
    #[prost(string, optional, tag = "2")]
    pub variable_name: ::core::option::Option<::prost::alloc::string::String>,
    /// Optional human-readable display name for the variable.
    #[prost(string, optional, tag = "3")]
    pub display_name: ::core::option::Option<::prost::alloc::string::String>,
}
/// DeleteVariablesMutation is a mutation to delete variables from the workbook
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteVariablesMutation {
    #[prost(string, repeated, tag = "1")]
    pub variable_names: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
/// WorkbookMutation is a mutation to the workbook
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct WorkbookMutation {
    /// Identifier for this mutation event.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// The specific mutation to apply; at most one is set.
    #[prost(oneof = "workbook_mutation::Mutation", tags = "2, 3, 4, 5, 6")]
    pub mutation: ::core::option::Option<workbook_mutation::Mutation>,
}
/// Nested message and enum types in `WorkbookMutation`.
pub mod workbook_mutation {
    /// The concrete mutation payload.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Mutation {
        #[prost(message, tag = "2")]
        AddTab(super::AddTabMutation),
        #[prost(message, tag = "3")]
        AddOrUpdatePanel(super::AddOrUpdatePanelMutation),
        #[prost(message, tag = "4")]
        RemovePanels(super::RemovePanelsMutation),
        #[prost(message, tag = "5")]
        AddOrReplaceVariable(super::AddOrReplaceVariableMutation),
        #[prost(message, tag = "6")]
        DeleteVariables(super::DeleteVariablesMutation),
    }
}
/// This is a concise description of a tool call that the agent is making internally.
/// Without revealing too much detail about the tool call, it informs the user what the
/// agent is doing at a high level. The format is `{tool_action_verb} {tool_target}`,
/// for example: "Search channels for My Datasource".
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ToolAction {
    /// Uniquely identifies this tool action.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// "Thought", "Read", "Find", "Look-up", etc.
    #[prost(string, tag = "2")]
    pub tool_action_verb: ::prost::alloc::string::String,
    /// "workbook", "channel", "variable", "panel", etc.
    #[prost(string, optional, tag = "3")]
    pub tool_target: ::core::option::Option<::prost::alloc::string::String>,
}
/// Generated client implementations.
pub mod ai_agent_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// AIAgentService provides AI-powered assistance for general operations
    #[derive(Debug, Clone)]
    pub struct AiAgentServiceClient<T> {
        inner: tonic::client::Grpc<T>,
    }
    impl AiAgentServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> AiAgentServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        /// Wrap an existing gRPC service in this client.
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        /// Wrap an existing gRPC service, routing requests to the given origin.
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        /// Wrap the service with an interceptor applied to every request.
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> AiAgentServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            AiAgentServiceClient::new(InterceptedService::new(inner, interceptor))
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// StreamChat streams chat events for the AI agent.
        ///
        /// NOTE(review): the upstream proto comment called this "bidirectional
        /// streaming", but the generated call is server-streaming — one request in,
        /// a stream of `StreamChatResponse` out (see `server_streaming` below).
        pub async fn stream_chat(
            &mut self,
            request: impl tonic::IntoRequest<super::StreamChatRequest>,
        ) -> std::result::Result<
            tonic::Response<tonic::codec::Streaming<super::StreamChatResponse>>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/StreamChat",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(GrpcMethod::new("nominal.ai.v1.AIAgentService", "StreamChat"));
            self.inner.server_streaming(req, path, codec).await
        }
        /// GetConversation handles getting a complete conversation list, with an optional limit on number of messages returned
        pub async fn get_conversation(
            &mut self,
            request: impl tonic::IntoRequest<super::GetConversationRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GetConversationResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/GetConversation",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "GetConversation"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// ListConversations handles getting the list of conversation ids ordered by most recently updated
        pub async fn list_conversations(
            &mut self,
            request: impl tonic::IntoRequest<super::ListConversationsRequest>,
        ) -> std::result::Result<
            tonic::Response<super::ListConversationsResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/ListConversations",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "ListConversations"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// CreateConversation handles creating a conversation and assigning it a conversation rid
        pub async fn create_conversation(
            &mut self,
            request: impl tonic::IntoRequest<super::CreateConversationRequest>,
        ) -> std::result::Result<
            tonic::Response<super::CreateConversationResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/CreateConversation",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "CreateConversation"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// UpdateConversationMetadata handles updating any metadata associated with a conversation
        pub async fn update_conversation_metadata(
            &mut self,
            request: impl tonic::IntoRequest<super::UpdateConversationMetadataRequest>,
        ) -> std::result::Result<
            tonic::Response<super::UpdateConversationMetadataResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/UpdateConversationMetadata",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.AIAgentService",
                        "UpdateConversationMetadata",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// DeleteConversation handles deleting a specific conversation by conversation rid
        pub async fn delete_conversation(
            &mut self,
            request: impl tonic::IntoRequest<super::DeleteConversationRequest>,
        ) -> std::result::Result<
            tonic::Response<super::DeleteConversationResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/DeleteConversation",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "DeleteConversation"),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}
/// StreamChatRequest is a request to stream chat messages for the workbook AI agent.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct WorkbookAgentServiceStreamChatRequest {
    /// Prior conversation messages supplied inline (V1 API).
    #[prost(message, repeated, tag = "1")]
    pub messages: ::prost::alloc::vec::Vec<ModelMessage>,
    /// JSON-serialized representation of INotebook
    #[prost(string, optional, tag = "2")]
    pub notebook_as_json: ::core::option::Option<::prost::alloc::string::String>,
    /// The current tab visible in the workbook from the user's perspective
    #[prost(int32, optional, tag = "3")]
    pub selected_tab_index: ::core::option::Option<i32>,
    /// Optional: image files to provide to the agent
    #[prost(message, repeated, tag = "4")]
    pub images: ::prost::alloc::vec::Vec<ImagePart>,
    /// Time range for the tab that is currently visible to the user
    #[prost(message, optional, tag = "5")]
    pub range: ::core::option::Option<TimeRange>,
    /// V2 conversation API persists the message and any assistant responses to storage under the provided
    /// conversation_id. if id does not exist in the database, then a new conversation is started from this message.
    #[prost(message, optional, tag = "6")]
    pub message: ::core::option::Option<AppendMessage>,
}
/// This will append a message to an existing conversation.
/// A non existent conversation id will raise an error.
/// NOTE(review): this contradicts the comment on field 6 above, which says a
/// missing conversation id starts a new conversation — confirm the server's actual
/// behavior and reconcile the .proto comments.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AppendMessage {
    /// The user message to append.
    #[prost(message, optional, tag = "1")]
    pub message: ::core::option::Option<UserModelMessage>,
    /// RID of the conversation to append to.
    #[prost(string, tag = "2")]
    pub conversation_rid: ::prost::alloc::string::String,
}
/// Generated client implementations.
pub mod workbook_agent_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// WorkbookAgentService provides AI-powered assistance for workbook operations
    /// this is deprecated in favor of the AIAgentService
    #[derive(Debug, Clone)]
    pub struct WorkbookAgentServiceClient<T> {
        inner: tonic::client::Grpc<T>,
    }
    impl WorkbookAgentServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> WorkbookAgentServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        /// Create a client from an already-constructed gRPC service.
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        /// Create a client that sends requests relative to the given origin URI.
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        /// Create a client whose requests pass through `interceptor` first.
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> WorkbookAgentServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            WorkbookAgentServiceClient::new(InterceptedService::new(inner, interceptor))
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// StreamChat handles bidirectional streaming chat for workbook AI agent
        ///
        /// NOTE(review): despite "bidirectional" above, this is issued as a
        /// server-streaming call — a single request in, a stream of responses out
        /// (see the `server_streaming` call below).
        pub async fn stream_chat(
            &mut self,
            request: impl tonic::IntoRequest<
                super::WorkbookAgentServiceStreamChatRequest,
            >,
        ) -> std::result::Result<
            tonic::Response<tonic::codec::Streaming<super::StreamChatResponse>>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.WorkbookAgentService/StreamChat",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.WorkbookAgentService", "StreamChat"),
                );
            self.inner.server_streaming(req, path, codec).await
        }
        /// GetConversation handles getting a complete conversation list, with an optional limit on number of messages returned
        pub async fn get_conversation(
            &mut self,
            request: impl tonic::IntoRequest<super::GetConversationRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GetConversationResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.WorkbookAgentService/GetConversation",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.WorkbookAgentService",
                        "GetConversation",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// ListConversations handles getting the list of conversation ids ordered by most recently updated
        pub async fn list_conversations(
            &mut self,
            request: impl tonic::IntoRequest<super::ListConversationsRequest>,
        ) -> std::result::Result<
            tonic::Response<super::ListConversationsResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.WorkbookAgentService/ListConversations",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.WorkbookAgentService",
                        "ListConversations",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// CreateConversation handles creating a conversation and assigning it a conversation rid
        pub async fn create_conversation(
            &mut self,
            request: impl tonic::IntoRequest<super::CreateConversationRequest>,
        ) -> std::result::Result<
            tonic::Response<super::CreateConversationResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.WorkbookAgentService/CreateConversation",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.WorkbookAgentService",
                        "CreateConversation",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// UpdateConversationMetadata handles updating any metadata associated with a conversation
        pub async fn update_conversation_metadata(
            &mut self,
            request: impl tonic::IntoRequest<super::UpdateConversationMetadataRequest>,
        ) -> std::result::Result<
            tonic::Response<super::UpdateConversationMetadataResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.WorkbookAgentService/UpdateConversationMetadata",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.WorkbookAgentService",
                        "UpdateConversationMetadata",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// DeleteConversation handles deleting a specific conversation by conversation rid
        pub async fn delete_conversation(
            &mut self,
            request: impl tonic::IntoRequest<super::DeleteConversationRequest>,
        ) -> std::result::Result<
            tonic::Response<super::DeleteConversationResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.WorkbookAgentService/DeleteConversation",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.WorkbookAgentService",
                        "DeleteConversation",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}
/// Request for `IsAIEnabledForUser`; carries no fields (the user is implied by the call context).
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct IsAiEnabledForUserRequest {}
/// Response for `IsAIEnabledForUser`.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct IsAiEnabledForUserResponse {
    /// True when AI features are enabled for the user.
    #[prost(bool, tag = "1")]
    pub is_enabled: bool,
}
/// Generated client implementations.
pub mod ai_features_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// AIFeaturesService provides information about enabled AI features
    #[derive(Debug, Clone)]
    pub struct AiFeaturesServiceClient<T> {
        inner: tonic::client::Grpc<T>,
    }
    impl AiFeaturesServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> AiFeaturesServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        /// Create a client from an already-constructed gRPC service.
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        /// Create a client that sends requests relative to the given origin URI.
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        /// Create a client whose requests pass through `interceptor` first.
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> AiFeaturesServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            AiFeaturesServiceClient::new(InterceptedService::new(inner, interceptor))
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// IsAIEnabledForUser can be used to check if AI is enabled for a specific user
        pub async fn is_ai_enabled_for_user(
            &mut self,
            request: impl tonic::IntoRequest<super::IsAiEnabledForUserRequest>,
        ) -> std::result::Result<
            tonic::Response<super::IsAiEnabledForUserResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIFeaturesService/IsAIEnabledForUser",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.AIFeaturesService",
                        "IsAIEnabledForUser",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}
/// CreateOrUpdateKnowledgeBaseRequest is a request to create a knowledge from an attachment in the attachment's
/// workspace. Subsequent calls for the same attachment will overwrite the existing knowledge base.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateOrUpdateKnowledgeBaseRequest {
    /// RID of the attachment the knowledge base is built from.
    #[prost(string, tag = "1")]
    pub attachment_rid: ::prost::alloc::string::String,
    /// summary of the knowledge base, will be used by the LLM to decide when to use it
    #[prost(string, tag = "2")]
    pub summary_description: ::prost::alloc::string::String,
    /// Optional knowledge base type; unset defaults to PROMPT (see `KnowledgeBaseType`).
    #[prost(enumeration = "KnowledgeBaseType", optional, tag = "3")]
    pub r#type: ::core::option::Option<i32>,
}
/// CreateOrUpdateKnowledgeBaseResponse is the response to creating/updating a knowledge base
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateOrUpdateKnowledgeBaseResponse {
    /// RID assigned to the created/updated knowledge base.
    #[prost(string, tag = "1")]
    pub knowledge_base_rid: ::prost::alloc::string::String,
}
/// KnowledgeBase represents a knowledge base entry
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct KnowledgeBase {
    /// RID identifying this knowledge base.
    #[prost(string, tag = "1")]
    pub knowledge_base_rid: ::prost::alloc::string::String,
    /// RID of the attachment the knowledge base was created from.
    #[prost(string, tag = "2")]
    pub attachment_rid: ::prost::alloc::string::String,
    /// RID of the workspace containing the knowledge base.
    #[prost(string, tag = "3")]
    pub workspace_rid: ::prost::alloc::string::String,
    /// Summary of the knowledge base, used by the LLM to decide when to use it.
    #[prost(string, tag = "4")]
    pub summary_description: ::prost::alloc::string::String,
    /// How the knowledge base is consumed (see `KnowledgeBaseType`).
    #[prost(enumeration = "KnowledgeBaseType", tag = "5")]
    pub r#type: i32,
    /// Version number of this knowledge base entry.
    #[prost(int32, tag = "6")]
    pub version: i32,
}
/// Request for `List`: fetch all knowledge bases in a workspace.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListRequest {
    /// RID of the workspace whose knowledge bases are listed.
    #[prost(string, tag = "1")]
    pub workspace_rid: ::prost::alloc::string::String,
}
/// Response for `List`.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListResponse {
    /// All knowledge bases in the requested workspace.
    #[prost(message, repeated, tag = "1")]
    pub knowledge_bases: ::prost::alloc::vec::Vec<KnowledgeBase>,
}
/// Request for `Delete`: remove a knowledge base by RID.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteRequest {
    /// RID of the knowledge base to delete.
    #[prost(string, tag = "1")]
    pub knowledge_base_rid: ::prost::alloc::string::String,
}
/// Response for `Delete`.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct DeleteResponse {
    /// True when the deletion succeeded.
    #[prost(bool, tag = "1")]
    pub success: bool,
}
/// Request for `GetBatch`: fetch multiple knowledge bases by RID.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetBatchRequest {
    /// RIDs of the knowledge bases to retrieve.
    #[prost(string, repeated, tag = "1")]
    pub knowledge_base_rids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
/// Response for `GetBatch`.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetBatchResponse {
    /// The retrieved knowledge bases.
    #[prost(message, repeated, tag = "1")]
    pub knowledge_bases: ::prost::alloc::vec::Vec<KnowledgeBase>,
}
/// generate summary description is intentionally going to return the generated description to the frontend
/// rather than storing it in the knowledge base directly because the description needs to be accepted by the user
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GenerateSummaryDescriptionRequest {
    /// RID of the attachment to summarize.
    #[prost(string, tag = "1")]
    pub attachment_rid: ::prost::alloc::string::String,
}
/// Response for `GenerateSummaryDescription`.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GenerateSummaryDescriptionResponse {
    /// The generated summary, returned to the frontend for user acceptance.
    #[prost(string, tag = "1")]
    pub summary_description: ::prost::alloc::string::String,
}
1193/// KnowledgeBaseType defines the types of knowledge base
1194#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
1195#[repr(i32)]
1196pub enum KnowledgeBaseType {
1197    /// defaults to PROMPT
1198    Unspecified = 0,
1199    /// knowledge base gets added directly to prompt (needs to be small enough!)
1200    Prompt = 1,
1201    /// knowledge base gets used via vector search on embeddings
1202    Embedding = 2,
1203}
1204impl KnowledgeBaseType {
1205    /// String value of the enum field names used in the ProtoBuf definition.
1206    ///
1207    /// The values are not transformed in any way and thus are considered stable
1208    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
1209    pub fn as_str_name(&self) -> &'static str {
1210        match self {
1211            Self::Unspecified => "KNOWLEDGE_BASE_TYPE_UNSPECIFIED",
1212            Self::Prompt => "KNOWLEDGE_BASE_TYPE_PROMPT",
1213            Self::Embedding => "KNOWLEDGE_BASE_TYPE_EMBEDDING",
1214        }
1215    }
1216    /// Creates an enum from field names used in the ProtoBuf definition.
1217    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
1218        match value {
1219            "KNOWLEDGE_BASE_TYPE_UNSPECIFIED" => Some(Self::Unspecified),
1220            "KNOWLEDGE_BASE_TYPE_PROMPT" => Some(Self::Prompt),
1221            "KNOWLEDGE_BASE_TYPE_EMBEDDING" => Some(Self::Embedding),
1222            _ => None,
1223        }
1224    }
1225}
/// Generated client implementations.
pub mod knowledge_base_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// KnowledgeBaseService provides AI-powered knowledge base management
    #[derive(Debug, Clone)]
    pub struct KnowledgeBaseServiceClient<T> {
        inner: tonic::client::Grpc<T>,
    }
    impl KnowledgeBaseServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> KnowledgeBaseServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        /// Create a client from an already-constructed gRPC service.
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        /// Create a client that sends requests relative to the given origin URI.
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        /// Create a client whose requests pass through `interceptor` first.
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> KnowledgeBaseServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            KnowledgeBaseServiceClient::new(InterceptedService::new(inner, interceptor))
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// CreateOrUpdateKnowledgeBase creates a knowledge base in the workspace
        pub async fn create_or_update_knowledge_base(
            &mut self,
            request: impl tonic::IntoRequest<super::CreateOrUpdateKnowledgeBaseRequest>,
        ) -> std::result::Result<
            tonic::Response<super::CreateOrUpdateKnowledgeBaseResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/CreateOrUpdateKnowledgeBase",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.KnowledgeBaseService",
                        "CreateOrUpdateKnowledgeBase",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// List returns all knowledge bases in the specified workspace
        pub async fn list(
            &mut self,
            request: impl tonic::IntoRequest<super::ListRequest>,
        ) -> std::result::Result<tonic::Response<super::ListResponse>, tonic::Status> {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/List",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "List"));
            self.inner.unary(req, path, codec).await
        }
        /// Delete removes a knowledge base by its RID
        pub async fn delete(
            &mut self,
            request: impl tonic::IntoRequest<super::DeleteRequest>,
        ) -> std::result::Result<tonic::Response<super::DeleteResponse>, tonic::Status> {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/Delete",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "Delete"));
            self.inner.unary(req, path, codec).await
        }
        /// GetBatch retrieves multiple knowledge bases by their RIDs
        pub async fn get_batch(
            &mut self,
            request: impl tonic::IntoRequest<super::GetBatchRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GetBatchResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/GetBatch",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "GetBatch"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// GenerateSummaryDescription generates a summary description for an attachment rid
        pub async fn generate_summary_description(
            &mut self,
            request: impl tonic::IntoRequest<super::GenerateSummaryDescriptionRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GenerateSummaryDescriptionResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/GenerateSummaryDescription",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.KnowledgeBaseService",
                        "GenerateSummaryDescription",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}