nominal_api/proto/
nominal.ai.v1.rs

1// This file is @generated by prost-build.
2/// StreamChatRequest is a request to stream chat messages for AI agent
3#[derive(Clone, PartialEq, ::prost::Message)]
4pub struct StreamChatRequest {
5    /// The conversation ID
6    #[prost(string, tag = "1")]
7    pub conversation_rid: ::prost::alloc::string::String,
8    /// The user message to append to the conversation
9    #[prost(message, optional, tag = "2")]
10    pub message: ::core::option::Option<UserModelMessage>,
11    /// Optional: image files to provide to the agent
12    #[prost(message, repeated, tag = "3")]
13    pub images: ::prost::alloc::vec::Vec<ImagePart>,
14    /// Context-specific fields based on the oneofKind. Unspecified context is accepted.
15    #[prost(oneof = "stream_chat_request::Context", tags = "4")]
16    pub context: ::core::option::Option<stream_chat_request::Context>,
17}
18/// Nested message and enum types in `StreamChatRequest`.
19pub mod stream_chat_request {
20    /// Context-specific fields based on the oneofKind. Unspecified context is accepted.
21    #[derive(Clone, PartialEq, ::prost::Oneof)]
22    pub enum Context {
23        #[prost(message, tag = "4")]
24        Workbook(super::WorkbookContext),
25    }
26}
27/// WorkbookContext contains workbook-specific context fields
28#[derive(Clone, PartialEq, ::prost::Message)]
29pub struct WorkbookContext {
30    /// RID of the workbook to use for context
31    #[prost(string, tag = "1")]
32    pub workbook_rid: ::prost::alloc::string::String,
33    /// Optional: the user's presence in the workbook
34    #[prost(message, optional, tag = "2")]
35    pub user_presence: ::core::option::Option<WorkbookUserPresence>,
36}
37/// WorkbookUserPresence contains the user's presence in the workbook
38/// which is used to describe what the user is viewing at the time of the message.
39#[derive(Clone, Copy, PartialEq, ::prost::Message)]
40pub struct WorkbookUserPresence {
41    #[prost(int32, optional, tag = "1")]
42    pub tab_index: ::core::option::Option<i32>,
43    #[prost(message, optional, tag = "2")]
44    pub range: ::core::option::Option<TimeRange>,
45}
46/// CreateConversation request will create a new conversation thread
47/// if old conversation id is not set, a brand new, clear chat is created
48/// If old conversation id is set without a previous message id, the full conversation thread will be copied
49/// if previous message id is set with a previous message id, the conversation thread up until that message will be copied
50/// the above case is useful for branching a conversation into a new thread
51#[derive(Clone, PartialEq, ::prost::Message)]
52pub struct CreateConversationRequest {
53    #[prost(string, tag = "1")]
54    pub title: ::prost::alloc::string::String,
55    #[prost(string, tag = "2")]
56    pub workspace_rid: ::prost::alloc::string::String,
57    #[prost(string, optional, tag = "3")]
58    pub old_conversation_rid: ::core::option::Option<::prost::alloc::string::String>,
59    #[prost(string, optional, tag = "4")]
60    pub previous_message_id: ::core::option::Option<::prost::alloc::string::String>,
61}
62/// CreateConversationResponse will return the conversation id for the new conversation
63#[derive(Clone, PartialEq, ::prost::Message)]
64pub struct CreateConversationResponse {
65    #[prost(string, tag = "1")]
66    pub new_conversation_rid: ::prost::alloc::string::String,
67}
68#[derive(Clone, PartialEq, ::prost::Message)]
69pub struct UpdateConversationMetadataRequest {
70    #[prost(string, tag = "1")]
71    pub title: ::prost::alloc::string::String,
72    #[prost(string, tag = "2")]
73    pub conversation_rid: ::prost::alloc::string::String,
74}
75#[derive(Clone, Copy, PartialEq, ::prost::Message)]
76pub struct UpdateConversationMetadataResponse {}
77#[derive(Clone, PartialEq, ::prost::Message)]
78pub struct DeleteConversationRequest {
79    #[prost(string, tag = "1")]
80    pub conversation_rid: ::prost::alloc::string::String,
81}
82#[derive(Clone, Copy, PartialEq, ::prost::Message)]
83pub struct DeleteConversationResponse {}
84/// a GetConversationRequest allows you to retrieve a subset of messages from a conversation thread represented
85/// by provided rid. To start from a particular message - you can also provide a message id.
86#[derive(Clone, PartialEq, ::prost::Message)]
87pub struct GetConversationRequest {
88    #[prost(string, tag = "1")]
89    pub conversation_rid: ::prost::alloc::string::String,
90    #[prost(string, optional, tag = "2")]
91    pub page_start_message_id: ::core::option::Option<::prost::alloc::string::String>,
92    #[prost(int32, optional, tag = "3")]
93    pub max_message_count: ::core::option::Option<i32>,
94}
95/// Model message with id allows you to identify the message ID of a given message
96#[derive(Clone, PartialEq, ::prost::Message)]
97pub struct ModelMessageWithId {
98    #[prost(string, tag = "3")]
99    pub message_id: ::prost::alloc::string::String,
100    #[prost(oneof = "model_message_with_id::Content", tags = "1, 2")]
101    pub content: ::core::option::Option<model_message_with_id::Content>,
102}
103/// Nested message and enum types in `ModelMessageWithId`.
104pub mod model_message_with_id {
105    #[derive(Clone, PartialEq, ::prost::Oneof)]
106    pub enum Content {
107        #[prost(message, tag = "1")]
108        Message(super::ModelMessage),
109        #[prost(message, tag = "2")]
110        ToolAction(super::ToolAction),
111    }
112}
113#[derive(Clone, PartialEq, ::prost::Message)]
114pub struct GetConversationResponse {
115    #[prost(message, repeated, tag = "1")]
116    pub ordered_messages: ::prost::alloc::vec::Vec<ModelMessageWithId>,
117}
118/// Will generate all conversation threads that this user has in this workspace
119#[derive(Clone, PartialEq, ::prost::Message)]
120pub struct ListConversationsRequest {
121    #[prost(string, tag = "1")]
122    pub workspace_rid: ::prost::alloc::string::String,
123}
124#[derive(Clone, PartialEq, ::prost::Message)]
125pub struct ConversationMetadata {
126    #[prost(string, tag = "1")]
127    pub conversation_rid: ::prost::alloc::string::String,
128    #[prost(string, tag = "2")]
129    pub title: ::prost::alloc::string::String,
130    #[prost(message, optional, tag = "3")]
131    pub created_at: ::core::option::Option<
132        super::super::super::google::protobuf::Timestamp,
133    >,
134    #[prost(message, optional, tag = "4")]
135    pub last_updated_at: ::core::option::Option<
136        super::super::super::google::protobuf::Timestamp,
137    >,
138}
139/// ListConversationsResponse is a list of conversations that can be used in a call to GetConversationRequest
140/// to get a full conversation from storage. These are ordered by creation time.
141#[derive(Clone, PartialEq, ::prost::Message)]
142pub struct ListConversationsResponse {
143    #[prost(message, repeated, tag = "1")]
144    pub conversations: ::prost::alloc::vec::Vec<ConversationMetadata>,
145}
146#[derive(Clone, Copy, PartialEq, ::prost::Message)]
147pub struct TimeRange {
148    #[prost(message, optional, tag = "1")]
149    pub range_start: ::core::option::Option<Timestamp>,
150    #[prost(message, optional, tag = "2")]
151    pub range_end: ::core::option::Option<Timestamp>,
152}
153#[derive(Clone, Copy, PartialEq, ::prost::Message)]
154pub struct Timestamp {
155    #[prost(int32, tag = "1")]
156    pub seconds: i32,
157    #[prost(int32, tag = "2")]
158    pub nanoseconds: i32,
159}
160/// ModelMessage is a discriminated union of system, user, assistant, and tool messages.
161/// Each message type has its own structure and content.
162#[derive(Clone, PartialEq, ::prost::Message)]
163pub struct ModelMessage {
164    #[prost(oneof = "model_message::Kind", tags = "1, 2")]
165    pub kind: ::core::option::Option<model_message::Kind>,
166}
167/// Nested message and enum types in `ModelMessage`.
168pub mod model_message {
169    #[derive(Clone, PartialEq, ::prost::Oneof)]
170    pub enum Kind {
171        #[prost(message, tag = "1")]
172        User(super::UserModelMessage),
173        #[prost(message, tag = "2")]
174        Assistant(super::AssistantModelMessage),
175    }
176}
177/// A user message containing text
178#[derive(Clone, PartialEq, ::prost::Message)]
179pub struct UserModelMessage {
180    #[prost(message, repeated, tag = "1")]
181    pub text: ::prost::alloc::vec::Vec<UserContentPart>,
182}
183/// An assistant message containing text
184#[derive(Clone, PartialEq, ::prost::Message)]
185pub struct AssistantModelMessage {
186    #[prost(message, repeated, tag = "1")]
187    pub content_parts: ::prost::alloc::vec::Vec<AssistantContentPart>,
188}
189#[derive(Clone, PartialEq, ::prost::Message)]
190pub struct UserContentPart {
191    #[prost(oneof = "user_content_part::Part", tags = "1")]
192    pub part: ::core::option::Option<user_content_part::Part>,
193}
194/// Nested message and enum types in `UserContentPart`.
195pub mod user_content_part {
196    #[derive(Clone, PartialEq, ::prost::Oneof)]
197    pub enum Part {
198        #[prost(message, tag = "1")]
199        Text(super::TextPart),
200    }
201}
202/// Content part for assistant messages: can be text, reasoning, or mutation.
203#[derive(Clone, PartialEq, ::prost::Message)]
204pub struct AssistantContentPart {
205    #[prost(oneof = "assistant_content_part::Part", tags = "1, 2")]
206    pub part: ::core::option::Option<assistant_content_part::Part>,
207}
208/// Nested message and enum types in `AssistantContentPart`.
209pub mod assistant_content_part {
210    #[derive(Clone, PartialEq, ::prost::Oneof)]
211    pub enum Part {
212        #[prost(message, tag = "1")]
213        Text(super::TextPart),
214        #[prost(message, tag = "2")]
215        Reasoning(super::ReasoningPart),
216    }
217}
218/// Text part for user or assistant messages.
219#[derive(Clone, PartialEq, ::prost::Message)]
220pub struct TextPart {
221    #[prost(string, tag = "1")]
222    pub text: ::prost::alloc::string::String,
223}
224/// User-supplied image part.
225#[derive(Clone, PartialEq, ::prost::Message)]
226pub struct ImagePart {
227    /// The base64-encoded image data
228    #[prost(bytes = "vec", tag = "1")]
229    pub data: ::prost::alloc::vec::Vec<u8>,
230    /// The media type of the image (e.g. "image/png", "image/jpeg")
231    #[prost(string, optional, tag = "2")]
232    pub media_type: ::core::option::Option<::prost::alloc::string::String>,
233    /// Optional: the filename of the image
234    #[prost(string, optional, tag = "3")]
235    pub filename: ::core::option::Option<::prost::alloc::string::String>,
236}
237/// Reasoning part for assistant messages.
238#[derive(Clone, PartialEq, ::prost::Message)]
239pub struct ReasoningPart {
240    #[prost(string, tag = "1")]
241    pub reasoning: ::prost::alloc::string::String,
242}
243/// StreamChatResponse is a discriminated union response to a StreamChatRequest
244#[derive(Clone, PartialEq, ::prost::Message)]
245pub struct StreamChatResponse {
246    #[prost(
247        oneof = "stream_chat_response::Response",
248        tags = "1, 2, 3, 4, 5, 6, 7, 8, 9, 10"
249    )]
250    pub response: ::core::option::Option<stream_chat_response::Response>,
251}
252/// Nested message and enum types in `StreamChatResponse`.
253pub mod stream_chat_response {
254    #[derive(Clone, PartialEq, ::prost::Oneof)]
255    pub enum Response {
256        #[prost(message, tag = "1")]
257        Finish(super::Finish),
258        #[prost(message, tag = "2")]
259        Error(super::Error),
260        #[prost(message, tag = "3")]
261        TextStart(super::TextStart),
262        #[prost(message, tag = "4")]
263        TextDelta(super::TextDelta),
264        #[prost(message, tag = "5")]
265        TextEnd(super::TextEnd),
266        #[prost(message, tag = "6")]
267        ReasoningStart(super::ReasoningStart),
268        #[prost(message, tag = "7")]
269        ReasoningDelta(super::ReasoningDelta),
270        #[prost(message, tag = "8")]
271        ReasoningEnd(super::ReasoningEnd),
272        /// this will be deprecated in favor of MCP-based mutations
273        #[prost(message, tag = "9")]
274        WorkbookMutation(super::WorkbookMutation),
275        #[prost(message, tag = "10")]
276        ToolAction(super::ToolAction),
277    }
278}
279/// Indicates the end of a chat session
280#[derive(Clone, PartialEq, ::prost::Message)]
281pub struct Finish {
282    /// The message ids in order of all generated messages for this agent run
283    /// These ids can be used to branch a message from that specific message
284    #[prost(string, repeated, tag = "1")]
285    pub ordered_message_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
286}
287/// An error that occurred during the chat session
288#[derive(Clone, PartialEq, ::prost::Message)]
289pub struct Error {
290    #[prost(string, tag = "1")]
291    pub message: ::prost::alloc::string::String,
292}
293/// Indicates the start of a text message from the agent
294#[derive(Clone, PartialEq, ::prost::Message)]
295pub struct TextStart {
296    /// uniquely identifies the text message (e.g. uuid) so that the client can
297    /// merge parallel message streams (if it happens).
298    #[prost(string, tag = "1")]
299    pub id: ::prost::alloc::string::String,
300}
301/// A delta (continuation) of a text message from the agent
302#[derive(Clone, PartialEq, ::prost::Message)]
303pub struct TextDelta {
304    #[prost(string, tag = "1")]
305    pub id: ::prost::alloc::string::String,
306    /// The next chunk of text
307    #[prost(string, tag = "2")]
308    pub delta: ::prost::alloc::string::String,
309}
310/// Indicates the end of a text message from the agent
311#[derive(Clone, PartialEq, ::prost::Message)]
312pub struct TextEnd {
313    #[prost(string, tag = "1")]
314    pub id: ::prost::alloc::string::String,
315}
316/// Indicates the start of a reasoning message from the agent
317#[derive(Clone, PartialEq, ::prost::Message)]
318pub struct ReasoningStart {
319    #[prost(string, tag = "1")]
320    pub id: ::prost::alloc::string::String,
321}
322/// A delta (continuation) of a reasoning message from the agent
323#[derive(Clone, PartialEq, ::prost::Message)]
324pub struct ReasoningDelta {
325    #[prost(string, tag = "1")]
326    pub id: ::prost::alloc::string::String,
327    /// The next chunk of reasoning
328    #[prost(string, tag = "2")]
329    pub delta: ::prost::alloc::string::String,
330}
331/// Indicates the end of a reasoning message from the agent
332#[derive(Clone, PartialEq, ::prost::Message)]
333pub struct ReasoningEnd {
334    #[prost(string, tag = "1")]
335    pub id: ::prost::alloc::string::String,
336}
337/// Add a new tab to the workbook
338#[derive(Clone, PartialEq, ::prost::Message)]
339pub struct AddTabMutation {
340    /// if tab_name is not provided, we'll name it "New Tab"
341    #[prost(string, optional, tag = "1")]
342    pub tab_name: ::core::option::Option<::prost::alloc::string::String>,
343}
344/// this is the "result" of the mutation
345#[derive(Clone, PartialEq, ::prost::Message)]
346pub struct AddOrUpdatePanelMutation {
347    /// JSON-serialized representation of IVizDefinition
348    #[prost(string, tag = "1")]
349    pub panel_as_json: ::prost::alloc::string::String,
350    #[prost(string, tag = "2")]
351    pub panel_id: ::prost::alloc::string::String,
352    #[prost(int32, tag = "3")]
353    pub tab_index: i32,
354}
355#[derive(Clone, PartialEq, ::prost::Message)]
356pub struct RemovePanelsMutation {
357    #[prost(string, repeated, tag = "1")]
358    pub panel_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
359}
360/// AddOrReplaceVariableMutation is a mutation to add or replace a variable in the workbook
361#[derive(Clone, PartialEq, ::prost::Message)]
362pub struct AddOrReplaceVariableMutation {
363    /// scout_compute_api_ComputeSpecWithContext (this needs to be resolved)
364    /// be careful: this uses the scout_compute_api version of ComputeSpecWithContext, which stores the spec as "seriesNode"
365    /// and is NOT the same as ComputeSpecWithContext that is stored in INotebook.
366    #[prost(string, tag = "1")]
367    pub compute_spec_as_json: ::prost::alloc::string::String,
368    /// if variable_name is not provided, we'll assume it's a new variable and auto-generate a unique name
369    #[prost(string, optional, tag = "2")]
370    pub variable_name: ::core::option::Option<::prost::alloc::string::String>,
371    #[prost(string, optional, tag = "3")]
372    pub display_name: ::core::option::Option<::prost::alloc::string::String>,
373}
374/// DeleteVariablesMutation is a mutation to delete variables from the workbook
375#[derive(Clone, PartialEq, ::prost::Message)]
376pub struct DeleteVariablesMutation {
377    #[prost(string, repeated, tag = "1")]
378    pub variable_names: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
379}
380/// WorkbookMutation is a mutation to the workbook
381#[derive(Clone, PartialEq, ::prost::Message)]
382pub struct WorkbookMutation {
383    #[prost(string, tag = "1")]
384    pub id: ::prost::alloc::string::String,
385    #[prost(oneof = "workbook_mutation::Mutation", tags = "2, 3, 4, 5, 6")]
386    pub mutation: ::core::option::Option<workbook_mutation::Mutation>,
387}
388/// Nested message and enum types in `WorkbookMutation`.
389pub mod workbook_mutation {
390    #[derive(Clone, PartialEq, ::prost::Oneof)]
391    pub enum Mutation {
392        #[prost(message, tag = "2")]
393        AddTab(super::AddTabMutation),
394        #[prost(message, tag = "3")]
395        AddOrUpdatePanel(super::AddOrUpdatePanelMutation),
396        #[prost(message, tag = "4")]
397        RemovePanels(super::RemovePanelsMutation),
398        #[prost(message, tag = "5")]
399        AddOrReplaceVariable(super::AddOrReplaceVariableMutation),
400        #[prost(message, tag = "6")]
401        DeleteVariables(super::DeleteVariablesMutation),
402    }
403}
404/// this is a concise description of a tool call that the agent is making internally
405/// without revealing too much detail about the tool call, it informs the user what the agent is doing
406/// at a high level. the format is: `{tool_action_verb} {tool_target}` for example:
407/// "Search channels for My Datasource"
408#[derive(Clone, PartialEq, ::prost::Message)]
409pub struct ToolAction {
410    #[prost(string, tag = "1")]
411    pub id: ::prost::alloc::string::String,
412    /// "Thought", "Read", "Find", "Look-up", etc.
413    #[prost(string, tag = "2")]
414    pub tool_action_verb: ::prost::alloc::string::String,
415    /// "workbook", "channel", "variable", "panel", etc.
416    #[prost(string, optional, tag = "3")]
417    pub tool_target: ::core::option::Option<::prost::alloc::string::String>,
418}
419/// Generated client implementations.
420pub mod ai_agent_service_client {
421    #![allow(
422        unused_variables,
423        dead_code,
424        missing_docs,
425        clippy::wildcard_imports,
426        clippy::let_unit_value,
427    )]
428    use tonic::codegen::*;
429    use tonic::codegen::http::Uri;
430    /// AIAgentService provides AI-powered assistance for general operations
431    #[derive(Debug, Clone)]
432    pub struct AiAgentServiceClient<T> {
433        inner: tonic::client::Grpc<T>,
434    }
435    impl AiAgentServiceClient<tonic::transport::Channel> {
436        /// Attempt to create a new client by connecting to a given endpoint.
437        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
438        where
439            D: TryInto<tonic::transport::Endpoint>,
440            D::Error: Into<StdError>,
441        {
442            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
443            Ok(Self::new(conn))
444        }
445    }
446    impl<T> AiAgentServiceClient<T>
447    where
448        T: tonic::client::GrpcService<tonic::body::Body>,
449        T::Error: Into<StdError>,
450        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
451        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
452    {
453        pub fn new(inner: T) -> Self {
454            let inner = tonic::client::Grpc::new(inner);
455            Self { inner }
456        }
457        pub fn with_origin(inner: T, origin: Uri) -> Self {
458            let inner = tonic::client::Grpc::with_origin(inner, origin);
459            Self { inner }
460        }
461        pub fn with_interceptor<F>(
462            inner: T,
463            interceptor: F,
464        ) -> AiAgentServiceClient<InterceptedService<T, F>>
465        where
466            F: tonic::service::Interceptor,
467            T::ResponseBody: Default,
468            T: tonic::codegen::Service<
469                http::Request<tonic::body::Body>,
470                Response = http::Response<
471                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
472                >,
473            >,
474            <T as tonic::codegen::Service<
475                http::Request<tonic::body::Body>,
476            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
477        {
478            AiAgentServiceClient::new(InterceptedService::new(inner, interceptor))
479        }
480        /// Compress requests with the given encoding.
481        ///
482        /// This requires the server to support it otherwise it might respond with an
483        /// error.
484        #[must_use]
485        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
486            self.inner = self.inner.send_compressed(encoding);
487            self
488        }
489        /// Enable decompressing responses.
490        #[must_use]
491        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
492            self.inner = self.inner.accept_compressed(encoding);
493            self
494        }
495        /// Limits the maximum size of a decoded message.
496        ///
497        /// Default: `4MB`
498        #[must_use]
499        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
500            self.inner = self.inner.max_decoding_message_size(limit);
501            self
502        }
503        /// Limits the maximum size of an encoded message.
504        ///
505        /// Default: `usize::MAX`
506        #[must_use]
507        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
508            self.inner = self.inner.max_encoding_message_size(limit);
509            self
510        }
511        /// StreamChat handles bidirectional streaming chat for AI agent
512        pub async fn stream_chat(
513            &mut self,
514            request: impl tonic::IntoRequest<super::StreamChatRequest>,
515        ) -> std::result::Result<
516            tonic::Response<tonic::codec::Streaming<super::StreamChatResponse>>,
517            tonic::Status,
518        > {
519            self.inner
520                .ready()
521                .await
522                .map_err(|e| {
523                    tonic::Status::unknown(
524                        format!("Service was not ready: {}", e.into()),
525                    )
526                })?;
527            let codec = tonic::codec::ProstCodec::default();
528            let path = http::uri::PathAndQuery::from_static(
529                "/nominal.ai.v1.AIAgentService/StreamChat",
530            );
531            let mut req = request.into_request();
532            req.extensions_mut()
533                .insert(GrpcMethod::new("nominal.ai.v1.AIAgentService", "StreamChat"));
534            self.inner.server_streaming(req, path, codec).await
535        }
536        /// GetConversation handles getting a complete conversation list, with an optional limit on number of messages returned
537        pub async fn get_conversation(
538            &mut self,
539            request: impl tonic::IntoRequest<super::GetConversationRequest>,
540        ) -> std::result::Result<
541            tonic::Response<super::GetConversationResponse>,
542            tonic::Status,
543        > {
544            self.inner
545                .ready()
546                .await
547                .map_err(|e| {
548                    tonic::Status::unknown(
549                        format!("Service was not ready: {}", e.into()),
550                    )
551                })?;
552            let codec = tonic::codec::ProstCodec::default();
553            let path = http::uri::PathAndQuery::from_static(
554                "/nominal.ai.v1.AIAgentService/GetConversation",
555            );
556            let mut req = request.into_request();
557            req.extensions_mut()
558                .insert(
559                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "GetConversation"),
560                );
561            self.inner.unary(req, path, codec).await
562        }
563        /// ListConversations handles getting the list of conversation ids ordered by most recently updated
564        pub async fn list_conversations(
565            &mut self,
566            request: impl tonic::IntoRequest<super::ListConversationsRequest>,
567        ) -> std::result::Result<
568            tonic::Response<super::ListConversationsResponse>,
569            tonic::Status,
570        > {
571            self.inner
572                .ready()
573                .await
574                .map_err(|e| {
575                    tonic::Status::unknown(
576                        format!("Service was not ready: {}", e.into()),
577                    )
578                })?;
579            let codec = tonic::codec::ProstCodec::default();
580            let path = http::uri::PathAndQuery::from_static(
581                "/nominal.ai.v1.AIAgentService/ListConversations",
582            );
583            let mut req = request.into_request();
584            req.extensions_mut()
585                .insert(
586                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "ListConversations"),
587                );
588            self.inner.unary(req, path, codec).await
589        }
590        /// CreateConversation handles creating a conversation and assigning it a conversation rid
591        pub async fn create_conversation(
592            &mut self,
593            request: impl tonic::IntoRequest<super::CreateConversationRequest>,
594        ) -> std::result::Result<
595            tonic::Response<super::CreateConversationResponse>,
596            tonic::Status,
597        > {
598            self.inner
599                .ready()
600                .await
601                .map_err(|e| {
602                    tonic::Status::unknown(
603                        format!("Service was not ready: {}", e.into()),
604                    )
605                })?;
606            let codec = tonic::codec::ProstCodec::default();
607            let path = http::uri::PathAndQuery::from_static(
608                "/nominal.ai.v1.AIAgentService/CreateConversation",
609            );
610            let mut req = request.into_request();
611            req.extensions_mut()
612                .insert(
613                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "CreateConversation"),
614                );
615            self.inner.unary(req, path, codec).await
616        }
617        /// UpdateConversationMetadata handles updating any metadata associated with a conversation
618        pub async fn update_conversation_metadata(
619            &mut self,
620            request: impl tonic::IntoRequest<super::UpdateConversationMetadataRequest>,
621        ) -> std::result::Result<
622            tonic::Response<super::UpdateConversationMetadataResponse>,
623            tonic::Status,
624        > {
625            self.inner
626                .ready()
627                .await
628                .map_err(|e| {
629                    tonic::Status::unknown(
630                        format!("Service was not ready: {}", e.into()),
631                    )
632                })?;
633            let codec = tonic::codec::ProstCodec::default();
634            let path = http::uri::PathAndQuery::from_static(
635                "/nominal.ai.v1.AIAgentService/UpdateConversationMetadata",
636            );
637            let mut req = request.into_request();
638            req.extensions_mut()
639                .insert(
640                    GrpcMethod::new(
641                        "nominal.ai.v1.AIAgentService",
642                        "UpdateConversationMetadata",
643                    ),
644                );
645            self.inner.unary(req, path, codec).await
646        }
647        /// DeleteConversation handles deleting a specific conversation by conversation rid
648        pub async fn delete_conversation(
649            &mut self,
650            request: impl tonic::IntoRequest<super::DeleteConversationRequest>,
651        ) -> std::result::Result<
652            tonic::Response<super::DeleteConversationResponse>,
653            tonic::Status,
654        > {
655            self.inner
656                .ready()
657                .await
658                .map_err(|e| {
659                    tonic::Status::unknown(
660                        format!("Service was not ready: {}", e.into()),
661                    )
662                })?;
663            let codec = tonic::codec::ProstCodec::default();
664            let path = http::uri::PathAndQuery::from_static(
665                "/nominal.ai.v1.AIAgentService/DeleteConversation",
666            );
667            let mut req = request.into_request();
668            req.extensions_mut()
669                .insert(
670                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "DeleteConversation"),
671                );
672            self.inner.unary(req, path, codec).await
673        }
674    }
675}
676/// StreamChatRequest is a request to stream chat messages for workbook AI agent
677#[derive(Clone, PartialEq, ::prost::Message)]
678pub struct WorkbookAgentServiceStreamChatRequest {
679    #[prost(message, repeated, tag = "1")]
680    pub messages: ::prost::alloc::vec::Vec<ModelMessage>,
681    /// JSON-serialized representation of INotebook
682    #[prost(string, optional, tag = "2")]
683    pub notebook_as_json: ::core::option::Option<::prost::alloc::string::String>,
684    /// The current tab visible in the workbook from the user's perspective
685    #[prost(int32, optional, tag = "3")]
686    pub selected_tab_index: ::core::option::Option<i32>,
687    /// Optional: image files to provide to the agent
688    #[prost(message, repeated, tag = "4")]
689    pub images: ::prost::alloc::vec::Vec<ImagePart>,
690    /// Time range for the tab that is currently visible to the user
691    #[prost(message, optional, tag = "5")]
692    pub range: ::core::option::Option<TimeRange>,
693    /// V2 conversation API persists the message and any assistant responses to storage under the provided
694    /// conversation_id. if id does not exist in the database, then a new conversation is started from this message.
695    #[prost(message, optional, tag = "6")]
696    pub message: ::core::option::Option<AppendMessage>,
697}
698/// This will append a message to an existing conversation
699/// A non existent conversation id will raise an error
700#[derive(Clone, PartialEq, ::prost::Message)]
701pub struct AppendMessage {
702    #[prost(message, optional, tag = "1")]
703    pub message: ::core::option::Option<UserModelMessage>,
704    #[prost(string, tag = "2")]
705    pub conversation_rid: ::prost::alloc::string::String,
706}
707/// Generated client implementations.
pub mod workbook_agent_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// WorkbookAgentService provides AI-powered assistance for workbook operations
    /// this is deprecated in favor of the AIAgentService
    #[derive(Debug, Clone)]
    pub struct WorkbookAgentServiceClient<T> {
        // Underlying generic gRPC client; owns the codec, compression and
        // message-size settings configured via the builder-style methods below.
        inner: tonic::client::Grpc<T>,
    }
    impl WorkbookAgentServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> WorkbookAgentServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        /// Create a client from an already-constructed transport/service.
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        /// Create a client whose request URIs are rewritten to the given origin.
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        /// Wrap the transport in an interceptor that can inspect/modify each request.
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> WorkbookAgentServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            WorkbookAgentServiceClient::new(InterceptedService::new(inner, interceptor))
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// StreamChat handles bidirectional streaming chat for workbook AI agent
        pub async fn stream_chat(
            &mut self,
            request: impl tonic::IntoRequest<
                super::WorkbookAgentServiceStreamChatRequest,
            >,
        ) -> std::result::Result<
            tonic::Response<tonic::codec::Streaming<super::StreamChatResponse>>,
            tonic::Status,
        > {
            // Wait until the underlying service can accept a request; surface
            // transport-level failures as a gRPC `unknown` status.
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.WorkbookAgentService/StreamChat",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.WorkbookAgentService", "StreamChat"),
                );
            // NOTE(review): despite "bidirectional" in the doc above (copied from the
            // proto), this is a server-streaming RPC: one request message in, a
            // stream of StreamChatResponse out.
            self.inner.server_streaming(req, path, codec).await
        }
        /// GetConversation handles getting a complete conversation list, with an optional limit on number of messages returned
        pub async fn get_conversation(
            &mut self,
            request: impl tonic::IntoRequest<super::GetConversationRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GetConversationResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.WorkbookAgentService/GetConversation",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.WorkbookAgentService",
                        "GetConversation",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// ListConversations handles getting the list of conversation ids ordered by most recently updated
        pub async fn list_conversations(
            &mut self,
            request: impl tonic::IntoRequest<super::ListConversationsRequest>,
        ) -> std::result::Result<
            tonic::Response<super::ListConversationsResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.WorkbookAgentService/ListConversations",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.WorkbookAgentService",
                        "ListConversations",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// CreateConversation handles creating a conversation and assigning it a conversation rid
        pub async fn create_conversation(
            &mut self,
            request: impl tonic::IntoRequest<super::CreateConversationRequest>,
        ) -> std::result::Result<
            tonic::Response<super::CreateConversationResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.WorkbookAgentService/CreateConversation",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.WorkbookAgentService",
                        "CreateConversation",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// UpdateConversationMetadata handles updating any metadata associated with a conversation
        pub async fn update_conversation_metadata(
            &mut self,
            request: impl tonic::IntoRequest<super::UpdateConversationMetadataRequest>,
        ) -> std::result::Result<
            tonic::Response<super::UpdateConversationMetadataResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.WorkbookAgentService/UpdateConversationMetadata",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.WorkbookAgentService",
                        "UpdateConversationMetadata",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// DeleteConversation handles deleting a specific conversation by conversation rid
        pub async fn delete_conversation(
            &mut self,
            request: impl tonic::IntoRequest<super::DeleteConversationRequest>,
        ) -> std::result::Result<
            tonic::Response<super::DeleteConversationResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.WorkbookAgentService/DeleteConversation",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.WorkbookAgentService",
                        "DeleteConversation",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}
/// Request message for `AIFeaturesService.IsAIEnabledForUser`; carries no fields.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct IsAiEnabledForUserRequest {}
/// Response message for `AIFeaturesService.IsAIEnabledForUser`.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct IsAiEnabledForUserResponse {
    /// True when AI features are enabled for the user.
    #[prost(bool, tag = "1")]
    pub is_enabled: bool,
}
988/// Generated client implementations.
pub mod ai_features_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// AIFeaturesService provides information about enabled AI features
    #[derive(Debug, Clone)]
    pub struct AiFeaturesServiceClient<T> {
        // Underlying generic gRPC client; owns the codec, compression and
        // message-size settings configured via the builder-style methods below.
        inner: tonic::client::Grpc<T>,
    }
    impl AiFeaturesServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> AiFeaturesServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        /// Create a client from an already-constructed transport/service.
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        /// Create a client whose request URIs are rewritten to the given origin.
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        /// Wrap the transport in an interceptor that can inspect/modify each request.
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> AiFeaturesServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            AiFeaturesServiceClient::new(InterceptedService::new(inner, interceptor))
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// IsAIEnabledForUser can be used to check if AI is enabled for a specific user
        pub async fn is_ai_enabled_for_user(
            &mut self,
            request: impl tonic::IntoRequest<super::IsAiEnabledForUserRequest>,
        ) -> std::result::Result<
            tonic::Response<super::IsAiEnabledForUserResponse>,
            tonic::Status,
        > {
            // Wait until the underlying service can accept a request; surface
            // transport-level failures as a gRPC `unknown` status.
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIFeaturesService/IsAIEnabledForUser",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.AIFeaturesService",
                        "IsAIEnabledForUser",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}
/// CreateOrUpdateKnowledgeBaseRequest is a request to create a knowledge from an attachment in the attachment's
/// workspace. Subsequent calls for the same attachment will overwrite the existing knowledge base.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateOrUpdateKnowledgeBaseRequest {
    /// RID of the attachment to build the knowledge base from
    #[prost(string, tag = "1")]
    pub attachment_rid: ::prost::alloc::string::String,
    /// summary of the knowledge base, will be used by the LLM to decide when to use it
    #[prost(string, tag = "2")]
    pub summary_description: ::prost::alloc::string::String,
    /// Optional `KnowledgeBaseType` (stored as raw i32); an unset/unspecified value defaults to PROMPT
    #[prost(enumeration = "KnowledgeBaseType", optional, tag = "3")]
    pub r#type: ::core::option::Option<i32>,
}
/// CreateOrUpdateKnowledgeBaseResponse is the response to creating/updating a knowledge base
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateOrUpdateKnowledgeBaseResponse {
    /// RID of the created or updated knowledge base
    #[prost(string, tag = "1")]
    pub knowledge_base_rid: ::prost::alloc::string::String,
}
/// KnowledgeBase represents a knowledge base entry
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct KnowledgeBase {
    /// RID identifying this knowledge base
    #[prost(string, tag = "1")]
    pub knowledge_base_rid: ::prost::alloc::string::String,
    /// RID of the source attachment the knowledge base was created from
    #[prost(string, tag = "2")]
    pub attachment_rid: ::prost::alloc::string::String,
    /// RID of the workspace the knowledge base belongs to
    #[prost(string, tag = "3")]
    pub workspace_rid: ::prost::alloc::string::String,
    /// summary used by the LLM to decide when to use this knowledge base
    #[prost(string, tag = "4")]
    pub summary_description: ::prost::alloc::string::String,
    /// `KnowledgeBaseType` enum value, stored as raw i32
    #[prost(enumeration = "KnowledgeBaseType", tag = "5")]
    pub r#type: i32,
    /// version of this entry; presumably bumped on each update — TODO confirm with service
    #[prost(int32, tag = "6")]
    pub version: i32,
}
/// Request for `KnowledgeBaseService.List`.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListRequest {
    /// RID of the workspace whose knowledge bases should be listed
    #[prost(string, tag = "1")]
    pub workspace_rid: ::prost::alloc::string::String,
}
/// Response for `KnowledgeBaseService.List`.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListResponse {
    /// All knowledge bases in the requested workspace
    #[prost(message, repeated, tag = "1")]
    pub knowledge_bases: ::prost::alloc::vec::Vec<KnowledgeBase>,
}
/// Request for `KnowledgeBaseService.Delete`.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteRequest {
    /// RID of the knowledge base to delete
    #[prost(string, tag = "1")]
    pub knowledge_base_rid: ::prost::alloc::string::String,
}
/// Response for `KnowledgeBaseService.Delete`.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct DeleteResponse {
    /// Whether the delete succeeded
    #[prost(bool, tag = "1")]
    pub success: bool,
}
/// Request for `KnowledgeBaseService.GetBatch`.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetBatchRequest {
    /// RIDs of the knowledge bases to retrieve
    #[prost(string, repeated, tag = "1")]
    pub knowledge_base_rids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
/// Response for `KnowledgeBaseService.GetBatch`.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetBatchResponse {
    /// The retrieved knowledge bases
    #[prost(message, repeated, tag = "1")]
    pub knowledge_bases: ::prost::alloc::vec::Vec<KnowledgeBase>,
}
/// generate summary description is intentionally going to return the generated description to the frontend
/// rather than storing it in the knowledge base directly because the description needs to be accepted by the user
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GenerateSummaryDescriptionRequest {
    /// RID of the attachment to generate a summary description for
    #[prost(string, tag = "1")]
    pub attachment_rid: ::prost::alloc::string::String,
}
/// Response for `KnowledgeBaseService.GenerateSummaryDescription`.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GenerateSummaryDescriptionResponse {
    /// The generated summary; returned for user acceptance rather than stored directly
    #[prost(string, tag = "1")]
    pub summary_description: ::prost::alloc::string::String,
}
/// KnowledgeBaseType defines the types of knowledge base
///
/// Stored in messages as raw `i32` fields (see `KnowledgeBase::r#type`).
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum KnowledgeBaseType {
    /// defaults to PROMPT
    Unspecified = 0,
    /// knowledge base gets added directly to prompt (needs to be small enough!)
    Prompt = 1,
    /// knowledge base gets used via vector search on embeddings
    Embedding = 2,
}
1199impl KnowledgeBaseType {
1200    /// String value of the enum field names used in the ProtoBuf definition.
1201    ///
1202    /// The values are not transformed in any way and thus are considered stable
1203    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
1204    pub fn as_str_name(&self) -> &'static str {
1205        match self {
1206            Self::Unspecified => "KNOWLEDGE_BASE_TYPE_UNSPECIFIED",
1207            Self::Prompt => "KNOWLEDGE_BASE_TYPE_PROMPT",
1208            Self::Embedding => "KNOWLEDGE_BASE_TYPE_EMBEDDING",
1209        }
1210    }
1211    /// Creates an enum from field names used in the ProtoBuf definition.
1212    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
1213        match value {
1214            "KNOWLEDGE_BASE_TYPE_UNSPECIFIED" => Some(Self::Unspecified),
1215            "KNOWLEDGE_BASE_TYPE_PROMPT" => Some(Self::Prompt),
1216            "KNOWLEDGE_BASE_TYPE_EMBEDDING" => Some(Self::Embedding),
1217            _ => None,
1218        }
1219    }
1220}
1221/// Generated client implementations.
pub mod knowledge_base_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// KnowledgeBaseService provides AI-powered knowledge base management
    #[derive(Debug, Clone)]
    pub struct KnowledgeBaseServiceClient<T> {
        // Underlying generic gRPC client; owns the codec, compression and
        // message-size settings configured via the builder-style methods below.
        inner: tonic::client::Grpc<T>,
    }
    impl KnowledgeBaseServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> KnowledgeBaseServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        /// Create a client from an already-constructed transport/service.
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        /// Create a client whose request URIs are rewritten to the given origin.
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        /// Wrap the transport in an interceptor that can inspect/modify each request.
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> KnowledgeBaseServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            KnowledgeBaseServiceClient::new(InterceptedService::new(inner, interceptor))
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// CreateOrUpdateKnowledgeBase creates a knowledge base in the workspace
        pub async fn create_or_update_knowledge_base(
            &mut self,
            request: impl tonic::IntoRequest<super::CreateOrUpdateKnowledgeBaseRequest>,
        ) -> std::result::Result<
            tonic::Response<super::CreateOrUpdateKnowledgeBaseResponse>,
            tonic::Status,
        > {
            // Wait until the underlying service can accept a request; surface
            // transport-level failures as a gRPC `unknown` status.
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/CreateOrUpdateKnowledgeBase",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.KnowledgeBaseService",
                        "CreateOrUpdateKnowledgeBase",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// List returns all knowledge bases in the specified workspace
        pub async fn list(
            &mut self,
            request: impl tonic::IntoRequest<super::ListRequest>,
        ) -> std::result::Result<tonic::Response<super::ListResponse>, tonic::Status> {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/List",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "List"));
            self.inner.unary(req, path, codec).await
        }
        /// Delete removes a knowledge base by its RID
        pub async fn delete(
            &mut self,
            request: impl tonic::IntoRequest<super::DeleteRequest>,
        ) -> std::result::Result<tonic::Response<super::DeleteResponse>, tonic::Status> {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/Delete",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "Delete"));
            self.inner.unary(req, path, codec).await
        }
        /// GetBatch retrieves multiple knowledge bases by their RIDs
        pub async fn get_batch(
            &mut self,
            request: impl tonic::IntoRequest<super::GetBatchRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GetBatchResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/GetBatch",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "GetBatch"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// GenerateSummaryDescription generates a summary description for an attachment rid
        pub async fn generate_summary_description(
            &mut self,
            request: impl tonic::IntoRequest<super::GenerateSummaryDescriptionRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GenerateSummaryDescriptionResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/GenerateSummaryDescription",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.KnowledgeBaseService",
                        "GenerateSummaryDescription",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}