nominal_api/proto/
nominal.ai.v1.rs

1// This file is @generated by prost-build.
/// StreamChatRequest is a request to stream chat messages for AI agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StreamChatRequest {
    /// The conversation ID
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    /// The user message to append to the conversation
    #[prost(message, optional, tag = "2")]
    pub message: ::core::option::Option<UserModelMessage>,
    /// Optional: image files to provide to the agent
    #[prost(message, repeated, tag = "3")]
    pub images: ::prost::alloc::vec::Vec<ImagePart>,
    /// Context-specific fields based on the oneofKind.
    /// `None` when no context field was set on the wire (proto3 `oneof`).
    #[prost(oneof = "stream_chat_request::Context", tags = "4, 5")]
    pub context: ::core::option::Option<stream_chat_request::Context>,
}
/// Nested message and enum types in `StreamChatRequest`.
pub mod stream_chat_request {
    /// Context-specific fields based on the oneofKind.
    /// At most one variant is present per message (proto3 `oneof` semantics).
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Context {
        /// Workbook-scoped context (wire tag 4).
        #[prost(message, tag = "4")]
        Workbook(super::WorkbookContext),
        /// Global (no-workbook) context (wire tag 5).
        #[prost(message, tag = "5")]
        Global(super::GlobalContext),
    }
}
/// WorkbookContext contains workbook-specific context fields
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct WorkbookContext {
    /// RID of the workbook to use for context
    #[prost(string, tag = "1")]
    pub workbook_rid: ::prost::alloc::string::String,
    /// Optional: the user's presence in the workbook
    /// (what the user is currently viewing; see `WorkbookUserPresence`).
    #[prost(message, optional, tag = "2")]
    pub user_presence: ::core::option::Option<WorkbookUserPresence>,
}
/// GlobalContext (no context): empty marker message selecting the
/// workbook-independent context branch of `StreamChatRequest`.
/// NOTE(review): the generated comment said "DefaultContext", which does not
/// match the message name — the upstream .proto comment is likely stale and
/// should be fixed there (this file is regenerated by prost-build).
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct GlobalContext {}
/// WorkbookUserPresence contains the user's presence in the workbook
/// which is used to describe what the user is viewing at the time of the message.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct WorkbookUserPresence {
    /// Index of the tab the user currently has open, if known.
    #[prost(int32, optional, tag = "1")]
    pub tab_index: ::core::option::Option<i32>,
    /// Time range currently visible to the user, if known.
    #[prost(message, optional, tag = "2")]
    pub range: ::core::option::Option<TimeRange>,
}
/// CreateConversation request will create a new conversation thread
/// if old conversation id is not set, a brand new, clear chat is created
/// If old conversation id is set without a previous message id, the full conversation thread will be copied
/// if previous message id is set with a previous message id, the conversation thread up until that message will be copied
/// the above case is useful for branching a conversation into a new thread
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateConversationRequest {
    /// Human-readable title for the new conversation.
    #[prost(string, tag = "1")]
    pub title: ::prost::alloc::string::String,
    /// Workspace the conversation is created in.
    #[prost(string, tag = "2")]
    pub workspace_rid: ::prost::alloc::string::String,
    /// When set, the new conversation is seeded by copying this thread
    /// (see the branching rules in the message-level comment above).
    #[prost(string, optional, tag = "3")]
    pub old_conversation_rid: ::core::option::Option<::prost::alloc::string::String>,
    /// When set together with `old_conversation_rid`, only messages up to this
    /// id are copied — i.e. branch the thread at that message.
    #[prost(string, optional, tag = "4")]
    pub previous_message_id: ::core::option::Option<::prost::alloc::string::String>,
}
/// CreateConversationResponse will return the conversation id for the new conversation
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateConversationResponse {
    /// RID of the newly created conversation.
    #[prost(string, tag = "1")]
    pub new_conversation_rid: ::prost::alloc::string::String,
}
/// Request to update metadata (currently the title) of an existing conversation.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UpdateConversationMetadataRequest {
    /// New title to set on the conversation.
    #[prost(string, tag = "1")]
    pub title: ::prost::alloc::string::String,
    /// RID of the conversation to update.
    #[prost(string, tag = "2")]
    pub conversation_rid: ::prost::alloc::string::String,
}
/// Empty acknowledgement for `UpdateConversationMetadata`.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct UpdateConversationMetadataResponse {}
/// Request to delete a conversation by its RID.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteConversationRequest {
    /// RID of the conversation to delete.
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
}
/// Empty acknowledgement for `DeleteConversation`.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct DeleteConversationResponse {}
/// a GetConversationRequest allows you to retrieve a subset of messages from a conversation thread represented
/// by provided rid. To start from a particular message - you can also provide a message id.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetConversationRequest {
    /// RID of the conversation to fetch.
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    /// Optional: message id to start the page from.
    #[prost(string, optional, tag = "2")]
    pub page_start_message_id: ::core::option::Option<::prost::alloc::string::String>,
    /// Optional: maximum number of messages to return.
    #[prost(int32, optional, tag = "3")]
    pub max_message_count: ::core::option::Option<i32>,
}
/// Model message with id allows you to identify the message ID of a given message
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ModelMessageWithId {
    /// Stable identifier of this message within the conversation thread.
    #[prost(string, tag = "3")]
    pub message_id: ::prost::alloc::string::String,
    /// Payload: either a model message or a tool action (proto3 `oneof`).
    #[prost(oneof = "model_message_with_id::Content", tags = "1, 2")]
    pub content: ::core::option::Option<model_message_with_id::Content>,
}
/// Nested message and enum types in `ModelMessageWithId`.
pub mod model_message_with_id {
    /// The content payload of a `ModelMessageWithId`.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Content {
        /// A user or assistant message (wire tag 1).
        #[prost(message, tag = "1")]
        Message(super::ModelMessage),
        /// A high-level description of a tool call (wire tag 2).
        #[prost(message, tag = "2")]
        ToolAction(super::ToolAction),
    }
}
/// Response for `GetConversation`: a page of messages plus the conversation's metadata.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetConversationResponse {
    /// Messages in thread order, each tagged with its message id.
    #[prost(message, repeated, tag = "1")]
    pub ordered_messages: ::prost::alloc::vec::Vec<ModelMessageWithId>,
    /// Metadata (title, timestamps, rid) of the conversation.
    #[prost(message, optional, tag = "2")]
    pub conversation_metadata: ::core::option::Option<ConversationMetadata>,
}
/// Will generate all conversation threads that this user has in this workspace
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListConversationsRequest {
    /// Workspace whose conversations should be listed.
    #[prost(string, tag = "1")]
    pub workspace_rid: ::prost::alloc::string::String,
}
/// Metadata describing one conversation thread (identity, title, timestamps).
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ConversationMetadata {
    /// RID uniquely identifying the conversation.
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    /// Human-readable conversation title.
    #[prost(string, tag = "2")]
    pub title: ::prost::alloc::string::String,
    /// Creation time (well-known `google.protobuf.Timestamp`).
    #[prost(message, optional, tag = "3")]
    pub created_at: ::core::option::Option<
        super::super::super::google::protobuf::Timestamp,
    >,
    /// Last-update time (well-known `google.protobuf.Timestamp`).
    #[prost(message, optional, tag = "4")]
    pub last_updated_at: ::core::option::Option<
        super::super::super::google::protobuf::Timestamp,
    >,
}
/// ListConversationsResponse is a list of conversations that can be used in a call to GetConversationRequest
/// to get a full conversation from storage. These are ordered by creation time.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListConversationsResponse {
    /// Conversation metadata entries, ordered by creation time (see above).
    #[prost(message, repeated, tag = "1")]
    pub conversations: ::prost::alloc::vec::Vec<ConversationMetadata>,
}
/// A time interval delimited by two local `Timestamp` values.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct TimeRange {
    /// Inclusive/exclusive semantics are not specified here — defined by the .proto.
    #[prost(message, optional, tag = "1")]
    pub range_start: ::core::option::Option<Timestamp>,
    #[prost(message, optional, tag = "2")]
    pub range_end: ::core::option::Option<Timestamp>,
}
/// A timestamp split into seconds and nanoseconds.
/// NOTE(review): this is the package-local `Timestamp`, NOT the well-known
/// `google.protobuf.Timestamp` used by `ConversationMetadata`. Also, `seconds`
/// is `int32` (the well-known type uses int64) — confirm in the .proto whether
/// this is an epoch-seconds value, since i32 overflows in 2038.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct Timestamp {
    #[prost(int32, tag = "1")]
    pub seconds: i32,
    /// Sub-second component in nanoseconds.
    #[prost(int32, tag = "2")]
    pub nanoseconds: i32,
}
/// ModelMessage is a discriminated union of system, user, assistant, and tool messages.
/// Each message type has its own structure and content.
/// NOTE(review): only `User` and `Assistant` variants exist in this generated
/// version, despite the broader wording above.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ModelMessage {
    /// `None` when no variant was set on the wire (proto3 `oneof`).
    #[prost(oneof = "model_message::Kind", tags = "1, 2")]
    pub kind: ::core::option::Option<model_message::Kind>,
}
/// Nested message and enum types in `ModelMessage`.
pub mod model_message {
    /// Which party authored the message.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Kind {
        /// A message authored by the user (wire tag 1).
        #[prost(message, tag = "1")]
        User(super::UserModelMessage),
        /// A message authored by the assistant (wire tag 2).
        #[prost(message, tag = "2")]
        Assistant(super::AssistantModelMessage),
    }
}
/// A user message containing text
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UserModelMessage {
    /// Content parts of the user message. NOTE(review): despite the field name
    /// `text`, this is a repeated `UserContentPart` — the name comes from the .proto.
    #[prost(message, repeated, tag = "1")]
    pub text: ::prost::alloc::vec::Vec<UserContentPart>,
}
/// An assistant message containing text
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AssistantModelMessage {
    /// Content parts (text and/or reasoning) of the assistant message.
    #[prost(message, repeated, tag = "1")]
    pub content_parts: ::prost::alloc::vec::Vec<AssistantContentPart>,
}
/// One content part of a user message; currently only text is supported.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UserContentPart {
    /// The single-variant `oneof`; `None` if unset on the wire.
    #[prost(oneof = "user_content_part::Part", tags = "1")]
    pub part: ::core::option::Option<user_content_part::Part>,
}
/// Nested message and enum types in `UserContentPart`.
pub mod user_content_part {
    /// Kinds of user content; a single-variant oneof leaves room for future part types.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Part {
        /// Plain text content (wire tag 1).
        #[prost(message, tag = "1")]
        Text(super::TextPart),
    }
}
/// Content part for assistant messages: can be text, reasoning, or mutation.
/// NOTE(review): only `Text` and `Reasoning` variants exist in this generated
/// version; the "mutation" mentioned above is not present here.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AssistantContentPart {
    /// `None` if no part was set on the wire (proto3 `oneof`).
    #[prost(oneof = "assistant_content_part::Part", tags = "1, 2")]
    pub part: ::core::option::Option<assistant_content_part::Part>,
}
/// Nested message and enum types in `AssistantContentPart`.
pub mod assistant_content_part {
    /// Kinds of assistant content.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Part {
        /// Visible response text (wire tag 1).
        #[prost(message, tag = "1")]
        Text(super::TextPart),
        /// Model reasoning text (wire tag 2).
        #[prost(message, tag = "2")]
        Reasoning(super::ReasoningPart),
    }
}
/// Text part for user or assistant messages.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextPart {
    /// The text content of this part.
    #[prost(string, tag = "1")]
    pub text: ::prost::alloc::string::String,
}
/// User-supplied image part.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ImagePart {
    /// The base64-encoded image data
    /// NOTE(review): the field is protobuf `bytes`; whether the payload is raw
    /// image bytes or base64 text stored as bytes should be confirmed against
    /// the .proto / producing client — the comment and type disagree.
    #[prost(bytes = "vec", tag = "1")]
    pub data: ::prost::alloc::vec::Vec<u8>,
    /// The media type of the image (e.g. "image/png", "image/jpeg")
    #[prost(string, optional, tag = "2")]
    pub media_type: ::core::option::Option<::prost::alloc::string::String>,
    /// Optional: the filename of the image
    #[prost(string, optional, tag = "3")]
    pub filename: ::core::option::Option<::prost::alloc::string::String>,
}
/// Reasoning part for assistant messages.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningPart {
    /// The reasoning text content.
    #[prost(string, tag = "1")]
    pub reasoning: ::prost::alloc::string::String,
}
/// StreamChatResponse is a discriminated union response to a StreamChatRequest
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StreamChatResponse {
    /// Exactly one event kind per streamed message; `None` if unset on the wire.
    #[prost(
        oneof = "stream_chat_response::Response",
        tags = "1, 2, 3, 4, 5, 6, 7, 8, 9, 10"
    )]
    pub response: ::core::option::Option<stream_chat_response::Response>,
}
/// Nested message and enum types in `StreamChatResponse`.
pub mod stream_chat_response {
    /// One streamed event in a chat session. Text and reasoning are delivered
    /// as start/delta/end triples correlated by their `id` fields.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Response {
        /// Terminal event carrying the generated message ids (wire tag 1).
        #[prost(message, tag = "1")]
        Finish(super::Finish),
        /// Terminal error event (wire tag 2).
        #[prost(message, tag = "2")]
        Error(super::Error),
        #[prost(message, tag = "3")]
        TextStart(super::TextStart),
        #[prost(message, tag = "4")]
        TextDelta(super::TextDelta),
        #[prost(message, tag = "5")]
        TextEnd(super::TextEnd),
        #[prost(message, tag = "6")]
        ReasoningStart(super::ReasoningStart),
        #[prost(message, tag = "7")]
        ReasoningDelta(super::ReasoningDelta),
        #[prost(message, tag = "8")]
        ReasoningEnd(super::ReasoningEnd),
        /// this will be deprecated in favor of MCP-based mutations
        #[prost(message, tag = "9")]
        WorkbookMutation(super::WorkbookMutation),
        /// High-level notice of an internal tool call (wire tag 10).
        #[prost(message, tag = "10")]
        ToolAction(super::ToolAction),
    }
}
/// Indicates the end of a chat session
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Finish {
    /// The message ids in order of all generated messages for this agent run
    /// These ids can be used to branch a message from that specific message
    #[prost(string, repeated, tag = "1")]
    pub ordered_message_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
    /// In the case that this is the first agent run in a conversation thread, we also
    /// return the new conversation title generated
    #[prost(string, optional, tag = "2")]
    pub new_title: ::core::option::Option<::prost::alloc::string::String>,
}
/// An error that occurred during the chat session
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Error {
    /// Human-readable description of the error.
    #[prost(string, tag = "1")]
    pub message: ::prost::alloc::string::String,
}
/// Indicates the start of a text message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextStart {
    /// uniquely identifies the text message (e.g. uuid) so that the client can
    /// merge parallel message streams (if it happens).
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// A delta (continuation) of a text message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextDelta {
    /// Stream id; see `TextStart::id`.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// The next chunk of text
    #[prost(string, tag = "2")]
    pub delta: ::prost::alloc::string::String,
}
/// Indicates the end of a text message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextEnd {
    /// Stream id; see `TextStart::id`.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// Indicates the start of a reasoning message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningStart {
    /// Stream id correlating the reasoning start/delta/end events.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// A delta (continuation) of a reasoning message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningDelta {
    /// Stream id; see `ReasoningStart::id`.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// The next chunk of reasoning
    #[prost(string, tag = "2")]
    pub delta: ::prost::alloc::string::String,
}
/// Indicates the end of a reasoning message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningEnd {
    /// Stream id; see `ReasoningStart::id`.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// Add a new tab to the workbook
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AddTabMutation {
    /// if tab_name is not provided, we'll name it "New Tab"
    #[prost(string, optional, tag = "1")]
    pub tab_name: ::core::option::Option<::prost::alloc::string::String>,
}
/// this is the "result" of the mutation
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AddOrUpdatePanelMutation {
    /// JSON-serialized representation of IVizDefinition
    #[prost(string, tag = "1")]
    pub panel_as_json: ::prost::alloc::string::String,
    /// Identifier of the panel being added or updated.
    #[prost(string, tag = "2")]
    pub panel_id: ::prost::alloc::string::String,
    /// Index of the tab the panel belongs to.
    #[prost(int32, tag = "3")]
    pub tab_index: i32,
}
/// Remove one or more panels from the workbook by id.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RemovePanelsMutation {
    /// Ids of the panels to remove.
    #[prost(string, repeated, tag = "1")]
    pub panel_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
/// AddOrReplaceVariableMutation is a mutation to add or replace a variable in the workbook
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AddOrReplaceVariableMutation {
    /// scout_compute_api_ComputeSpecWithContext (this needs to be resolved)
    /// be careful: this uses the scout_compute_api version of ComputeSpecWithContext, which stores the spec as "seriesNode"
    /// and is NOT the same as ComputeSpecWithContext that is stored in INotebook.
    #[prost(string, tag = "1")]
    pub compute_spec_as_json: ::prost::alloc::string::String,
    /// if variable_name is not provided, we'll assume it's a new variable and auto-generate a unique name
    #[prost(string, optional, tag = "2")]
    pub variable_name: ::core::option::Option<::prost::alloc::string::String>,
    /// Optional display name shown to the user for this variable.
    #[prost(string, optional, tag = "3")]
    pub display_name: ::core::option::Option<::prost::alloc::string::String>,
}
/// DeleteVariablesMutation is a mutation to delete variables from the workbook
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteVariablesMutation {
    /// Names of the variables to delete.
    #[prost(string, repeated, tag = "1")]
    pub variable_names: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
/// WorkbookMutation is a mutation to the workbook
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct WorkbookMutation {
    /// Identifier of this mutation event.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// The specific mutation to apply (proto3 `oneof`); `None` if unset on the wire.
    #[prost(oneof = "workbook_mutation::Mutation", tags = "2, 3, 4, 5, 6")]
    pub mutation: ::core::option::Option<workbook_mutation::Mutation>,
}
/// Nested message and enum types in `WorkbookMutation`.
pub mod workbook_mutation {
    /// The concrete workbook mutation kinds.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Mutation {
        #[prost(message, tag = "2")]
        AddTab(super::AddTabMutation),
        #[prost(message, tag = "3")]
        AddOrUpdatePanel(super::AddOrUpdatePanelMutation),
        #[prost(message, tag = "4")]
        RemovePanels(super::RemovePanelsMutation),
        #[prost(message, tag = "5")]
        AddOrReplaceVariable(super::AddOrReplaceVariableMutation),
        #[prost(message, tag = "6")]
        DeleteVariables(super::DeleteVariablesMutation),
    }
}
/// this is a concise description of a tool call that the agent is making internally
/// without revealing too much detail about the tool call, it informs the user what the agent is doing
/// at a high level. the format is: `{tool_action_verb} {tool_target}` for example:
/// "Search channels for My Datasource"
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ToolAction {
    /// Identifier of this tool action event.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// "Thought", "Read", "Find", "Look-up", etc.
    #[prost(string, tag = "2")]
    pub tool_action_verb: ::prost::alloc::string::String,
    /// "workbook", "channel", "variable", "panel", etc.
    #[prost(string, optional, tag = "3")]
    pub tool_target: ::core::option::Option<::prost::alloc::string::String>,
}
/// Generated client implementations.
pub mod ai_agent_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// AIAgentService provides AI-powered assistance for general operations
    #[derive(Debug, Clone)]
    pub struct AiAgentServiceClient<T> {
        inner: tonic::client::Grpc<T>,
    }
    impl AiAgentServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> AiAgentServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        /// Wrap an already-constructed transport/service in a client.
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        /// Like `new`, but with an explicit origin URI for requests.
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        /// Wrap the transport with a tonic interceptor (e.g. for auth headers).
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> AiAgentServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            AiAgentServiceClient::new(InterceptedService::new(inner, interceptor))
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// StreamChat handles streaming chat for AI agent.
        /// NOTE(review): the upstream comment says "bidirectional", but the
        /// generated binding is server-streaming (single request, streaming
        /// response) — see the `server_streaming` call below.
        pub async fn stream_chat(
            &mut self,
            request: impl tonic::IntoRequest<super::StreamChatRequest>,
        ) -> std::result::Result<
            tonic::Response<tonic::codec::Streaming<super::StreamChatResponse>>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/StreamChat",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(GrpcMethod::new("nominal.ai.v1.AIAgentService", "StreamChat"));
            self.inner.server_streaming(req, path, codec).await
        }
        /// GetConversation handles getting a complete conversation list, with an optional limit on number of messages returned
        pub async fn get_conversation(
            &mut self,
            request: impl tonic::IntoRequest<super::GetConversationRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GetConversationResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/GetConversation",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "GetConversation"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// ListConversations handles getting the list of conversation ids ordered by most recently updated
        pub async fn list_conversations(
            &mut self,
            request: impl tonic::IntoRequest<super::ListConversationsRequest>,
        ) -> std::result::Result<
            tonic::Response<super::ListConversationsResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/ListConversations",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "ListConversations"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// CreateConversation handles creating a conversation and assigning it a conversation rid
        pub async fn create_conversation(
            &mut self,
            request: impl tonic::IntoRequest<super::CreateConversationRequest>,
        ) -> std::result::Result<
            tonic::Response<super::CreateConversationResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/CreateConversation",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "CreateConversation"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// UpdateConversationMetadata handles updating any metadata associated with a conversation
        pub async fn update_conversation_metadata(
            &mut self,
            request: impl tonic::IntoRequest<super::UpdateConversationMetadataRequest>,
        ) -> std::result::Result<
            tonic::Response<super::UpdateConversationMetadataResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/UpdateConversationMetadata",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.AIAgentService",
                        "UpdateConversationMetadata",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// DeleteConversation handles deleting a specific conversation by conversation rid
        pub async fn delete_conversation(
            &mut self,
            request: impl tonic::IntoRequest<super::DeleteConversationRequest>,
        ) -> std::result::Result<
            tonic::Response<super::DeleteConversationResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/DeleteConversation",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "DeleteConversation"),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}
/// StreamChatRequest is a request to stream chat messages for workbook AI agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct WorkbookAgentServiceStreamChatRequest {
    /// Prior messages in the chat, in order.
    #[prost(message, repeated, tag = "1")]
    pub messages: ::prost::alloc::vec::Vec<ModelMessage>,
    /// JSON-serialized representation of INotebook
    #[prost(string, optional, tag = "2")]
    pub notebook_as_json: ::core::option::Option<::prost::alloc::string::String>,
    /// The current tab visible in the workbook from the user's perspective
    #[prost(int32, optional, tag = "3")]
    pub selected_tab_index: ::core::option::Option<i32>,
    /// Optional: image files to provide to the agent
    #[prost(message, repeated, tag = "4")]
    pub images: ::prost::alloc::vec::Vec<ImagePart>,
    /// Time range for the tab that is currently visible to the user
    #[prost(message, optional, tag = "5")]
    pub range: ::core::option::Option<TimeRange>,
    /// V2 conversation API persists the message and any assistant responses to storage under the provided
    /// conversation_id. if id does not exist in the database, then a new conversation is started from this message.
    #[prost(message, optional, tag = "6")]
    pub message: ::core::option::Option<AppendMessage>,
}
/// This will append a message to an existing conversation
/// A non existent conversation id will raise an error
/// NOTE(review): this wording conflicts with the field-level comment on
/// `WorkbookAgentServiceStreamChatRequest::message` (which says an unknown id
/// starts a new conversation) — confirm the intended behavior in the .proto.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AppendMessage {
    /// The user message to append.
    #[prost(message, optional, tag = "1")]
    pub message: ::core::option::Option<UserModelMessage>,
    /// RID of the conversation to append to.
    #[prost(string, tag = "2")]
    pub conversation_rid: ::prost::alloc::string::String,
}
/// Generated client implementations.
pub mod workbook_agent_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// WorkbookAgentService provides AI-powered assistance for workbook operations
    /// this is deprecated in favor of the AIAgentService
    #[derive(Debug, Clone)]
    pub struct WorkbookAgentServiceClient<T> {
        // Underlying tonic gRPC client; all RPC methods delegate to it.
        inner: tonic::client::Grpc<T>,
    }
    impl WorkbookAgentServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> WorkbookAgentServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        /// Wrap an already-constructed gRPC service (e.g. a connected channel).
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        /// Wrap a service and rewrite request URIs against the given origin.
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        /// Build a client whose requests pass through `interceptor` first
        /// (commonly used to attach auth metadata).
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> WorkbookAgentServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            WorkbookAgentServiceClient::new(InterceptedService::new(inner, interceptor))
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// StreamChat handles bidirectional streaming chat for workbook AI agent
        ///
        /// NOTE(review): despite "bidirectional" above, the generated call is
        /// server-streaming (one request message, a stream of responses).
        pub async fn stream_chat(
            &mut self,
            request: impl tonic::IntoRequest<
                super::WorkbookAgentServiceStreamChatRequest,
            >,
        ) -> std::result::Result<
            tonic::Response<tonic::codec::Streaming<super::StreamChatResponse>>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.WorkbookAgentService/StreamChat",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.WorkbookAgentService", "StreamChat"),
                );
            self.inner.server_streaming(req, path, codec).await
        }
        /// GetConversation handles getting a complete conversation list, with an optional limit on number of messages returned
        pub async fn get_conversation(
            &mut self,
            request: impl tonic::IntoRequest<super::GetConversationRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GetConversationResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.WorkbookAgentService/GetConversation",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.WorkbookAgentService",
                        "GetConversation",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// ListConversations handles getting the list of conversation ids ordered by most recently updated
        pub async fn list_conversations(
            &mut self,
            request: impl tonic::IntoRequest<super::ListConversationsRequest>,
        ) -> std::result::Result<
            tonic::Response<super::ListConversationsResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.WorkbookAgentService/ListConversations",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.WorkbookAgentService",
                        "ListConversations",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// CreateConversation handles creating a conversation and assigning it a conversation rid
        pub async fn create_conversation(
            &mut self,
            request: impl tonic::IntoRequest<super::CreateConversationRequest>,
        ) -> std::result::Result<
            tonic::Response<super::CreateConversationResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.WorkbookAgentService/CreateConversation",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.WorkbookAgentService",
                        "CreateConversation",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// UpdateConversationMetadata handles updating any metadata associated with a conversation
        pub async fn update_conversation_metadata(
            &mut self,
            request: impl tonic::IntoRequest<super::UpdateConversationMetadataRequest>,
        ) -> std::result::Result<
            tonic::Response<super::UpdateConversationMetadataResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.WorkbookAgentService/UpdateConversationMetadata",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.WorkbookAgentService",
                        "UpdateConversationMetadata",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// DeleteConversation handles deleting a specific conversation by conversation rid
        pub async fn delete_conversation(
            &mut self,
            request: impl tonic::IntoRequest<super::DeleteConversationRequest>,
        ) -> std::result::Result<
            tonic::Response<super::DeleteConversationResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.WorkbookAgentService/DeleteConversation",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.WorkbookAgentService",
                        "DeleteConversation",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}
/// Request for `AIFeaturesService.IsAIEnabledForUser`. Carries no fields;
/// presumably the target user is derived from the call's credentials rather
/// than the payload — confirm against the server implementation.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct IsAiEnabledForUserRequest {}
/// Response for `AIFeaturesService.IsAIEnabledForUser`.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct IsAiEnabledForUserResponse {
    /// Whether AI features are enabled for the user the check was made for.
    #[prost(bool, tag = "1")]
    pub is_enabled: bool,
}
/// Generated client implementations.
pub mod ai_features_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// AIFeaturesService provides information about enabled AI features
    #[derive(Debug, Clone)]
    pub struct AiFeaturesServiceClient<T> {
        // Underlying tonic gRPC client; all RPC methods delegate to it.
        inner: tonic::client::Grpc<T>,
    }
    impl AiFeaturesServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> AiFeaturesServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        /// Wrap an already-constructed gRPC service (e.g. a connected channel).
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        /// Wrap a service and rewrite request URIs against the given origin.
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        /// Build a client whose requests pass through `interceptor` first
        /// (commonly used to attach auth metadata).
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> AiFeaturesServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            AiFeaturesServiceClient::new(InterceptedService::new(inner, interceptor))
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// IsAIEnabledForUser can be used to check if AI is enabled for a specific user
        pub async fn is_ai_enabled_for_user(
            &mut self,
            request: impl tonic::IntoRequest<super::IsAiEnabledForUserRequest>,
        ) -> std::result::Result<
            tonic::Response<super::IsAiEnabledForUserResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIFeaturesService/IsAIEnabledForUser",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.AIFeaturesService",
                        "IsAIEnabledForUser",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}
/// CreateOrUpdateKnowledgeBaseRequest is a request to create a knowledge from an attachment in the attachment's
/// workspace. Subsequent calls for the same attachment will overwrite the existing knowledge base.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateOrUpdateKnowledgeBaseRequest {
    /// RID of the attachment the knowledge base is built from; also keys the
    /// overwrite behavior described above.
    #[prost(string, tag = "1")]
    pub attachment_rid: ::prost::alloc::string::String,
    /// summary of the knowledge base, will be used by the LLM to decide when to use it
    #[prost(string, tag = "2")]
    pub summary_description: ::prost::alloc::string::String,
    /// Optional knowledge base type; see `KnowledgeBaseType`, whose docs state
    /// that an unspecified value defaults to PROMPT.
    #[prost(enumeration = "KnowledgeBaseType", optional, tag = "3")]
    pub r#type: ::core::option::Option<i32>,
}
/// CreateOrUpdateKnowledgeBaseResponse is the response to creating/updating a knowledge base
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateOrUpdateKnowledgeBaseResponse {
    /// RID of the knowledge base that was created or updated.
    #[prost(string, tag = "1")]
    pub knowledge_base_rid: ::prost::alloc::string::String,
}
/// KnowledgeBase represents a knowledge base entry
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct KnowledgeBase {
    /// Unique RID of this knowledge base.
    #[prost(string, tag = "1")]
    pub knowledge_base_rid: ::prost::alloc::string::String,
    /// RID of the source attachment the knowledge base was created from.
    #[prost(string, tag = "2")]
    pub attachment_rid: ::prost::alloc::string::String,
    /// RID of the workspace that owns this knowledge base.
    #[prost(string, tag = "3")]
    pub workspace_rid: ::prost::alloc::string::String,
    /// Summary used by the LLM to decide when to use this knowledge base
    /// (see `CreateOrUpdateKnowledgeBaseRequest.summary_description`).
    #[prost(string, tag = "4")]
    pub summary_description: ::prost::alloc::string::String,
    /// How the knowledge base is consumed; see `KnowledgeBaseType`.
    #[prost(enumeration = "KnowledgeBaseType", tag = "5")]
    pub r#type: i32,
    /// Version counter — presumably bumped on each create-or-update overwrite;
    /// confirm against the server implementation.
    #[prost(int32, tag = "6")]
    pub version: i32,
}
/// Request for `KnowledgeBaseService.List`.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListRequest {
    /// RID of the workspace whose knowledge bases should be listed.
    #[prost(string, tag = "1")]
    pub workspace_rid: ::prost::alloc::string::String,
}
/// Response for `KnowledgeBaseService.List`.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListResponse {
    /// All knowledge bases in the requested workspace.
    #[prost(message, repeated, tag = "1")]
    pub knowledge_bases: ::prost::alloc::vec::Vec<KnowledgeBase>,
}
/// Request for `KnowledgeBaseService.Delete`.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteRequest {
    /// RID of the knowledge base to delete.
    #[prost(string, tag = "1")]
    pub knowledge_base_rid: ::prost::alloc::string::String,
}
/// Response for `KnowledgeBaseService.Delete`.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct DeleteResponse {
    /// Whether the deletion succeeded.
    #[prost(bool, tag = "1")]
    pub success: bool,
}
/// Request for `KnowledgeBaseService.GetBatch`.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetBatchRequest {
    /// RIDs of the knowledge bases to fetch.
    #[prost(string, repeated, tag = "1")]
    pub knowledge_base_rids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
/// Response for `KnowledgeBaseService.GetBatch`.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetBatchResponse {
    /// The fetched knowledge bases. NOTE(review): ordering and handling of
    /// unknown RIDs are not specified here — confirm with the server.
    #[prost(message, repeated, tag = "1")]
    pub knowledge_bases: ::prost::alloc::vec::Vec<KnowledgeBase>,
}
/// generate summary description is intentionally going to return the generated description to the frontend
/// rather than storing it in the knowledge base directly because the description needs to be accepted by the user
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GenerateSummaryDescriptionRequest {
    /// RID of the attachment to generate a summary description for.
    #[prost(string, tag = "1")]
    pub attachment_rid: ::prost::alloc::string::String,
}
/// Response for `KnowledgeBaseService.GenerateSummaryDescription`; carries the
/// generated text back to the caller for user review (see the request's doc).
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GenerateSummaryDescriptionResponse {
    /// The generated summary description, pending user acceptance.
    #[prost(string, tag = "1")]
    pub summary_description: ::prost::alloc::string::String,
}
/// KnowledgeBaseType defines the types of knowledge base
///
/// Discriminant values match the protobuf enum numbers; the stable proto
/// names are available via `as_str_name`/`from_str_name`.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum KnowledgeBaseType {
    /// defaults to PROMPT
    Unspecified = 0,
    /// knowledge base gets added directly to prompt (needs to be small enough!)
    Prompt = 1,
    /// knowledge base gets used via vector search on embeddings
    Embedding = 2,
}
1210impl KnowledgeBaseType {
1211    /// String value of the enum field names used in the ProtoBuf definition.
1212    ///
1213    /// The values are not transformed in any way and thus are considered stable
1214    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
1215    pub fn as_str_name(&self) -> &'static str {
1216        match self {
1217            Self::Unspecified => "KNOWLEDGE_BASE_TYPE_UNSPECIFIED",
1218            Self::Prompt => "KNOWLEDGE_BASE_TYPE_PROMPT",
1219            Self::Embedding => "KNOWLEDGE_BASE_TYPE_EMBEDDING",
1220        }
1221    }
1222    /// Creates an enum from field names used in the ProtoBuf definition.
1223    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
1224        match value {
1225            "KNOWLEDGE_BASE_TYPE_UNSPECIFIED" => Some(Self::Unspecified),
1226            "KNOWLEDGE_BASE_TYPE_PROMPT" => Some(Self::Prompt),
1227            "KNOWLEDGE_BASE_TYPE_EMBEDDING" => Some(Self::Embedding),
1228            _ => None,
1229        }
1230    }
1231}
/// Generated client implementations.
pub mod knowledge_base_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// KnowledgeBaseService provides AI-powered knowledge base management
    #[derive(Debug, Clone)]
    pub struct KnowledgeBaseServiceClient<T> {
        // Underlying tonic gRPC client; all RPC methods delegate to it.
        inner: tonic::client::Grpc<T>,
    }
    impl KnowledgeBaseServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> KnowledgeBaseServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        /// Wrap an already-constructed gRPC service (e.g. a connected channel).
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        /// Wrap a service and rewrite request URIs against the given origin.
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        /// Build a client whose requests pass through `interceptor` first
        /// (commonly used to attach auth metadata).
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> KnowledgeBaseServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            KnowledgeBaseServiceClient::new(InterceptedService::new(inner, interceptor))
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// CreateOrUpdateKnowledgeBase creates a knowledge base in the workspace
        pub async fn create_or_update_knowledge_base(
            &mut self,
            request: impl tonic::IntoRequest<super::CreateOrUpdateKnowledgeBaseRequest>,
        ) -> std::result::Result<
            tonic::Response<super::CreateOrUpdateKnowledgeBaseResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/CreateOrUpdateKnowledgeBase",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.KnowledgeBaseService",
                        "CreateOrUpdateKnowledgeBase",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// List returns all knowledge bases in the specified workspace
        pub async fn list(
            &mut self,
            request: impl tonic::IntoRequest<super::ListRequest>,
        ) -> std::result::Result<tonic::Response<super::ListResponse>, tonic::Status> {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/List",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "List"));
            self.inner.unary(req, path, codec).await
        }
        /// Delete removes a knowledge base by its RID
        pub async fn delete(
            &mut self,
            request: impl tonic::IntoRequest<super::DeleteRequest>,
        ) -> std::result::Result<tonic::Response<super::DeleteResponse>, tonic::Status> {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/Delete",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "Delete"));
            self.inner.unary(req, path, codec).await
        }
        /// GetBatch retrieves multiple knowledge bases by their RIDs
        pub async fn get_batch(
            &mut self,
            request: impl tonic::IntoRequest<super::GetBatchRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GetBatchResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/GetBatch",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "GetBatch"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// GenerateSummaryDescription generates a summary description for an attachment rid
        pub async fn generate_summary_description(
            &mut self,
            request: impl tonic::IntoRequest<super::GenerateSummaryDescriptionRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GenerateSummaryDescriptionResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/GenerateSummaryDescription",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.KnowledgeBaseService",
                        "GenerateSummaryDescription",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}