// nominal_api/proto/nominal.ai.v1.rs
// This file is @generated by prost-build.
/// StreamChatRequest is a request to stream chat messages for the workbook AI agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StreamChatRequest {
    #[prost(message, repeated, tag = "1")]
    pub messages: ::prost::alloc::vec::Vec<ModelMessage>,
    /// JSON-serialized representation of INotebook
    #[prost(string, optional, tag = "2")]
    pub notebook_as_json: ::core::option::Option<::prost::alloc::string::String>,
    /// The current tab visible in the workbook from the user's perspective
    #[prost(int32, optional, tag = "3")]
    pub selected_tab_index: ::core::option::Option<i32>,
    /// Optional: image files to provide to the agent
    #[prost(message, repeated, tag = "4")]
    pub images: ::prost::alloc::vec::Vec<ImagePart>,
    /// Time range for the tab that is currently visible to the user
    #[prost(message, optional, tag = "5")]
    pub range: ::core::option::Option<TimeRange>,
    /// The V2 conversation API persists the message and any assistant responses to storage under the provided
    /// conversation_id. If the id does not exist in the database, a new conversation is started from this message.
    #[prost(message, optional, tag = "6")]
    pub message: ::core::option::Option<AppendMessage>,
}
/// Appends a message to an existing conversation.
/// A nonexistent conversation id will raise an error.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AppendMessage {
    #[prost(message, optional, tag = "1")]
    pub message: ::core::option::Option<UserModelMessage>,
    #[prost(string, tag = "2")]
    pub conversation_rid: ::prost::alloc::string::String,
}
/// A CreateConversation request creates a new conversation thread.
/// If old conversation id is not set, a brand new, empty chat is created.
/// If old conversation id is set without a previous message id, the full conversation thread is copied.
/// If old conversation id is set with a previous message id, the conversation thread up until that message is copied.
/// The last case is useful for branching a conversation into a new thread.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateConversationRequest {
    #[prost(string, tag = "1")]
    pub title: ::prost::alloc::string::String,
    #[prost(string, tag = "2")]
    pub workbook_rid: ::prost::alloc::string::String,
    #[prost(string, optional, tag = "3")]
    pub old_conversation_rid: ::core::option::Option<::prost::alloc::string::String>,
    #[prost(string, optional, tag = "4")]
    pub previous_message_id: ::core::option::Option<::prost::alloc::string::String>,
}
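// The example below sketches the three CreateConversation modes described above: a brand
// new chat, a full copy of an existing thread, and a branch up to a particular message.
// The rid and message-id strings are placeholders, not real identifiers.
#[allow(dead_code)]
fn example_create_conversation_requests() -> Vec<CreateConversationRequest> {
    vec![
        // Mode 1: brand new, empty conversation.
        CreateConversationRequest {
            title: "New analysis".to_string(),
            workbook_rid: "ri.workbook.example".to_string(),
            old_conversation_rid: None,
            previous_message_id: None,
        },
        // Mode 2: copy an entire existing thread.
        CreateConversationRequest {
            title: "Copy of analysis".to_string(),
            workbook_rid: "ri.workbook.example".to_string(),
            old_conversation_rid: Some("ri.conversation.example".to_string()),
            previous_message_id: None,
        },
        // Mode 3: branch the thread, copying it only up to the given message.
        CreateConversationRequest {
            title: "Branch from message".to_string(),
            workbook_rid: "ri.workbook.example".to_string(),
            old_conversation_rid: Some("ri.conversation.example".to_string()),
            previous_message_id: Some("message-id-123".to_string()),
        },
    ]
}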
/// CreateConversationResponse returns the rid for the new conversation
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateConversationResponse {
    #[prost(string, tag = "1")]
    pub new_conversation_rid: ::prost::alloc::string::String,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UpdateConversationMetadataRequest {
    #[prost(string, tag = "1")]
    pub title: ::prost::alloc::string::String,
    #[prost(string, tag = "2")]
    pub conversation_rid: ::prost::alloc::string::String,
}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct UpdateConversationMetadataResponse {}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteConversationRequest {
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct DeleteConversationResponse {}
/// A GetConversationRequest retrieves a subset of messages from the conversation thread identified
/// by the provided rid. To start from a particular message, you can also provide a message id.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetConversationRequest {
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    #[prost(string, optional, tag = "2")]
    pub page_start_message_id: ::core::option::Option<::prost::alloc::string::String>,
    #[prost(int32, optional, tag = "3")]
    pub max_message_count: ::core::option::Option<i32>,
}
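// A small sketch of a paged GetConversation request as described above: start from a known
// message id and cap the number of messages returned. The values callers pass in (and the
// page size of 50) are arbitrary placeholders.
#[allow(dead_code)]
fn example_page_request(
    conversation_rid: &str,
    page_start_message_id: &str,
) -> GetConversationRequest {
    GetConversationRequest {
        conversation_rid: conversation_rid.to_string(),
        page_start_message_id: Some(page_start_message_id.to_string()),
        max_message_count: Some(50),
    }
}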
/// ModelMessageWithId pairs a message with its message ID so that a given message can be identified
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ModelMessageWithId {
    #[prost(string, tag = "3")]
    pub message_id: ::prost::alloc::string::String,
    #[prost(oneof = "model_message_with_id::Content", tags = "1, 2")]
    pub content: ::core::option::Option<model_message_with_id::Content>,
}
/// Nested message and enum types in `ModelMessageWithId`.
pub mod model_message_with_id {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Content {
        #[prost(message, tag = "1")]
        Message(super::ModelMessage),
        #[prost(message, tag = "2")]
        ToolAction(super::ToolAction),
    }
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetConversationResponse {
    #[prost(message, repeated, tag = "1")]
    pub ordered_messages: ::prost::alloc::vec::Vec<ModelMessageWithId>,
}
/// Lists all conversation threads that this user has on this workbook
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListConversationsRequest {
    #[prost(string, tag = "1")]
    pub workbook_rid: ::prost::alloc::string::String,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ConversationMetadata {
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    #[prost(string, tag = "2")]
    pub title: ::prost::alloc::string::String,
    #[prost(message, optional, tag = "3")]
    pub created_at: ::core::option::Option<
        super::super::super::google::protobuf::Timestamp,
    >,
    #[prost(message, optional, tag = "4")]
    pub last_updated_at: ::core::option::Option<
        super::super::super::google::protobuf::Timestamp,
    >,
}
/// ListConversationsResponse is a list of conversations whose rids can be used in a GetConversation call
/// to get a full conversation from storage. These are ordered by creation time.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListConversationsResponse {
    #[prost(message, repeated, tag = "1")]
    pub conversations: ::prost::alloc::vec::Vec<ConversationMetadata>,
}
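// A sketch of picking the most recently updated conversation out of a listing, ordering by
// the well-known protobuf Timestamp's (seconds, nanos) pair. That the google.protobuf
// Timestamp generated alongside this file exposes `seconds` and `nanos` fields is an
// assumption based on the standard well-known type.
#[allow(dead_code)]
fn most_recently_updated(
    response: &ListConversationsResponse,
) -> Option<&ConversationMetadata> {
    response.conversations.iter().max_by_key(|c| {
        c.last_updated_at
            .as_ref()
            .map(|t| (t.seconds, t.nanos))
            .unwrap_or((i64::MIN, i32::MIN))
    })
}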
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct TimeRange {
    #[prost(message, optional, tag = "1")]
    pub range_start: ::core::option::Option<Timestamp>,
    #[prost(message, optional, tag = "2")]
    pub range_end: ::core::option::Option<Timestamp>,
}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct Timestamp {
    #[prost(int32, tag = "1")]
    pub seconds: i32,
    #[prost(int32, tag = "2")]
    pub nanoseconds: i32,
}
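// An illustrative helper (an assumed convenience, not part of the generated API) for building
// the TimeRange sent on StreamChatRequest from start/end Unix seconds. Note that this local
// Timestamp uses i32 seconds, unlike the well-known google.protobuf.Timestamp.
#[allow(dead_code)]
fn time_range_from_unix_seconds(start_seconds: i32, end_seconds: i32) -> TimeRange {
    TimeRange {
        range_start: Some(Timestamp {
            seconds: start_seconds,
            nanoseconds: 0,
        }),
        range_end: Some(Timestamp {
            seconds: end_seconds,
            nanoseconds: 0,
        }),
    }
}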
/// ModelMessage is a discriminated union of message kinds (currently user and assistant messages).
/// Each message kind has its own structure and content.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ModelMessage {
    #[prost(oneof = "model_message::Kind", tags = "1, 2")]
    pub kind: ::core::option::Option<model_message::Kind>,
}
/// Nested message and enum types in `ModelMessage`.
pub mod model_message {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Kind {
        #[prost(message, tag = "1")]
        User(super::UserModelMessage),
        #[prost(message, tag = "2")]
        Assistant(super::AssistantModelMessage),
    }
}
/// A user message containing text
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UserModelMessage {
    #[prost(message, repeated, tag = "1")]
    pub text: ::prost::alloc::vec::Vec<UserContentPart>,
}
/// An assistant message containing content parts
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AssistantModelMessage {
    #[prost(message, repeated, tag = "1")]
    pub content_parts: ::prost::alloc::vec::Vec<AssistantContentPart>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UserContentPart {
    #[prost(oneof = "user_content_part::Part", tags = "1")]
    pub part: ::core::option::Option<user_content_part::Part>,
}
/// Nested message and enum types in `UserContentPart`.
pub mod user_content_part {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Part {
        #[prost(message, tag = "1")]
        Text(super::TextPart),
    }
}
/// Content part for assistant messages: can be text or reasoning.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AssistantContentPart {
    #[prost(oneof = "assistant_content_part::Part", tags = "1, 2")]
    pub part: ::core::option::Option<assistant_content_part::Part>,
}
/// Nested message and enum types in `AssistantContentPart`.
pub mod assistant_content_part {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Part {
        #[prost(message, tag = "1")]
        Text(super::TextPart),
        #[prost(message, tag = "2")]
        Reasoning(super::ReasoningPart),
    }
}
/// Text part for user or assistant messages.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextPart {
    #[prost(string, tag = "1")]
    pub text: ::prost::alloc::string::String,
}
/// User-supplied image part.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ImagePart {
    /// The base64-encoded image data
    #[prost(bytes = "vec", tag = "1")]
    pub data: ::prost::alloc::vec::Vec<u8>,
    /// The media type of the image (e.g. "image/png", "image/jpeg")
    #[prost(string, optional, tag = "2")]
    pub media_type: ::core::option::Option<::prost::alloc::string::String>,
    /// Optional: the filename of the image
    #[prost(string, optional, tag = "3")]
    pub filename: ::core::option::Option<::prost::alloc::string::String>,
}
/// Reasoning part for assistant messages.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningPart {
    #[prost(string, tag = "1")]
    pub reasoning: ::prost::alloc::string::String,
}
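// A minimal sketch of how the nested oneofs compose into a user message suitable for
// StreamChatRequest.messages. The prompt text is whatever the caller supplies.
#[allow(dead_code)]
fn example_user_message(prompt: &str) -> ModelMessage {
    ModelMessage {
        kind: Some(model_message::Kind::User(UserModelMessage {
            text: vec![UserContentPart {
                part: Some(user_content_part::Part::Text(TextPart {
                    text: prompt.to_string(),
                })),
            }],
        })),
    }
}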
/// StreamChatResponse is a discriminated union response to a StreamChatRequest
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StreamChatResponse {
    #[prost(
        oneof = "stream_chat_response::Response",
        tags = "1, 2, 3, 4, 5, 6, 7, 8, 9, 10"
    )]
    pub response: ::core::option::Option<stream_chat_response::Response>,
}
/// Nested message and enum types in `StreamChatResponse`.
pub mod stream_chat_response {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Response {
        #[prost(message, tag = "1")]
        Finish(super::Finish),
        #[prost(message, tag = "2")]
        Error(super::Error),
        #[prost(message, tag = "3")]
        TextStart(super::TextStart),
        #[prost(message, tag = "4")]
        TextDelta(super::TextDelta),
        #[prost(message, tag = "5")]
        TextEnd(super::TextEnd),
        #[prost(message, tag = "6")]
        ReasoningStart(super::ReasoningStart),
        #[prost(message, tag = "7")]
        ReasoningDelta(super::ReasoningDelta),
        #[prost(message, tag = "8")]
        ReasoningEnd(super::ReasoningEnd),
        #[prost(message, tag = "9")]
        WorkbookMutation(super::WorkbookMutation),
        #[prost(message, tag = "10")]
        ToolAction(super::ToolAction),
    }
}
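// A sketch of merging streamed text events into per-id buffers, following the
// TextStart/TextDelta/TextEnd protocol defined below. Keeping the buffers in a HashMap is
// an assumption about client-side bookkeeping, not something the API prescribes.
#[allow(dead_code)]
fn apply_text_event(
    buffers: &mut std::collections::HashMap<String, String>,
    event: &StreamChatResponse,
) {
    match &event.response {
        Some(stream_chat_response::Response::TextStart(start)) => {
            // A new text message begins; start an empty buffer for its id.
            buffers.insert(start.id.clone(), String::new());
        }
        Some(stream_chat_response::Response::TextDelta(delta)) => {
            // Append the chunk to the buffer for this message id.
            buffers.entry(delta.id.clone()).or_default().push_str(&delta.delta);
        }
        Some(stream_chat_response::Response::TextEnd(end)) => {
            // The buffer for `end.id` now holds the complete text message.
            let _complete = buffers.get(&end.id);
        }
        _ => {}
    }
}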
/// Indicates the end of a chat session
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Finish {
    /// The message id of the final message sent in the LLM's response.
    /// Useful for keeping track of the point from which, over the course of conversation turns,
    /// the user may want to branch the conversation.
    #[prost(string, tag = "1")]
    pub last_message_id: ::prost::alloc::string::String,
}
/// An error that occurred during the chat session
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Error {
    #[prost(string, tag = "1")]
    pub message: ::prost::alloc::string::String,
}
/// Indicates the start of a text message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextStart {
    /// Uniquely identifies the text message (e.g. a uuid) so that the client can
    /// merge parallel message streams if they occur.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// A delta (continuation) of a text message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextDelta {
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// The next chunk of text
    #[prost(string, tag = "2")]
    pub delta: ::prost::alloc::string::String,
}
/// Indicates the end of a text message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextEnd {
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// Indicates the start of a reasoning message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningStart {
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// A delta (continuation) of a reasoning message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningDelta {
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// The next chunk of reasoning
    #[prost(string, tag = "2")]
    pub delta: ::prost::alloc::string::String,
}
/// Indicates the end of a reasoning message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningEnd {
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// Add a new tab to the workbook
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AddTabMutation {
    /// If tab_name is not provided, the tab is named "New Tab"
    #[prost(string, optional, tag = "1")]
    pub tab_name: ::core::option::Option<::prost::alloc::string::String>,
}
/// This message is the "result" of the mutation: the panel definition to add or update
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AddOrUpdatePanelMutation {
    /// JSON-serialized representation of IVizDefinition
    #[prost(string, tag = "1")]
    pub panel_as_json: ::prost::alloc::string::String,
    #[prost(string, tag = "2")]
    pub panel_id: ::prost::alloc::string::String,
    #[prost(int32, tag = "3")]
    pub tab_index: i32,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RemovePanelsMutation {
    #[prost(string, repeated, tag = "1")]
    pub panel_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
/// AddOrReplaceVariableMutation is a mutation to add or replace a variable in the workbook
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AddOrReplaceVariableMutation {
    /// scout_compute_api_ComputeSpecWithContext (this needs to be resolved).
    /// Be careful: this uses the scout_compute_api version of ComputeSpecWithContext, which stores the spec as "seriesNode"
    /// and is NOT the same as the ComputeSpecWithContext that is stored in INotebook.
    #[prost(string, tag = "1")]
    pub compute_spec_as_json: ::prost::alloc::string::String,
    /// If variable_name is not provided, it is assumed to be a new variable and a unique name is auto-generated
    #[prost(string, optional, tag = "2")]
    pub variable_name: ::core::option::Option<::prost::alloc::string::String>,
    #[prost(string, optional, tag = "3")]
    pub display_name: ::core::option::Option<::prost::alloc::string::String>,
}
/// DeleteVariablesMutation is a mutation to delete variables from the workbook
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteVariablesMutation {
    #[prost(string, repeated, tag = "1")]
    pub variable_names: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
/// WorkbookMutation is a mutation to the workbook
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct WorkbookMutation {
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    #[prost(oneof = "workbook_mutation::Mutation", tags = "2, 3, 4, 5, 6")]
    pub mutation: ::core::option::Option<workbook_mutation::Mutation>,
}
/// Nested message and enum types in `WorkbookMutation`.
pub mod workbook_mutation {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Mutation {
        #[prost(message, tag = "2")]
        AddTab(super::AddTabMutation),
        #[prost(message, tag = "3")]
        AddOrUpdatePanel(super::AddOrUpdatePanelMutation),
        #[prost(message, tag = "4")]
        RemovePanels(super::RemovePanelsMutation),
        #[prost(message, tag = "5")]
        AddOrReplaceVariable(super::AddOrReplaceVariableMutation),
        #[prost(message, tag = "6")]
        DeleteVariables(super::DeleteVariablesMutation),
    }
}
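// A sketch of dispatching on the mutation variants, producing a human-readable summary.
// How a real client applies each mutation to its notebook model is up to the client; this
// only illustrates the shape of the oneof.
#[allow(dead_code)]
fn describe_mutation(mutation: &WorkbookMutation) -> String {
    match &mutation.mutation {
        Some(workbook_mutation::Mutation::AddTab(m)) => {
            format!("add tab {:?}", m.tab_name.as_deref().unwrap_or("New Tab"))
        }
        Some(workbook_mutation::Mutation::AddOrUpdatePanel(m)) => {
            format!("add or update panel {} on tab {}", m.panel_id, m.tab_index)
        }
        Some(workbook_mutation::Mutation::RemovePanels(m)) => {
            format!("remove {} panel(s)", m.panel_ids.len())
        }
        Some(workbook_mutation::Mutation::AddOrReplaceVariable(m)) => {
            format!("add or replace variable {:?}", m.variable_name)
        }
        Some(workbook_mutation::Mutation::DeleteVariables(m)) => {
            format!("delete {} variable(s)", m.variable_names.len())
        }
        None => "no-op".to_string(),
    }
}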
/// A concise description of a tool call that the agent is making internally.
/// Without revealing too much detail about the tool call, it informs the user what the agent is doing
/// at a high level. The format is `{tool_action_verb} {tool_target}`, for example:
/// "Search channels for My Datasource"
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ToolAction {
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// "Thought", "Read", "Find", "Look-up", etc.
    #[prost(string, tag = "2")]
    pub tool_action_verb: ::prost::alloc::string::String,
    /// "workbook", "channel", "variable", "panel", etc.
    #[prost(string, optional, tag = "3")]
    pub tool_target: ::core::option::Option<::prost::alloc::string::String>,
}
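// A sketch of rendering the `{tool_action_verb} {tool_target}` display format described
// above, falling back to the verb alone when no target is set.
#[allow(dead_code)]
fn render_tool_action(action: &ToolAction) -> String {
    match action.tool_target.as_deref() {
        Some(target) => format!("{} {}", action.tool_action_verb, target),
        None => action.tool_action_verb.clone(),
    }
}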
/// Generated client implementations.
pub mod workbook_agent_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// WorkbookAgentService provides AI-powered assistance for workbook operations
    #[derive(Debug, Clone)]
    pub struct WorkbookAgentServiceClient<T> {
        inner: tonic::client::Grpc<T>,
    }
    impl WorkbookAgentServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> WorkbookAgentServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> WorkbookAgentServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            WorkbookAgentServiceClient::new(InterceptedService::new(inner, interceptor))
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// StreamChat handles server-streaming chat for the workbook AI agent
        pub async fn stream_chat(
            &mut self,
            request: impl tonic::IntoRequest<super::StreamChatRequest>,
        ) -> std::result::Result<
            tonic::Response<tonic::codec::Streaming<super::StreamChatResponse>>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.WorkbookAgentService/StreamChat",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.WorkbookAgentService", "StreamChat"),
                );
            self.inner.server_streaming(req, path, codec).await
        }
        /// GetConversation handles getting the messages of a conversation, with an optional limit on the number of messages returned
        pub async fn get_conversation(
            &mut self,
            request: impl tonic::IntoRequest<super::GetConversationRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GetConversationResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.WorkbookAgentService/GetConversation",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.WorkbookAgentService",
                        "GetConversation",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// ListConversations handles getting the list of conversations (as metadata) ordered by most recently updated
        pub async fn list_conversations(
            &mut self,
            request: impl tonic::IntoRequest<super::ListConversationsRequest>,
        ) -> std::result::Result<
            tonic::Response<super::ListConversationsResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.WorkbookAgentService/ListConversations",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.WorkbookAgentService",
                        "ListConversations",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// CreateConversation handles creating a conversation and assigning it a conversation rid
        pub async fn create_conversation(
            &mut self,
            request: impl tonic::IntoRequest<super::CreateConversationRequest>,
        ) -> std::result::Result<
            tonic::Response<super::CreateConversationResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.WorkbookAgentService/CreateConversation",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.WorkbookAgentService",
                        "CreateConversation",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// UpdateConversationMetadata handles updating any metadata associated with a conversation
        pub async fn update_conversation_metadata(
            &mut self,
            request: impl tonic::IntoRequest<super::UpdateConversationMetadataRequest>,
        ) -> std::result::Result<
            tonic::Response<super::UpdateConversationMetadataResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.WorkbookAgentService/UpdateConversationMetadata",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.WorkbookAgentService",
                        "UpdateConversationMetadata",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// DeleteConversation handles deleting a specific conversation by conversation rid
        pub async fn delete_conversation(
            &mut self,
            request: impl tonic::IntoRequest<super::DeleteConversationRequest>,
        ) -> std::result::Result<
            tonic::Response<super::DeleteConversationResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.WorkbookAgentService/DeleteConversation",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.WorkbookAgentService",
                        "DeleteConversation",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}
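// A minimal end-to-end sketch of calling StreamChat: connect, send one user message (using
// the example_user_message sketch above), and print streamed text. A Tokio runtime is
// assumed, and the endpoint URL is a placeholder, not a documented address.
#[allow(dead_code)]
async fn example_stream_chat(prompt: &str) -> Result<(), Box<dyn std::error::Error>> {
    let mut client = workbook_agent_service_client::WorkbookAgentServiceClient::connect(
        "http://localhost:8080",
    )
    .await?;
    let request = StreamChatRequest {
        messages: vec![example_user_message(prompt)],
        notebook_as_json: None,
        selected_tab_index: Some(0),
        images: vec![],
        range: None,
        message: None,
    };
    let mut stream = client.stream_chat(request).await?.into_inner();
    while let Some(event) = stream.message().await? {
        match event.response {
            Some(stream_chat_response::Response::TextDelta(delta)) => {
                print!("{}", delta.delta);
            }
            Some(stream_chat_response::Response::Finish(finish)) => {
                println!();
                println!("finished; last message id: {}", finish.last_message_id);
            }
            Some(stream_chat_response::Response::Error(err)) => {
                eprintln!("agent error: {}", err.message);
            }
            _ => {}
        }
    }
    Ok(())
}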
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct IsAiEnabledForUserRequest {}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct IsAiEnabledForUserResponse {
    #[prost(bool, tag = "1")]
    pub is_enabled: bool,
}
/// Generated client implementations.
pub mod ai_features_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// AIFeaturesService provides information about enabled AI features
    #[derive(Debug, Clone)]
    pub struct AiFeaturesServiceClient<T> {
        inner: tonic::client::Grpc<T>,
    }
    impl AiFeaturesServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> AiFeaturesServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> AiFeaturesServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            AiFeaturesServiceClient::new(InterceptedService::new(inner, interceptor))
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// IsAIEnabledForUser can be used to check if AI is enabled for the requesting user
        pub async fn is_ai_enabled_for_user(
            &mut self,
            request: impl tonic::IntoRequest<super::IsAiEnabledForUserRequest>,
        ) -> std::result::Result<
            tonic::Response<super::IsAiEnabledForUserResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIFeaturesService/IsAIEnabledForUser",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.AIFeaturesService",
                        "IsAIEnabledForUser",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}
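// A minimal sketch of the feature-flag check, assuming the caller already has a connected
// tonic Channel (endpoint setup and auth are outside the scope of this file).
#[allow(dead_code)]
async fn example_is_ai_enabled(
    channel: tonic::transport::Channel,
) -> Result<bool, tonic::Status> {
    let mut client = ai_features_service_client::AiFeaturesServiceClient::new(channel);
    let response = client
        .is_ai_enabled_for_user(IsAiEnabledForUserRequest {})
        .await?;
    Ok(response.into_inner().is_enabled)
}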
/// CreateOrUpdateKnowledgeBaseRequest is a request to create a knowledge base from an attachment in the attachment's
/// workspace. Subsequent calls for the same attachment will overwrite the existing knowledge base.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateOrUpdateKnowledgeBaseRequest {
    #[prost(string, tag = "1")]
    pub attachment_rid: ::prost::alloc::string::String,
    /// Summary of the knowledge base; used by the LLM to decide when to use it
    #[prost(string, tag = "2")]
    pub summary_description: ::prost::alloc::string::String,
    #[prost(enumeration = "KnowledgeBaseType", optional, tag = "3")]
    pub r#type: ::core::option::Option<i32>,
}
/// CreateOrUpdateKnowledgeBaseResponse is the response to creating/updating a knowledge base
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateOrUpdateKnowledgeBaseResponse {
    #[prost(string, tag = "1")]
    pub knowledge_base_rid: ::prost::alloc::string::String,
}
/// KnowledgeBase represents a knowledge base entry
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct KnowledgeBase {
    #[prost(string, tag = "1")]
    pub knowledge_base_rid: ::prost::alloc::string::String,
    #[prost(string, tag = "2")]
    pub attachment_rid: ::prost::alloc::string::String,
    #[prost(string, tag = "3")]
    pub workspace_rid: ::prost::alloc::string::String,
    #[prost(string, tag = "4")]
    pub summary_description: ::prost::alloc::string::String,
    #[prost(enumeration = "KnowledgeBaseType", tag = "5")]
    pub r#type: i32,
    #[prost(int32, tag = "6")]
    pub version: i32,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListRequest {
    #[prost(string, tag = "1")]
    pub workspace_rid: ::prost::alloc::string::String,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListResponse {
    #[prost(message, repeated, tag = "1")]
    pub knowledge_bases: ::prost::alloc::vec::Vec<KnowledgeBase>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteRequest {
    #[prost(string, tag = "1")]
    pub knowledge_base_rid: ::prost::alloc::string::String,
}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct DeleteResponse {
    #[prost(bool, tag = "1")]
    pub success: bool,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetBatchRequest {
    #[prost(string, repeated, tag = "1")]
    pub knowledge_base_rids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetBatchResponse {
    #[prost(message, repeated, tag = "1")]
    pub knowledge_bases: ::prost::alloc::vec::Vec<KnowledgeBase>,
}
/// GenerateSummaryDescription intentionally returns the generated description to the frontend
/// rather than storing it in the knowledge base directly, because the description needs to be accepted by the user
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GenerateSummaryDescriptionRequest {
    #[prost(string, tag = "1")]
    pub attachment_rid: ::prost::alloc::string::String,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GenerateSummaryDescriptionResponse {
    #[prost(string, tag = "1")]
    pub summary_description: ::prost::alloc::string::String,
}
/// KnowledgeBaseType defines the types of knowledge base
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum KnowledgeBaseType {
    /// Defaults to PROMPT
    Unspecified = 0,
    /// The knowledge base gets added directly to the prompt (needs to be small enough!)
    Prompt = 1,
    /// The knowledge base gets used via vector search on embeddings
    Embedding = 2,
}
impl KnowledgeBaseType {
    /// String value of the enum field names used in the ProtoBuf definition.
    ///
    /// The values are not transformed in any way and thus are considered stable
    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
    pub fn as_str_name(&self) -> &'static str {
        match self {
            Self::Unspecified => "KNOWLEDGE_BASE_TYPE_UNSPECIFIED",
            Self::Prompt => "KNOWLEDGE_BASE_TYPE_PROMPT",
            Self::Embedding => "KNOWLEDGE_BASE_TYPE_EMBEDDING",
        }
    }
    /// Creates an enum from field names used in the ProtoBuf definition.
    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
        match value {
            "KNOWLEDGE_BASE_TYPE_UNSPECIFIED" => Some(Self::Unspecified),
            "KNOWLEDGE_BASE_TYPE_PROMPT" => Some(Self::Prompt),
            "KNOWLEDGE_BASE_TYPE_EMBEDDING" => Some(Self::Embedding),
            _ => None,
        }
    }
}
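// A small sketch of the round trip between the enum and its proto string names, using only
// the helpers generated above; handy when logging or parsing configuration values.
#[allow(dead_code)]
fn example_knowledge_base_type_round_trip() {
    let name = KnowledgeBaseType::Embedding.as_str_name();
    assert_eq!(
        KnowledgeBaseType::from_str_name(name),
        Some(KnowledgeBaseType::Embedding)
    );
}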
/// Generated client implementations.
pub mod knowledge_base_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// KnowledgeBaseService provides AI-powered knowledge base management
    #[derive(Debug, Clone)]
    pub struct KnowledgeBaseServiceClient<T> {
        inner: tonic::client::Grpc<T>,
    }
    impl KnowledgeBaseServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> KnowledgeBaseServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> KnowledgeBaseServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            KnowledgeBaseServiceClient::new(InterceptedService::new(inner, interceptor))
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// CreateOrUpdateKnowledgeBase creates a knowledge base in the workspace
        pub async fn create_or_update_knowledge_base(
            &mut self,
            request: impl tonic::IntoRequest<super::CreateOrUpdateKnowledgeBaseRequest>,
        ) -> std::result::Result<
            tonic::Response<super::CreateOrUpdateKnowledgeBaseResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/CreateOrUpdateKnowledgeBase",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.KnowledgeBaseService",
                        "CreateOrUpdateKnowledgeBase",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// List returns all knowledge bases in the specified workspace
        pub async fn list(
            &mut self,
            request: impl tonic::IntoRequest<super::ListRequest>,
        ) -> std::result::Result<tonic::Response<super::ListResponse>, tonic::Status> {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/List",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "List"));
            self.inner.unary(req, path, codec).await
        }
        /// Delete removes a knowledge base by its RID
        pub async fn delete(
            &mut self,
            request: impl tonic::IntoRequest<super::DeleteRequest>,
        ) -> std::result::Result<tonic::Response<super::DeleteResponse>, tonic::Status> {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/Delete",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "Delete"));
            self.inner.unary(req, path, codec).await
        }
        /// GetBatch retrieves multiple knowledge bases by their RIDs
        pub async fn get_batch(
            &mut self,
            request: impl tonic::IntoRequest<super::GetBatchRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GetBatchResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/GetBatch",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "GetBatch"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// GenerateSummaryDescription generates a summary description for an attachment rid
        pub async fn generate_summary_description(
            &mut self,
            request: impl tonic::IntoRequest<super::GenerateSummaryDescriptionRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GenerateSummaryDescriptionResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/GenerateSummaryDescription",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.KnowledgeBaseService",
                        "GenerateSummaryDescription",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}
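// A minimal sketch of registering an attachment as a knowledge base and then listing the
// knowledge bases in a workspace. The rid strings and summary text are placeholders, and a
// connected tonic Channel (plus a Tokio runtime) is assumed.
#[allow(dead_code)]
async fn example_knowledge_base_flow(
    channel: tonic::transport::Channel,
) -> Result<(), tonic::Status> {
    let mut client = knowledge_base_service_client::KnowledgeBaseServiceClient::new(channel);
    let created = client
        .create_or_update_knowledge_base(CreateOrUpdateKnowledgeBaseRequest {
            attachment_rid: "ri.attachment.example".to_string(),
            summary_description: "Wiring reference for the test stand".to_string(),
            r#type: Some(KnowledgeBaseType::Prompt as i32),
        })
        .await?
        .into_inner();
    println!("created knowledge base {}", created.knowledge_base_rid);
    let listed = client
        .list(ListRequest {
            workspace_rid: "ri.workspace.example".to_string(),
        })
        .await?
        .into_inner();
    println!("{} knowledge base(s) in workspace", listed.knowledge_bases.len());
    Ok(())
}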