nominal_api/proto/nominal.ai.v1.rs

// This file is @generated by prost-build.
/// StreamChatRequest is a request to stream chat messages for the workbook AI agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StreamChatRequest {
    #[prost(message, repeated, tag = "1")]
    pub messages: ::prost::alloc::vec::Vec<ModelMessage>,
    /// JSON-serialized representation of INotebook
    #[prost(string, optional, tag = "2")]
    pub notebook_as_json: ::core::option::Option<::prost::alloc::string::String>,
    /// The current tab visible in the workbook from the user's perspective
    #[prost(int32, optional, tag = "3")]
    pub selected_tab_index: ::core::option::Option<i32>,
    /// Optional: image files to provide to the agent
    #[prost(message, repeated, tag = "4")]
    pub images: ::prost::alloc::vec::Vec<ImagePart>,
    /// Time range for the tab that is currently visible to the user
    #[prost(message, optional, tag = "5")]
    pub range: ::core::option::Option<TimeRange>,
    /// The V2 conversation API persists the message and any assistant responses to storage under the provided
    /// conversation_id. If the id does not exist in the database, a new conversation is started from this message.
    #[prost(message, optional, tag = "6")]
    pub message: ::core::option::Option<AppendMessage>,
}
/// Appends a message to an existing conversation.
/// A non-existent conversation id will raise an error.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AppendMessage {
    #[prost(message, optional, tag = "1")]
    pub message: ::core::option::Option<UserModelMessage>,
    #[prost(string, tag = "2")]
    pub conversation_rid: ::prost::alloc::string::String,
}
/// A CreateConversation request will create a new conversation thread.
/// If old conversation id is not set, a brand new, clear chat is created.
/// If old conversation id is set without a previous message id, the full conversation thread will be copied.
/// If old conversation id is set with a previous message id, the conversation thread up until that message will be copied.
/// The latter case is useful for branching a conversation into a new thread.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateConversationRequest {
    #[prost(string, tag = "1")]
    pub title: ::prost::alloc::string::String,
    #[prost(string, tag = "2")]
    pub workspace_rid: ::prost::alloc::string::String,
    #[prost(string, optional, tag = "3")]
    pub old_conversation_rid: ::core::option::Option<::prost::alloc::string::String>,
    #[prost(string, optional, tag = "4")]
    pub previous_message_id: ::core::option::Option<::prost::alloc::string::String>,
}
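// Example (not part of the generated output): a minimal sketch of the branching case
// described above, copying an existing thread up to a particular message into a new
// conversation. The rid and message id values are hypothetical placeholders.
#[allow(dead_code)]
fn example_branch_conversation_request() -> CreateConversationRequest {
    CreateConversationRequest {
        title: "Branched thread".to_string(),
        workspace_rid: "ri.workspace.example".to_string(),
        // Copy the existing conversation...
        old_conversation_rid: Some("ri.conversation.example".to_string()),
        // ...but only up to this message, so the new thread branches from here.
        previous_message_id: Some("message-id-example".to_string()),
    }
}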
/// CreateConversationResponse returns the conversation id for the new conversation.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateConversationResponse {
    #[prost(string, tag = "1")]
    pub new_conversation_rid: ::prost::alloc::string::String,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UpdateConversationMetadataRequest {
    #[prost(string, tag = "1")]
    pub title: ::prost::alloc::string::String,
    #[prost(string, tag = "2")]
    pub conversation_rid: ::prost::alloc::string::String,
}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct UpdateConversationMetadataResponse {}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteConversationRequest {
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct DeleteConversationResponse {}
/// A GetConversationRequest retrieves a subset of messages from the conversation thread identified
/// by the provided rid. To start from a particular message, you can also provide a message id.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetConversationRequest {
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    #[prost(string, optional, tag = "2")]
    pub page_start_message_id: ::core::option::Option<::prost::alloc::string::String>,
    #[prost(int32, optional, tag = "3")]
    pub max_message_count: ::core::option::Option<i32>,
}
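// Example (not part of the generated output): a small sketch of paging through a
// conversation. The first call omits page_start_message_id; later calls resume from the
// last message id already seen. The conversation rid is a hypothetical placeholder.
#[allow(dead_code)]
fn example_conversation_page(page_start_message_id: Option<String>) -> GetConversationRequest {
    GetConversationRequest {
        conversation_rid: "ri.conversation.example".to_string(),
        page_start_message_id,
        // An arbitrary page size for this sketch.
        max_message_count: Some(50),
    }
}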
/// ModelMessageWithId pairs a message with the message id that identifies it.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ModelMessageWithId {
    #[prost(string, tag = "3")]
    pub message_id: ::prost::alloc::string::String,
    #[prost(oneof = "model_message_with_id::Content", tags = "1, 2")]
    pub content: ::core::option::Option<model_message_with_id::Content>,
}
/// Nested message and enum types in `ModelMessageWithId`.
pub mod model_message_with_id {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Content {
        #[prost(message, tag = "1")]
        Message(super::ModelMessage),
        #[prost(message, tag = "2")]
        ToolAction(super::ToolAction),
    }
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetConversationResponse {
    #[prost(message, repeated, tag = "1")]
    pub ordered_messages: ::prost::alloc::vec::Vec<ModelMessageWithId>,
}
/// Lists all conversation threads that this user has in this workspace.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListConversationsRequest {
    #[prost(string, tag = "1")]
    pub workspace_rid: ::prost::alloc::string::String,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ConversationMetadata {
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    #[prost(string, tag = "2")]
    pub title: ::prost::alloc::string::String,
    #[prost(message, optional, tag = "3")]
    pub created_at: ::core::option::Option<
        super::super::super::google::protobuf::Timestamp,
    >,
    #[prost(message, optional, tag = "4")]
    pub last_updated_at: ::core::option::Option<
        super::super::super::google::protobuf::Timestamp,
    >,
}
/// ListConversationsResponse is a list of conversations that can be used in a GetConversationRequest
/// call to get a full conversation from storage. These are ordered by creation time.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListConversationsResponse {
    #[prost(message, repeated, tag = "1")]
    pub conversations: ::prost::alloc::vec::Vec<ConversationMetadata>,
}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct TimeRange {
    #[prost(message, optional, tag = "1")]
    pub range_start: ::core::option::Option<Timestamp>,
    #[prost(message, optional, tag = "2")]
    pub range_end: ::core::option::Option<Timestamp>,
}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct Timestamp {
    #[prost(int32, tag = "1")]
    pub seconds: i32,
    #[prost(int32, tag = "2")]
    pub nanoseconds: i32,
}
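// Example (not part of the generated output): building the TimeRange sent with a
// StreamChatRequest from plain second values; note that this local Timestamp type
// (i32 seconds/nanoseconds) is distinct from the google.protobuf.Timestamp used by
// ConversationMetadata, and the reference epoch is not specified by this file.
#[allow(dead_code)]
fn example_time_range(start_seconds: i32, end_seconds: i32) -> TimeRange {
    TimeRange {
        range_start: Some(Timestamp { seconds: start_seconds, nanoseconds: 0 }),
        range_end: Some(Timestamp { seconds: end_seconds, nanoseconds: 0 }),
    }
}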
/// ModelMessage is a discriminated union of user and assistant messages.
/// Each message type has its own structure and content.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ModelMessage {
    #[prost(oneof = "model_message::Kind", tags = "1, 2")]
    pub kind: ::core::option::Option<model_message::Kind>,
}
/// Nested message and enum types in `ModelMessage`.
pub mod model_message {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Kind {
        #[prost(message, tag = "1")]
        User(super::UserModelMessage),
        #[prost(message, tag = "2")]
        Assistant(super::AssistantModelMessage),
    }
}
/// A user message containing text
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UserModelMessage {
    #[prost(message, repeated, tag = "1")]
    pub text: ::prost::alloc::vec::Vec<UserContentPart>,
}
/// An assistant message containing content parts
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AssistantModelMessage {
    #[prost(message, repeated, tag = "1")]
    pub content_parts: ::prost::alloc::vec::Vec<AssistantContentPart>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UserContentPart {
    #[prost(oneof = "user_content_part::Part", tags = "1")]
    pub part: ::core::option::Option<user_content_part::Part>,
}
/// Nested message and enum types in `UserContentPart`.
pub mod user_content_part {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Part {
        #[prost(message, tag = "1")]
        Text(super::TextPart),
    }
}
/// Content part for assistant messages: can be text or reasoning.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AssistantContentPart {
    #[prost(oneof = "assistant_content_part::Part", tags = "1, 2")]
    pub part: ::core::option::Option<assistant_content_part::Part>,
}
/// Nested message and enum types in `AssistantContentPart`.
pub mod assistant_content_part {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Part {
        #[prost(message, tag = "1")]
        Text(super::TextPart),
        #[prost(message, tag = "2")]
        Reasoning(super::ReasoningPart),
    }
}
/// Text part for user or assistant messages.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextPart {
    #[prost(string, tag = "1")]
    pub text: ::prost::alloc::string::String,
}
/// User-supplied image part.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ImagePart {
    /// The base64-encoded image data
    #[prost(bytes = "vec", tag = "1")]
    pub data: ::prost::alloc::vec::Vec<u8>,
    /// The media type of the image (e.g. "image/png", "image/jpeg")
    #[prost(string, optional, tag = "2")]
    pub media_type: ::core::option::Option<::prost::alloc::string::String>,
    /// Optional: the filename of the image
    #[prost(string, optional, tag = "3")]
    pub filename: ::core::option::Option<::prost::alloc::string::String>,
}
/// Reasoning part for assistant messages.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningPart {
    #[prost(string, tag = "1")]
    pub reasoning: ::prost::alloc::string::String,
}
/// StreamChatResponse is a discriminated union response to a StreamChatRequest
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StreamChatResponse {
    #[prost(
        oneof = "stream_chat_response::Response",
        tags = "1, 2, 3, 4, 5, 6, 7, 8, 9, 10"
    )]
    pub response: ::core::option::Option<stream_chat_response::Response>,
}
/// Nested message and enum types in `StreamChatResponse`.
pub mod stream_chat_response {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Response {
        #[prost(message, tag = "1")]
        Finish(super::Finish),
        #[prost(message, tag = "2")]
        Error(super::Error),
        #[prost(message, tag = "3")]
        TextStart(super::TextStart),
        #[prost(message, tag = "4")]
        TextDelta(super::TextDelta),
        #[prost(message, tag = "5")]
        TextEnd(super::TextEnd),
        #[prost(message, tag = "6")]
        ReasoningStart(super::ReasoningStart),
        #[prost(message, tag = "7")]
        ReasoningDelta(super::ReasoningDelta),
        #[prost(message, tag = "8")]
        ReasoningEnd(super::ReasoningEnd),
        #[prost(message, tag = "9")]
        WorkbookMutation(super::WorkbookMutation),
        #[prost(message, tag = "10")]
        ToolAction(super::ToolAction),
    }
}
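// Example (not part of the generated output): a small sketch of folding streamed chunks
// into displayable text. Text deltas are appended to a buffer keyed by the id carried in
// TextStart/TextDelta; the remaining variants are only stubbed out here.
#[allow(dead_code)]
fn example_apply_chunk(
    buffers: &mut std::collections::HashMap<String, String>,
    chunk: StreamChatResponse,
) {
    match chunk.response {
        Some(stream_chat_response::Response::TextStart(start)) => {
            // Open a fresh buffer for this text message id.
            buffers.insert(start.id, String::new());
        }
        Some(stream_chat_response::Response::TextDelta(delta)) => {
            // Append the next chunk of text to the buffer for its id.
            buffers.entry(delta.id).or_default().push_str(&delta.delta);
        }
        Some(stream_chat_response::Response::Error(error)) => {
            eprintln!("agent error: {}", error.message);
        }
        // Finish, text/reasoning end markers, reasoning deltas, workbook mutations,
        // and tool actions would be handled here in a real client.
        _ => {}
    }
}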
/// Indicates the end of a chat session
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Finish {
    /// The message ids, in order, of all generated messages for this agent run.
    /// These ids can be used to branch a conversation from that specific message.
    #[prost(string, repeated, tag = "1")]
    pub ordered_message_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
/// An error that occurred during the chat session
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Error {
    #[prost(string, tag = "1")]
    pub message: ::prost::alloc::string::String,
}
/// Indicates the start of a text message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextStart {
    /// Uniquely identifies the text message (e.g. a uuid) so that the client can
    /// merge parallel message streams, if that happens.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// A delta (continuation) of a text message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextDelta {
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// The next chunk of text
    #[prost(string, tag = "2")]
    pub delta: ::prost::alloc::string::String,
}
/// Indicates the end of a text message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextEnd {
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// Indicates the start of a reasoning message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningStart {
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// A delta (continuation) of a reasoning message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningDelta {
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// The next chunk of reasoning
    #[prost(string, tag = "2")]
    pub delta: ::prost::alloc::string::String,
}
/// Indicates the end of a reasoning message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningEnd {
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// Add a new tab to the workbook
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AddTabMutation {
    /// if tab_name is not provided, we'll name it "New Tab"
    #[prost(string, optional, tag = "1")]
    pub tab_name: ::core::option::Option<::prost::alloc::string::String>,
}
/// this is the "result" of the mutation
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AddOrUpdatePanelMutation {
    /// JSON-serialized representation of IVizDefinition
    #[prost(string, tag = "1")]
    pub panel_as_json: ::prost::alloc::string::String,
    #[prost(string, tag = "2")]
    pub panel_id: ::prost::alloc::string::String,
    #[prost(int32, tag = "3")]
    pub tab_index: i32,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RemovePanelsMutation {
    #[prost(string, repeated, tag = "1")]
    pub panel_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
/// AddOrReplaceVariableMutation is a mutation to add or replace a variable in the workbook
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AddOrReplaceVariableMutation {
    /// scout_compute_api_ComputeSpecWithContext (this needs to be resolved)
    /// be careful: this uses the scout_compute_api version of ComputeSpecWithContext, which stores the spec as "seriesNode"
    /// and is NOT the same as ComputeSpecWithContext that is stored in INotebook.
    #[prost(string, tag = "1")]
    pub compute_spec_as_json: ::prost::alloc::string::String,
    /// if variable_name is not provided, we'll assume it's a new variable and auto-generate a unique name
    #[prost(string, optional, tag = "2")]
    pub variable_name: ::core::option::Option<::prost::alloc::string::String>,
    #[prost(string, optional, tag = "3")]
    pub display_name: ::core::option::Option<::prost::alloc::string::String>,
}
/// DeleteVariablesMutation is a mutation to delete variables from the workbook
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteVariablesMutation {
    #[prost(string, repeated, tag = "1")]
    pub variable_names: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
/// WorkbookMutation is a mutation to the workbook
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct WorkbookMutation {
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    #[prost(oneof = "workbook_mutation::Mutation", tags = "2, 3, 4, 5, 6")]
    pub mutation: ::core::option::Option<workbook_mutation::Mutation>,
}
/// Nested message and enum types in `WorkbookMutation`.
pub mod workbook_mutation {
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Mutation {
        #[prost(message, tag = "2")]
        AddTab(super::AddTabMutation),
        #[prost(message, tag = "3")]
        AddOrUpdatePanel(super::AddOrUpdatePanelMutation),
        #[prost(message, tag = "4")]
        RemovePanels(super::RemovePanelsMutation),
        #[prost(message, tag = "5")]
        AddOrReplaceVariable(super::AddOrReplaceVariableMutation),
        #[prost(message, tag = "6")]
        DeleteVariables(super::DeleteVariablesMutation),
    }
}
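// Example (not part of the generated output): a small sketch of dispatching on the
// mutation oneof to produce a one-line description; a real client would instead apply
// each variant to its local workbook state.
#[allow(dead_code)]
fn example_describe_mutation(mutation: &WorkbookMutation) -> String {
    match &mutation.mutation {
        Some(workbook_mutation::Mutation::AddTab(m)) => format!("add tab {:?}", m.tab_name),
        Some(workbook_mutation::Mutation::AddOrUpdatePanel(m)) => {
            format!("add/update panel {} on tab {}", m.panel_id, m.tab_index)
        }
        Some(workbook_mutation::Mutation::RemovePanels(m)) => {
            format!("remove {} panel(s)", m.panel_ids.len())
        }
        Some(workbook_mutation::Mutation::AddOrReplaceVariable(m)) => {
            format!("add/replace variable {:?}", m.variable_name)
        }
        Some(workbook_mutation::Mutation::DeleteVariables(m)) => {
            format!("delete {} variable(s)", m.variable_names.len())
        }
        None => "no-op".to_string(),
    }
}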
/// A concise description of a tool call that the agent is making internally.
/// Without revealing too much detail about the tool call, it informs the user what the agent is doing
/// at a high level. The format is `{tool_action_verb} {tool_target}`, for example:
/// "Search channels for My Datasource"
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ToolAction {
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// "Thought", "Read", "Find", "Look-up", etc.
    #[prost(string, tag = "2")]
    pub tool_action_verb: ::prost::alloc::string::String,
    /// "workbook", "channel", "variable", "panel", etc.
    #[prost(string, optional, tag = "3")]
    pub tool_target: ::core::option::Option<::prost::alloc::string::String>,
}
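// Example (not part of the generated output): rendering the `{tool_action_verb} {tool_target}`
// format described above for display to the user.
#[allow(dead_code)]
fn example_tool_action_label(action: &ToolAction) -> String {
    match &action.tool_target {
        Some(target) => format!("{} {}", action.tool_action_verb, target),
        None => action.tool_action_verb.clone(),
    }
}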
/// Generated client implementations.
pub mod workbook_agent_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// WorkbookAgentService provides AI-powered assistance for workbook operations
    #[derive(Debug, Clone)]
    pub struct WorkbookAgentServiceClient<T> {
        inner: tonic::client::Grpc<T>,
    }
    impl WorkbookAgentServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> WorkbookAgentServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> WorkbookAgentServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            WorkbookAgentServiceClient::new(InterceptedService::new(inner, interceptor))
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// StreamChat handles server-streaming chat for the workbook AI agent
        pub async fn stream_chat(
            &mut self,
            request: impl tonic::IntoRequest<super::StreamChatRequest>,
        ) -> std::result::Result<
            tonic::Response<tonic::codec::Streaming<super::StreamChatResponse>>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.WorkbookAgentService/StreamChat",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.WorkbookAgentService", "StreamChat"),
                );
            self.inner.server_streaming(req, path, codec).await
        }
        /// GetConversation handles getting the messages of a conversation, with an optional limit on the number of messages returned
        pub async fn get_conversation(
            &mut self,
            request: impl tonic::IntoRequest<super::GetConversationRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GetConversationResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.WorkbookAgentService/GetConversation",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.WorkbookAgentService",
                        "GetConversation",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// ListConversations handles getting the list of conversations, ordered by most recently updated
        pub async fn list_conversations(
            &mut self,
            request: impl tonic::IntoRequest<super::ListConversationsRequest>,
        ) -> std::result::Result<
            tonic::Response<super::ListConversationsResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.WorkbookAgentService/ListConversations",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.WorkbookAgentService",
                        "ListConversations",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// CreateConversation handles creating a conversation and assigning it a conversation rid
        pub async fn create_conversation(
            &mut self,
            request: impl tonic::IntoRequest<super::CreateConversationRequest>,
        ) -> std::result::Result<
            tonic::Response<super::CreateConversationResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.WorkbookAgentService/CreateConversation",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.WorkbookAgentService",
                        "CreateConversation",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// UpdateConversationMetadata handles updating any metadata associated with a conversation
        pub async fn update_conversation_metadata(
            &mut self,
            request: impl tonic::IntoRequest<super::UpdateConversationMetadataRequest>,
        ) -> std::result::Result<
            tonic::Response<super::UpdateConversationMetadataResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.WorkbookAgentService/UpdateConversationMetadata",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.WorkbookAgentService",
                        "UpdateConversationMetadata",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// DeleteConversation handles deleting a specific conversation by conversation rid
        pub async fn delete_conversation(
            &mut self,
            request: impl tonic::IntoRequest<super::DeleteConversationRequest>,
        ) -> std::result::Result<
            tonic::Response<super::DeleteConversationResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.WorkbookAgentService/DeleteConversation",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.WorkbookAgentService",
                        "DeleteConversation",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}
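// Example (not part of the generated output): a minimal end-to-end sketch of calling
// StreamChat with a single text user message and printing text deltas as they arrive.
// The endpoint, notebook JSON, and prompt text are hypothetical placeholders, and error
// handling is reduced to `?`.
#[allow(dead_code)]
async fn example_stream_chat() -> Result<(), Box<dyn std::error::Error>> {
    let mut client = workbook_agent_service_client::WorkbookAgentServiceClient::connect(
        "http://localhost:50051",
    )
    .await?;
    // Wrap a plain string in the UserContentPart -> UserModelMessage -> ModelMessage nesting.
    let user_message = ModelMessage {
        kind: Some(model_message::Kind::User(UserModelMessage {
            text: vec![UserContentPart {
                part: Some(user_content_part::Part::Text(TextPart {
                    text: "Plot engine temperature for the last run".to_string(),
                })),
            }],
        })),
    };
    let request = StreamChatRequest {
        messages: vec![user_message],
        notebook_as_json: Some("{}".to_string()),
        selected_tab_index: Some(0),
        // images, range, and the V2 `message` field are left unset in this sketch.
        ..Default::default()
    };
    let mut stream = client.stream_chat(request).await?.into_inner();
    while let Some(chunk) = stream.message().await? {
        match chunk.response {
            Some(stream_chat_response::Response::TextDelta(delta)) => print!("{}", delta.delta),
            Some(stream_chat_response::Response::Finish(finish)) => {
                println!("\nfinished with {} message id(s)", finish.ordered_message_ids.len());
            }
            _ => {}
        }
    }
    Ok(())
}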
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct IsAiEnabledForUserRequest {}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct IsAiEnabledForUserResponse {
    #[prost(bool, tag = "1")]
    pub is_enabled: bool,
}
/// Generated client implementations.
pub mod ai_features_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// AIFeaturesService provides information about enabled AI features
    #[derive(Debug, Clone)]
    pub struct AiFeaturesServiceClient<T> {
        inner: tonic::client::Grpc<T>,
    }
    impl AiFeaturesServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> AiFeaturesServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> AiFeaturesServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            AiFeaturesServiceClient::new(InterceptedService::new(inner, interceptor))
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// IsAIEnabledForUser can be used to check if AI is enabled for a specific user
        pub async fn is_ai_enabled_for_user(
            &mut self,
            request: impl tonic::IntoRequest<super::IsAiEnabledForUserRequest>,
        ) -> std::result::Result<
            tonic::Response<super::IsAiEnabledForUserResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIFeaturesService/IsAIEnabledForUser",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.AIFeaturesService",
                        "IsAIEnabledForUser",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}
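// Example (not part of the generated output): a minimal sketch of checking whether AI
// features are enabled for the calling user; the endpoint is a hypothetical placeholder.
#[allow(dead_code)]
async fn example_is_ai_enabled() -> Result<(), Box<dyn std::error::Error>> {
    let mut client = ai_features_service_client::AiFeaturesServiceClient::connect(
        "http://localhost:50051",
    )
    .await?;
    let response = client.is_ai_enabled_for_user(IsAiEnabledForUserRequest {}).await?;
    println!("AI enabled: {}", response.into_inner().is_enabled);
    Ok(())
}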
/// CreateOrUpdateKnowledgeBaseRequest is a request to create a knowledge base from an attachment in the attachment's
/// workspace. Subsequent calls for the same attachment will overwrite the existing knowledge base.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateOrUpdateKnowledgeBaseRequest {
    #[prost(string, tag = "1")]
    pub attachment_rid: ::prost::alloc::string::String,
    /// Summary of the knowledge base; it will be used by the LLM to decide when to use it
    #[prost(string, tag = "2")]
    pub summary_description: ::prost::alloc::string::String,
    #[prost(enumeration = "KnowledgeBaseType", optional, tag = "3")]
    pub r#type: ::core::option::Option<i32>,
}
/// CreateOrUpdateKnowledgeBaseResponse is the response to creating/updating a knowledge base
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateOrUpdateKnowledgeBaseResponse {
    #[prost(string, tag = "1")]
    pub knowledge_base_rid: ::prost::alloc::string::String,
}
/// KnowledgeBase represents a knowledge base entry
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct KnowledgeBase {
    #[prost(string, tag = "1")]
    pub knowledge_base_rid: ::prost::alloc::string::String,
    #[prost(string, tag = "2")]
    pub attachment_rid: ::prost::alloc::string::String,
    #[prost(string, tag = "3")]
    pub workspace_rid: ::prost::alloc::string::String,
    #[prost(string, tag = "4")]
    pub summary_description: ::prost::alloc::string::String,
    #[prost(enumeration = "KnowledgeBaseType", tag = "5")]
    pub r#type: i32,
    #[prost(int32, tag = "6")]
    pub version: i32,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListRequest {
    #[prost(string, tag = "1")]
    pub workspace_rid: ::prost::alloc::string::String,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListResponse {
    #[prost(message, repeated, tag = "1")]
    pub knowledge_bases: ::prost::alloc::vec::Vec<KnowledgeBase>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteRequest {
    #[prost(string, tag = "1")]
    pub knowledge_base_rid: ::prost::alloc::string::String,
}
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct DeleteResponse {
    #[prost(bool, tag = "1")]
    pub success: bool,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetBatchRequest {
    #[prost(string, repeated, tag = "1")]
    pub knowledge_base_rids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetBatchResponse {
    #[prost(message, repeated, tag = "1")]
    pub knowledge_bases: ::prost::alloc::vec::Vec<KnowledgeBase>,
}
/// GenerateSummaryDescription intentionally returns the generated description to the frontend
/// rather than storing it in the knowledge base directly, because the description needs to be accepted by the user.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GenerateSummaryDescriptionRequest {
    #[prost(string, tag = "1")]
    pub attachment_rid: ::prost::alloc::string::String,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GenerateSummaryDescriptionResponse {
    #[prost(string, tag = "1")]
    pub summary_description: ::prost::alloc::string::String,
}
/// KnowledgeBaseType defines the types of knowledge base
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum KnowledgeBaseType {
    /// Defaults to PROMPT
    Unspecified = 0,
    /// The knowledge base gets added directly to the prompt (it needs to be small enough!)
    Prompt = 1,
    /// The knowledge base is used via vector search on embeddings
    Embedding = 2,
}
impl KnowledgeBaseType {
    /// String value of the enum field names used in the ProtoBuf definition.
    ///
    /// The values are not transformed in any way and thus are considered stable
    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
    pub fn as_str_name(&self) -> &'static str {
        match self {
            Self::Unspecified => "KNOWLEDGE_BASE_TYPE_UNSPECIFIED",
            Self::Prompt => "KNOWLEDGE_BASE_TYPE_PROMPT",
            Self::Embedding => "KNOWLEDGE_BASE_TYPE_EMBEDDING",
        }
    }
    /// Creates an enum from field names used in the ProtoBuf definition.
    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
        match value {
            "KNOWLEDGE_BASE_TYPE_UNSPECIFIED" => Some(Self::Unspecified),
            "KNOWLEDGE_BASE_TYPE_PROMPT" => Some(Self::Prompt),
            "KNOWLEDGE_BASE_TYPE_EMBEDDING" => Some(Self::Embedding),
            _ => None,
        }
    }
}
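// Example (not part of the generated output): how the enum round-trips between its Rust
// variants, the i32 stored in KnowledgeBase::r#type, and the ProtoBuf string names.
#[allow(dead_code)]
fn example_knowledge_base_type_names() {
    let kind = KnowledgeBaseType::Prompt;
    assert_eq!(kind as i32, 1);
    assert_eq!(kind.as_str_name(), "KNOWLEDGE_BASE_TYPE_PROMPT");
    assert_eq!(
        KnowledgeBaseType::from_str_name("KNOWLEDGE_BASE_TYPE_EMBEDDING"),
        Some(KnowledgeBaseType::Embedding),
    );
}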
/// Generated client implementations.
pub mod knowledge_base_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// KnowledgeBaseService provides AI-powered knowledge base management
    #[derive(Debug, Clone)]
    pub struct KnowledgeBaseServiceClient<T> {
        inner: tonic::client::Grpc<T>,
    }
    impl KnowledgeBaseServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> KnowledgeBaseServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> KnowledgeBaseServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            KnowledgeBaseServiceClient::new(InterceptedService::new(inner, interceptor))
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// CreateOrUpdateKnowledgeBase creates a knowledge base in the workspace
        pub async fn create_or_update_knowledge_base(
            &mut self,
            request: impl tonic::IntoRequest<super::CreateOrUpdateKnowledgeBaseRequest>,
        ) -> std::result::Result<
            tonic::Response<super::CreateOrUpdateKnowledgeBaseResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/CreateOrUpdateKnowledgeBase",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.KnowledgeBaseService",
                        "CreateOrUpdateKnowledgeBase",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// List returns all knowledge bases in the specified workspace
        pub async fn list(
            &mut self,
            request: impl tonic::IntoRequest<super::ListRequest>,
        ) -> std::result::Result<tonic::Response<super::ListResponse>, tonic::Status> {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/List",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "List"));
            self.inner.unary(req, path, codec).await
        }
        /// Delete removes a knowledge base by its RID
        pub async fn delete(
            &mut self,
            request: impl tonic::IntoRequest<super::DeleteRequest>,
        ) -> std::result::Result<tonic::Response<super::DeleteResponse>, tonic::Status> {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/Delete",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "Delete"));
            self.inner.unary(req, path, codec).await
        }
        /// GetBatch retrieves multiple knowledge bases by their RIDs
        pub async fn get_batch(
            &mut self,
            request: impl tonic::IntoRequest<super::GetBatchRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GetBatchResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/GetBatch",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "GetBatch"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// GenerateSummaryDescription generates a summary description for an attachment rid
        pub async fn generate_summary_description(
            &mut self,
            request: impl tonic::IntoRequest<super::GenerateSummaryDescriptionRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GenerateSummaryDescriptionResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/GenerateSummaryDescription",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.KnowledgeBaseService",
                        "GenerateSummaryDescription",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}
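// Example (not part of the generated output): a minimal sketch of creating or updating a
// knowledge base for an attachment. The endpoint, rid, and summary text are hypothetical
// placeholders.
#[allow(dead_code)]
async fn example_create_knowledge_base() -> Result<(), Box<dyn std::error::Error>> {
    let mut client = knowledge_base_service_client::KnowledgeBaseServiceClient::connect(
        "http://localhost:50051",
    )
    .await?;
    let response = client
        .create_or_update_knowledge_base(CreateOrUpdateKnowledgeBaseRequest {
            attachment_rid: "ri.attachment.example".to_string(),
            summary_description: "Flight test procedures for vehicle X".to_string(),
            // Small documents can go straight into the prompt; larger ones would use EMBEDDING.
            r#type: Some(KnowledgeBaseType::Prompt as i32),
        })
        .await?
        .into_inner();
    println!("knowledge base rid: {}", response.knowledge_base_rid);
    Ok(())
}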