// nominal_api/proto/nominal.ai.v1.rs
// This file is @generated by prost-build.
/// StreamChatRequest is a request to stream chat messages for AI agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StreamChatRequest {
    /// The conversation ID
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    /// The user message to append to the conversation
    #[prost(message, optional, tag = "2")]
    pub message: ::core::option::Option<UserModelMessage>,
    /// Optional: image files to provide to the agent
    #[prost(message, repeated, tag = "3")]
    pub images: ::prost::alloc::vec::Vec<ImagePart>,
    /// Context-specific fields based on the oneofKind.
    /// At most one of the `Context` variants (proto fields 4 or 5) is set.
    #[prost(oneof = "stream_chat_request::Context", tags = "4, 5")]
    pub context: ::core::option::Option<stream_chat_request::Context>,
}
/// Nested message and enum types in `StreamChatRequest`.
pub mod stream_chat_request {
    /// Context-specific fields based on the oneofKind.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Context {
        /// Workbook-scoped context (proto field 4).
        #[prost(message, tag = "4")]
        Workbook(super::WorkbookContext),
        /// Global context with no workbook attached (proto field 5).
        #[prost(message, tag = "5")]
        Global(super::GlobalContext),
    }
}
/// WorkbookContext contains workbook-specific context fields
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct WorkbookContext {
    /// RID of the workbook to use for context
    #[prost(string, tag = "1")]
    pub workbook_rid: ::prost::alloc::string::String,
    /// Optional: the user's presence in the workbook
    #[prost(message, optional, tag = "2")]
    pub user_presence: ::core::option::Option<WorkbookUserPresence>,
}
/// DefaultContext (no context)
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct GlobalContext {}
/// WorkbookUserPresence contains the user's presence in the workbook
/// which is used to describe what the user is viewing at the time of the message.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct WorkbookUserPresence {
    /// Index of the workbook tab the user is viewing, if known
    /// (presumably zero-based — confirm against the client).
    #[prost(int32, optional, tag = "1")]
    pub tab_index: ::core::option::Option<i32>,
    /// Time range the user is currently viewing, if known.
    #[prost(message, optional, tag = "2")]
    pub range: ::core::option::Option<TimeRange>,
}
/// CreateConversation request will create a new conversation thread
/// if old conversation id is not set, a brand new, clear chat is created
/// If old conversation id is set without a previous message id, the full conversation thread will be copied
/// if previous message id is set with a previous message id, the conversation thread up until that message will be copied
/// the above case is useful for branching a conversation into a new thread
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateConversationRequest {
    /// Title for the new conversation.
    #[prost(string, tag = "1")]
    pub title: ::prost::alloc::string::String,
    /// RID of the workspace the conversation belongs to.
    #[prost(string, tag = "2")]
    pub workspace_rid: ::prost::alloc::string::String,
    /// Optional: existing conversation to copy from (see struct docs above).
    #[prost(string, optional, tag = "3")]
    pub old_conversation_rid: ::core::option::Option<::prost::alloc::string::String>,
    /// Optional: copy the old conversation only up to this message (branching).
    #[prost(string, optional, tag = "4")]
    pub previous_message_id: ::core::option::Option<::prost::alloc::string::String>,
}
/// CreateConversationResponse will return the conversation id for the new conversation
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateConversationResponse {
    /// RID assigned to the newly created conversation.
    #[prost(string, tag = "1")]
    pub new_conversation_rid: ::prost::alloc::string::String,
}
/// Request to update metadata (currently the title) of an existing conversation.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UpdateConversationMetadataRequest {
    /// New title for the conversation.
    #[prost(string, tag = "1")]
    pub title: ::prost::alloc::string::String,
    /// RID of the conversation to update.
    #[prost(string, tag = "2")]
    pub conversation_rid: ::prost::alloc::string::String,
}
/// Empty acknowledgement for UpdateConversationMetadata.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct UpdateConversationMetadataResponse {}
/// Request to delete a conversation by its RID.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteConversationRequest {
    /// RID of the conversation to delete.
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
}
/// Empty acknowledgement for DeleteConversation.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct DeleteConversationResponse {}
/// a GetConversationRequest allows you to retrieve a subset of messages from a conversation thread represented
/// by provided rid. To start from a particular message - you can also provide a message id.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetConversationRequest {
    /// RID of the conversation thread to read.
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    /// Optional: message id to start the page from.
    #[prost(string, optional, tag = "2")]
    pub page_start_message_id: ::core::option::Option<::prost::alloc::string::String>,
    /// Optional: maximum number of messages to return.
    #[prost(int32, optional, tag = "3")]
    pub max_message_count: ::core::option::Option<i32>,
}
/// Model message with id allows you to identify the message ID of a given message
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ModelMessageWithId {
    /// Id of this message (proto field 3; the payload uses fields 1-2).
    #[prost(string, tag = "3")]
    pub message_id: ::prost::alloc::string::String,
    /// The message payload: either a model message or a tool action.
    #[prost(oneof = "model_message_with_id::Content", tags = "1, 2")]
    pub content: ::core::option::Option<model_message_with_id::Content>,
}
/// Nested message and enum types in `ModelMessageWithId`.
pub mod model_message_with_id {
    /// Payload of a `ModelMessageWithId`.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Content {
        /// A user or assistant message.
        #[prost(message, tag = "1")]
        Message(super::ModelMessage),
        /// A high-level description of a tool call the agent made.
        #[prost(message, tag = "2")]
        ToolAction(super::ToolAction),
    }
}
/// Response containing a page of conversation messages plus metadata.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetConversationResponse {
    /// Messages in conversation order.
    #[prost(message, repeated, tag = "1")]
    pub ordered_messages: ::prost::alloc::vec::Vec<ModelMessageWithId>,
    /// Metadata (title, timestamps) for the conversation.
    #[prost(message, optional, tag = "2")]
    pub conversation_metadata: ::core::option::Option<ConversationMetadata>,
}
/// Will generate all conversation threads that this user has in this workspace
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListConversationsRequest {
    /// RID of the workspace whose conversations should be listed.
    #[prost(string, tag = "1")]
    pub workspace_rid: ::prost::alloc::string::String,
}
/// Metadata describing a single conversation thread.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ConversationMetadata {
    /// RID identifying the conversation.
    #[prost(string, tag = "1")]
    pub conversation_rid: ::prost::alloc::string::String,
    /// Human-readable conversation title.
    #[prost(string, tag = "2")]
    pub title: ::prost::alloc::string::String,
    /// When the conversation was created.
    #[prost(message, optional, tag = "3")]
    pub created_at: ::core::option::Option<
        super::super::super::google::protobuf::Timestamp,
    >,
    /// When the conversation was last updated.
    #[prost(message, optional, tag = "4")]
    pub last_updated_at: ::core::option::Option<
        super::super::super::google::protobuf::Timestamp,
    >,
}
/// ListConversationsResponse is a list of conversations that can be used in a call to GetConversationRequest
/// to get a full conversation from storage. These are ordered by creation time.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListConversationsResponse {
    /// Conversations in the workspace, ordered by creation time.
    #[prost(message, repeated, tag = "1")]
    pub conversations: ::prost::alloc::vec::Vec<ConversationMetadata>,
}
/// A time interval bounded by two timestamps.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct TimeRange {
    /// Inclusive-or-exclusive semantics are not specified here — confirm with the server.
    #[prost(message, optional, tag = "1")]
    pub range_start: ::core::option::Option<Timestamp>,
    /// End of the range.
    #[prost(message, optional, tag = "2")]
    pub range_end: ::core::option::Option<Timestamp>,
}
/// A timestamp split into seconds and nanoseconds.
/// NOTE(review): `seconds` is int32, unlike google.protobuf.Timestamp's int64.
/// If this encodes Unix epoch seconds it overflows in 2038 — confirm intended epoch/range.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct Timestamp {
    /// Whole seconds component.
    #[prost(int32, tag = "1")]
    pub seconds: i32,
    /// Sub-second nanoseconds component.
    #[prost(int32, tag = "2")]
    pub nanoseconds: i32,
}
/// ModelMessage is a discriminated union of system, user, assistant, and tool messages.
/// Each message type has its own structure and content.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ModelMessage {
    /// The concrete message kind; only user and assistant variants are defined here.
    #[prost(oneof = "model_message::Kind", tags = "1, 2")]
    pub kind: ::core::option::Option<model_message::Kind>,
}
/// Nested message and enum types in `ModelMessage`.
pub mod model_message {
    /// Discriminant for `ModelMessage`.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Kind {
        /// Message authored by the user.
        #[prost(message, tag = "1")]
        User(super::UserModelMessage),
        /// Message authored by the assistant.
        #[prost(message, tag = "2")]
        Assistant(super::AssistantModelMessage),
    }
}
/// A user message containing text
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UserModelMessage {
    /// Ordered content parts of the user message.
    /// NOTE(review): field is named `text` but holds structured parts — name comes from the proto.
    #[prost(message, repeated, tag = "1")]
    pub text: ::prost::alloc::vec::Vec<UserContentPart>,
}
/// An assistant message containing text
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AssistantModelMessage {
    /// Ordered content parts of the assistant message.
    #[prost(message, repeated, tag = "1")]
    pub content_parts: ::prost::alloc::vec::Vec<AssistantContentPart>,
}
/// A single content part of a user message.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UserContentPart {
    /// Currently only a text part is defined (proto field 1).
    #[prost(oneof = "user_content_part::Part", tags = "1")]
    pub part: ::core::option::Option<user_content_part::Part>,
}
/// Nested message and enum types in `UserContentPart`.
pub mod user_content_part {
    /// Discriminant for `UserContentPart`.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Part {
        /// Plain text content.
        #[prost(message, tag = "1")]
        Text(super::TextPart),
    }
}
/// Content part for assistant messages: can be text, reasoning, or mutation.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AssistantContentPart {
    /// Only text and reasoning variants are defined here (proto fields 1-2),
    /// despite the mutation mention in the struct doc above.
    #[prost(oneof = "assistant_content_part::Part", tags = "1, 2")]
    pub part: ::core::option::Option<assistant_content_part::Part>,
}
/// Nested message and enum types in `AssistantContentPart`.
pub mod assistant_content_part {
    /// Discriminant for `AssistantContentPart`.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Part {
        /// Plain text content.
        #[prost(message, tag = "1")]
        Text(super::TextPart),
        /// Model reasoning content.
        #[prost(message, tag = "2")]
        Reasoning(super::ReasoningPart),
    }
}
/// Text part for user or assistant messages.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextPart {
    /// The text content.
    #[prost(string, tag = "1")]
    pub text: ::prost::alloc::string::String,
}
/// User-supplied image part.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ImagePart {
    /// The base64-encoded image data
    #[prost(bytes = "vec", tag = "1")]
    pub data: ::prost::alloc::vec::Vec<u8>,
    /// The media type of the image (e.g. "image/png", "image/jpeg")
    #[prost(string, optional, tag = "2")]
    pub media_type: ::core::option::Option<::prost::alloc::string::String>,
    /// Optional: the filename of the image
    #[prost(string, optional, tag = "3")]
    pub filename: ::core::option::Option<::prost::alloc::string::String>,
}
/// Reasoning part for assistant messages.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningPart {
    /// The reasoning text.
    #[prost(string, tag = "1")]
    pub reasoning: ::prost::alloc::string::String,
}
/// StreamChatResponse is a discriminated union response to a StreamChatRequest
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StreamChatResponse {
    /// Exactly one stream event per response message.
    #[prost(
        oneof = "stream_chat_response::Response",
        tags = "1, 2, 3, 4, 5, 6, 7, 8, 9, 10"
    )]
    pub response: ::core::option::Option<stream_chat_response::Response>,
}
/// Nested message and enum types in `StreamChatResponse`.
pub mod stream_chat_response {
    /// A single event in the chat response stream.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Response {
        /// Terminal event: the agent run finished.
        #[prost(message, tag = "1")]
        Finish(super::Finish),
        /// An error occurred during the chat session.
        #[prost(message, tag = "2")]
        Error(super::Error),
        /// Start of a streamed text message.
        #[prost(message, tag = "3")]
        TextStart(super::TextStart),
        /// Incremental chunk of a text message.
        #[prost(message, tag = "4")]
        TextDelta(super::TextDelta),
        /// End of a streamed text message.
        #[prost(message, tag = "5")]
        TextEnd(super::TextEnd),
        /// Start of a streamed reasoning message.
        #[prost(message, tag = "6")]
        ReasoningStart(super::ReasoningStart),
        /// Incremental chunk of a reasoning message.
        #[prost(message, tag = "7")]
        ReasoningDelta(super::ReasoningDelta),
        /// End of a streamed reasoning message.
        #[prost(message, tag = "8")]
        ReasoningEnd(super::ReasoningEnd),
        /// this will be deprecated in favor of MCP-based mutations
        #[prost(message, tag = "9")]
        WorkbookMutation(super::WorkbookMutation),
        /// High-level description of a tool call the agent is making.
        #[prost(message, tag = "10")]
        ToolAction(super::ToolAction),
    }
}
/// Indicates the end of a chat session
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Finish {
    /// The message ids in order of all generated messages for this agent run
    /// These ids can be used to branch a message from that specific message
    #[prost(string, repeated, tag = "1")]
    pub ordered_message_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
    /// In the case that this is the first agent run in a conversation thread, we also
    /// return the new conversation title generated
    #[prost(string, optional, tag = "2")]
    pub new_title: ::core::option::Option<::prost::alloc::string::String>,
}
/// An error that occurred during the chat session
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Error {
    /// Human-readable error description.
    #[prost(string, tag = "1")]
    pub message: ::prost::alloc::string::String,
}
/// Indicates the start of a text message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextStart {
    /// uniquely identifies the text message (e.g. uuid) so that the client can
    /// merge parallel message streams (if it happens).
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// A delta (continuation) of a text message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextDelta {
    /// Id of the text message this delta belongs to (matches `TextStart.id`).
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// The next chunk of text
    #[prost(string, tag = "2")]
    pub delta: ::prost::alloc::string::String,
}
/// Indicates the end of a text message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TextEnd {
    /// Id of the text message that has finished streaming.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// Indicates the start of a reasoning message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningStart {
    /// Uniquely identifies the reasoning message stream.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// A delta (continuation) of a reasoning message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningDelta {
    /// Id of the reasoning message this delta belongs to (matches `ReasoningStart.id`).
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// The next chunk of reasoning
    #[prost(string, tag = "2")]
    pub delta: ::prost::alloc::string::String,
}
/// Indicates the end of a reasoning message from the agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ReasoningEnd {
    /// Id of the reasoning message that has finished streaming.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
}
/// Add a new tab to the workbook
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AddTabMutation {
    /// if tab_name is not provided, we'll name it "New Tab"
    #[prost(string, optional, tag = "1")]
    pub tab_name: ::core::option::Option<::prost::alloc::string::String>,
}
/// this is the "result" of the mutation
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AddOrUpdatePanelMutation {
    /// JSON-serialized representation of IVizDefinition
    #[prost(string, tag = "1")]
    pub panel_as_json: ::prost::alloc::string::String,
    /// Id of the panel to add or update.
    #[prost(string, tag = "2")]
    pub panel_id: ::prost::alloc::string::String,
    /// Index of the workbook tab the panel lives on.
    #[prost(int32, tag = "3")]
    pub tab_index: i32,
}
/// Remove the identified panels from the workbook.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RemovePanelsMutation {
    /// Ids of the panels to remove.
    #[prost(string, repeated, tag = "1")]
    pub panel_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
/// AddOrReplaceVariableMutation is a mutation to add or replace a variable in the workbook
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AddOrReplaceVariableMutation {
    /// scout_compute_api_ComputeSpecWithContext (this needs to be resolved)
    /// be careful: this uses the scout_compute_api version of ComputeSpecWithContext, which stores the spec as "seriesNode"
    /// and is NOT the same as ComputeSpecWithContext that is stored in INotebook.
    #[prost(string, tag = "1")]
    pub compute_spec_as_json: ::prost::alloc::string::String,
    /// if variable_name is not provided, we'll assume it's a new variable and auto-generate a unique name
    #[prost(string, optional, tag = "2")]
    pub variable_name: ::core::option::Option<::prost::alloc::string::String>,
    /// Optional: display name shown to the user for the variable.
    #[prost(string, optional, tag = "3")]
    pub display_name: ::core::option::Option<::prost::alloc::string::String>,
}
/// DeleteVariablesMutation is a mutation to delete variables from the workbook
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteVariablesMutation {
    /// Names of the variables to delete.
    #[prost(string, repeated, tag = "1")]
    pub variable_names: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
/// WorkbookMutation is a mutation to the workbook
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct WorkbookMutation {
    /// Uniquely identifies this mutation event.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// The concrete mutation to apply (proto fields 2-6).
    #[prost(oneof = "workbook_mutation::Mutation", tags = "2, 3, 4, 5, 6")]
    pub mutation: ::core::option::Option<workbook_mutation::Mutation>,
}
/// Nested message and enum types in `WorkbookMutation`.
pub mod workbook_mutation {
    /// Discriminant for `WorkbookMutation`.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Mutation {
        /// Add a new tab to the workbook.
        #[prost(message, tag = "2")]
        AddTab(super::AddTabMutation),
        /// Add a new panel or update an existing one.
        #[prost(message, tag = "3")]
        AddOrUpdatePanel(super::AddOrUpdatePanelMutation),
        /// Remove panels by id.
        #[prost(message, tag = "4")]
        RemovePanels(super::RemovePanelsMutation),
        /// Add a new variable or replace an existing one.
        #[prost(message, tag = "5")]
        AddOrReplaceVariable(super::AddOrReplaceVariableMutation),
        /// Delete variables by name.
        #[prost(message, tag = "6")]
        DeleteVariables(super::DeleteVariablesMutation),
    }
}
/// this is a concise description of a tool call that the agent is making internally
/// without revealing too much detail about the tool call, it informs the user what the agent is doing
/// at a high level. the format is: `{tool_action_verb} {tool_target}` for example:
/// "Search channels for My Datasource"
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ToolAction {
    /// Uniquely identifies this tool action.
    #[prost(string, tag = "1")]
    pub id: ::prost::alloc::string::String,
    /// "Thought", "Read", "Find", "Look-up", etc.
    #[prost(string, tag = "2")]
    pub tool_action_verb: ::prost::alloc::string::String,
    /// "workbook", "channel", "variable", "panel", etc.
    #[prost(string, optional, tag = "3")]
    pub tool_target: ::core::option::Option<::prost::alloc::string::String>,
}
/// Generated client implementations.
pub mod ai_agent_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// AIAgentService provides AI-powered assistance for general operations
    #[derive(Debug, Clone)]
    pub struct AiAgentServiceClient<T> {
        inner: tonic::client::Grpc<T>,
    }
    impl AiAgentServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> AiAgentServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        /// Create a client over an already-established gRPC service.
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        /// Create a client with an explicit origin URI (for non-default routing).
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        /// Wrap the service with a request interceptor (e.g. for auth headers).
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> AiAgentServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            AiAgentServiceClient::new(InterceptedService::new(inner, interceptor))
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// StreamChat handles bidirectional streaming chat for AI agent
        /// (server-streaming on the wire: one request in, a stream of responses out).
        pub async fn stream_chat(
            &mut self,
            request: impl tonic::IntoRequest<super::StreamChatRequest>,
        ) -> std::result::Result<
            tonic::Response<tonic::codec::Streaming<super::StreamChatResponse>>,
            tonic::Status,
        > {
            // Wait for the underlying service to be ready before encoding the call.
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/StreamChat",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(GrpcMethod::new("nominal.ai.v1.AIAgentService", "StreamChat"));
            self.inner.server_streaming(req, path, codec).await
        }
        /// GetConversation handles getting a complete conversation list, with an optional limit on number of messages returned
        pub async fn get_conversation(
            &mut self,
            request: impl tonic::IntoRequest<super::GetConversationRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GetConversationResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/GetConversation",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "GetConversation"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// ListConversations handles getting the list of conversation ids ordered by most recently updated
        pub async fn list_conversations(
            &mut self,
            request: impl tonic::IntoRequest<super::ListConversationsRequest>,
        ) -> std::result::Result<
            tonic::Response<super::ListConversationsResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/ListConversations",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "ListConversations"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// CreateConversation handles creating a conversation and assigning it a conversation rid
        pub async fn create_conversation(
            &mut self,
            request: impl tonic::IntoRequest<super::CreateConversationRequest>,
        ) -> std::result::Result<
            tonic::Response<super::CreateConversationResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/CreateConversation",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "CreateConversation"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// UpdateConversationMetadata handles updating any metadata associated with a conversation
        pub async fn update_conversation_metadata(
            &mut self,
            request: impl tonic::IntoRequest<super::UpdateConversationMetadataRequest>,
        ) -> std::result::Result<
            tonic::Response<super::UpdateConversationMetadataResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/UpdateConversationMetadata",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.AIAgentService",
                        "UpdateConversationMetadata",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// DeleteConversation handles deleting a specific conversation by conversation rid
        pub async fn delete_conversation(
            &mut self,
            request: impl tonic::IntoRequest<super::DeleteConversationRequest>,
        ) -> std::result::Result<
            tonic::Response<super::DeleteConversationResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIAgentService/DeleteConversation",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.AIAgentService", "DeleteConversation"),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}
/// StreamChatRequest is a request to stream chat messages for workbook AI agent
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct WorkbookAgentServiceStreamChatRequest {
    /// Full message history to send to the agent.
    #[prost(message, repeated, tag = "1")]
    pub messages: ::prost::alloc::vec::Vec<ModelMessage>,
    /// JSON-serialized representation of INotebook
    #[prost(string, optional, tag = "2")]
    pub notebook_as_json: ::core::option::Option<::prost::alloc::string::String>,
    /// The current tab visible in the workbook from the user's perspective
    #[prost(int32, optional, tag = "3")]
    pub selected_tab_index: ::core::option::Option<i32>,
    /// Optional: image files to provide to the agent
    #[prost(message, repeated, tag = "4")]
    pub images: ::prost::alloc::vec::Vec<ImagePart>,
    /// Time range for the tab that is currently visible to the user
    #[prost(message, optional, tag = "5")]
    pub range: ::core::option::Option<TimeRange>,
    /// V2 conversation API persists the message and any assistant responses to storage under the provided
    /// conversation_id. if id does not exist in the database, then a new conversation is started from this message.
    #[prost(message, optional, tag = "6")]
    pub message: ::core::option::Option<AppendMessage>,
}
/// This will append a message to an existing conversation
/// A non existent conversation id will raise an error
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AppendMessage {
    /// The user message to append.
    #[prost(message, optional, tag = "1")]
    pub message: ::core::option::Option<UserModelMessage>,
    /// RID of the conversation to append to.
    #[prost(string, tag = "2")]
    pub conversation_rid: ::prost::alloc::string::String,
}
/// Generated client implementations.
pub mod workbook_agent_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// WorkbookAgentService provides AI-powered assistance for workbook operations
    /// this is deprecated in favor of the AIAgentService
    #[derive(Debug, Clone)]
    pub struct WorkbookAgentServiceClient<T> {
        inner: tonic::client::Grpc<T>,
    }
    impl WorkbookAgentServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> WorkbookAgentServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> WorkbookAgentServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            WorkbookAgentServiceClient::new(InterceptedService::new(inner, interceptor))
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        // Every RPC below follows the same generated pattern: wait until the
        // underlying channel is ready, build a Prost codec, then dispatch on
        // the static gRPC path and record the method in request extensions.
        /// StreamChat handles bidirectional streaming chat for workbook AI agent
        // NOTE(review): despite "bidirectional" in the doc above, this is
        // dispatched as a server-streaming call (unary request, streaming
        // response) — see `server_streaming` below.
        pub async fn stream_chat(
            &mut self,
            request: impl tonic::IntoRequest<
                super::WorkbookAgentServiceStreamChatRequest,
            >,
        ) -> std::result::Result<
            tonic::Response<tonic::codec::Streaming<super::StreamChatResponse>>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.WorkbookAgentService/StreamChat",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.WorkbookAgentService", "StreamChat"),
                );
            self.inner.server_streaming(req, path, codec).await
        }
        /// GetConversation handles getting a complete conversation list, with an optional limit on number of messages returned
        pub async fn get_conversation(
            &mut self,
            request: impl tonic::IntoRequest<super::GetConversationRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GetConversationResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.WorkbookAgentService/GetConversation",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.WorkbookAgentService",
                        "GetConversation",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// ListConversations handles getting the list of conversation ids ordered by most recently updated
        pub async fn list_conversations(
            &mut self,
            request: impl tonic::IntoRequest<super::ListConversationsRequest>,
        ) -> std::result::Result<
            tonic::Response<super::ListConversationsResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.WorkbookAgentService/ListConversations",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.WorkbookAgentService",
                        "ListConversations",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// CreateConversation handles creating a conversation and assigning it a conversation rid
        pub async fn create_conversation(
            &mut self,
            request: impl tonic::IntoRequest<super::CreateConversationRequest>,
        ) -> std::result::Result<
            tonic::Response<super::CreateConversationResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.WorkbookAgentService/CreateConversation",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.WorkbookAgentService",
                        "CreateConversation",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// UpdateConversationMetadata handles updating any metadata associated with a conversation
        pub async fn update_conversation_metadata(
            &mut self,
            request: impl tonic::IntoRequest<super::UpdateConversationMetadataRequest>,
        ) -> std::result::Result<
            tonic::Response<super::UpdateConversationMetadataResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.WorkbookAgentService/UpdateConversationMetadata",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.WorkbookAgentService",
                        "UpdateConversationMetadata",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// DeleteConversation handles deleting a specific conversation by conversation rid
        pub async fn delete_conversation(
            &mut self,
            request: impl tonic::IntoRequest<super::DeleteConversationRequest>,
        ) -> std::result::Result<
            tonic::Response<super::DeleteConversationResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.WorkbookAgentService/DeleteConversation",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.WorkbookAgentService",
                        "DeleteConversation",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}
/// Request for `GetProviderStatus`; carries no fields.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct GetProviderStatusRequest {}
/// Health snapshot returned by `GetProviderStatus`: when it was taken, the
/// latest probe result, and a 30-minute aggregate.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct GetProviderStatusResponse {
    /// Timestamp when the last status was determined
    #[prost(message, optional, tag = "1")]
    pub timestamp: ::core::option::Option<
        super::super::super::google::protobuf::Timestamp,
    >,
    /// Status of the most recent health check probe
    #[prost(message, optional, tag = "2")]
    pub last_status: ::core::option::Option<ProviderStatus>,
    /// Aggregated status over the last 30 minutes (DEGRADED if any check failed or exceeded thresholds)
    #[prost(message, optional, tag = "3")]
    pub aggregated_status_over_last_30m: ::core::option::Option<ProviderStatus>,
}
/// Provider health status: exactly one of `Healthy` or `Degraded`
/// (see `provider_status::Status`).
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct ProviderStatus {
    /// Oneof payload; `None` if neither variant was set on the wire.
    #[prost(oneof = "provider_status::Status", tags = "1, 2")]
    pub status: ::core::option::Option<provider_status::Status>,
}
/// Nested message and enum types in `ProviderStatus`.
pub mod provider_status {
    /// Oneof status payload: the provider is either healthy or degraded.
    #[derive(Clone, Copy, PartialEq, ::prost::Oneof)]
    pub enum Status {
        /// Provider is healthy (marker message, no fields).
        #[prost(message, tag = "1")]
        Healthy(super::Healthy),
        /// Provider is degraded; payload carries the reason.
        #[prost(message, tag = "2")]
        Degraded(super::Degraded),
    }
}
/// Marker message indicating a healthy provider; carries no fields.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct Healthy {}
/// Indicates a degraded provider along with the reason.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct Degraded {
    /// Encoded `DegradationReason` enum value (use `DegradationReason::try_from`
    /// / the prost accessor to decode).
    #[prost(enumeration = "DegradationReason", tag = "1")]
    pub reason: i32,
}
/// Latency measurements for the model provider.
/// NOTE(review): presumably captured per health-check probe — confirm against
/// the service implementation.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct ProviderMetrics {
    /// Milliseconds until the first token was received.
    #[prost(int32, tag = "1")]
    pub time_to_first_token_ms: i32,
    /// Total end-to-end time in milliseconds.
    #[prost(int32, tag = "2")]
    pub total_time_ms: i32,
}
1037#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
1038#[repr(i32)]
1039pub enum DegradationReason {
1040    Unspecified = 0,
1041    HighLatency = 1,
1042    Failures = 2,
1043    HighLatencyAndFailures = 3,
1044}
1045impl DegradationReason {
1046    /// String value of the enum field names used in the ProtoBuf definition.
1047    ///
1048    /// The values are not transformed in any way and thus are considered stable
1049    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
1050    pub fn as_str_name(&self) -> &'static str {
1051        match self {
1052            Self::Unspecified => "DEGRADATION_REASON_UNSPECIFIED",
1053            Self::HighLatency => "DEGRADATION_REASON_HIGH_LATENCY",
1054            Self::Failures => "DEGRADATION_REASON_FAILURES",
1055            Self::HighLatencyAndFailures => {
1056                "DEGRADATION_REASON_HIGH_LATENCY_AND_FAILURES"
1057            }
1058        }
1059    }
1060    /// Creates an enum from field names used in the ProtoBuf definition.
1061    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
1062        match value {
1063            "DEGRADATION_REASON_UNSPECIFIED" => Some(Self::Unspecified),
1064            "DEGRADATION_REASON_HIGH_LATENCY" => Some(Self::HighLatency),
1065            "DEGRADATION_REASON_FAILURES" => Some(Self::Failures),
1066            "DEGRADATION_REASON_HIGH_LATENCY_AND_FAILURES" => {
1067                Some(Self::HighLatencyAndFailures)
1068            }
1069            _ => None,
1070        }
1071    }
1072}
/// Generated client implementations.
pub mod model_provider_health_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// ModelProviderHealthService monitors the health and performance of the backing LLM model provider.
    /// It runs lightweight health checks every 5 minutes to measure provider responsiveness and reliability,
    /// independent of the complexity of user prompts.
    #[derive(Debug, Clone)]
    pub struct ModelProviderHealthServiceClient<T> {
        inner: tonic::client::Grpc<T>,
    }
    impl ModelProviderHealthServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> ModelProviderHealthServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> ModelProviderHealthServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            ModelProviderHealthServiceClient::new(
                InterceptedService::new(inner, interceptor),
            )
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// GetProviderStatus returns the current health status of the model provider based on recent health checks.
        /// The status is HEALTHY if all checks in the last 30 minutes passed latency thresholds,
        /// or DEGRADED if any checks exceeded latency thresholds or failed entirely.
        // Standard generated unary call: wait for readiness, then dispatch on
        // the static gRPC path with a Prost codec.
        pub async fn get_provider_status(
            &mut self,
            request: impl tonic::IntoRequest<super::GetProviderStatusRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GetProviderStatusResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.ModelProviderHealthService/GetProviderStatus",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.ModelProviderHealthService",
                        "GetProviderStatus",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}
/// Request for `IsAIEnabledForUser`; carries no fields.
/// NOTE(review): presumably the user is inferred from the request's auth
/// context — confirm with the service implementation.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct IsAiEnabledForUserRequest {}
/// Response for `IsAIEnabledForUser`.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct IsAiEnabledForUserResponse {
    /// True when AI features are enabled for the user.
    #[prost(bool, tag = "1")]
    pub is_enabled: bool,
}
/// Generated client implementations.
pub mod ai_features_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// AIFeaturesService provides information about enabled AI features
    #[derive(Debug, Clone)]
    pub struct AiFeaturesServiceClient<T> {
        inner: tonic::client::Grpc<T>,
    }
    impl AiFeaturesServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> AiFeaturesServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> AiFeaturesServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            AiFeaturesServiceClient::new(InterceptedService::new(inner, interceptor))
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// IsAIEnabledForUser can be used to check if AI is enabled for a specific user
        // Standard generated unary call: wait for readiness, then dispatch on
        // the static gRPC path with a Prost codec.
        pub async fn is_ai_enabled_for_user(
            &mut self,
            request: impl tonic::IntoRequest<super::IsAiEnabledForUserRequest>,
        ) -> std::result::Result<
            tonic::Response<super::IsAiEnabledForUserResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.AIFeaturesService/IsAIEnabledForUser",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.AIFeaturesService",
                        "IsAIEnabledForUser",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}
/// CreateOrUpdateKnowledgeBaseRequest is a request to create a knowledge from an attachment in the attachment's
/// workspace. Subsequent calls for the same attachment will overwrite the existing knowledge base.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateOrUpdateKnowledgeBaseRequest {
    /// RID of the attachment backing the knowledge base.
    #[prost(string, tag = "1")]
    pub attachment_rid: ::prost::alloc::string::String,
    /// summary of the knowledge base, will be used by the LLM to decide when to use it
    #[prost(string, tag = "2")]
    pub summary_description: ::prost::alloc::string::String,
    /// Optional `KnowledgeBaseType` as a raw enum value; per the enum docs,
    /// UNSPECIFIED defaults to PROMPT.
    #[prost(enumeration = "KnowledgeBaseType", optional, tag = "3")]
    pub r#type: ::core::option::Option<i32>,
}
/// CreateOrUpdateKnowledgeBaseResponse is the response to creating/updating a knowledge base
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateOrUpdateKnowledgeBaseResponse {
    /// RID of the created or updated knowledge base.
    #[prost(string, tag = "1")]
    pub knowledge_base_rid: ::prost::alloc::string::String,
}
/// KnowledgeBase represents a knowledge base entry
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct KnowledgeBase {
    /// RID identifying this knowledge base.
    #[prost(string, tag = "1")]
    pub knowledge_base_rid: ::prost::alloc::string::String,
    /// RID of the attachment the knowledge base was created from.
    #[prost(string, tag = "2")]
    pub attachment_rid: ::prost::alloc::string::String,
    /// RID of the workspace the knowledge base belongs to.
    #[prost(string, tag = "3")]
    pub workspace_rid: ::prost::alloc::string::String,
    /// Summary used by the LLM to decide when to use this knowledge base
    /// (see `CreateOrUpdateKnowledgeBaseRequest`).
    #[prost(string, tag = "4")]
    pub summary_description: ::prost::alloc::string::String,
    /// `KnowledgeBaseType` as a raw enum value.
    #[prost(enumeration = "KnowledgeBaseType", tag = "5")]
    pub r#type: i32,
    /// Version number of the entry.
    /// NOTE(review): presumably incremented on each overwrite — confirm.
    #[prost(int32, tag = "6")]
    pub version: i32,
}
/// Request to list knowledge bases within a workspace.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListRequest {
    /// RID of the workspace whose knowledge bases are listed.
    #[prost(string, tag = "1")]
    pub workspace_rid: ::prost::alloc::string::String,
}
/// Response containing the knowledge bases in the requested workspace.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListResponse {
    /// The matching knowledge base entries.
    #[prost(message, repeated, tag = "1")]
    pub knowledge_bases: ::prost::alloc::vec::Vec<KnowledgeBase>,
}
/// Request to delete a knowledge base by RID.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteRequest {
    /// RID of the knowledge base to delete.
    #[prost(string, tag = "1")]
    pub knowledge_base_rid: ::prost::alloc::string::String,
}
/// Response to a delete request.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct DeleteResponse {
    /// True when the deletion succeeded.
    #[prost(bool, tag = "1")]
    pub success: bool,
}
/// Request to fetch multiple knowledge bases by RID in one call.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetBatchRequest {
    /// RIDs of the knowledge bases to fetch.
    #[prost(string, repeated, tag = "1")]
    pub knowledge_base_rids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
/// Response to a batch fetch of knowledge bases.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetBatchResponse {
    /// The resolved knowledge base entries.
    #[prost(message, repeated, tag = "1")]
    pub knowledge_bases: ::prost::alloc::vec::Vec<KnowledgeBase>,
}
/// generate summary description is intentionally going to return the generated description to the frontend
/// rather than storing it in the knowledge base directly because the description needs to be accepted by the user
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GenerateSummaryDescriptionRequest {
    /// RID of the attachment to summarize.
    #[prost(string, tag = "1")]
    pub attachment_rid: ::prost::alloc::string::String,
}
/// Response carrying the generated summary, returned to the frontend for
/// user acceptance rather than stored directly (see the request's docs).
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GenerateSummaryDescriptionResponse {
    /// The generated summary description.
    #[prost(string, tag = "1")]
    pub summary_description: ::prost::alloc::string::String,
}
1410/// KnowledgeBaseType defines the types of knowledge base
1411#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
1412#[repr(i32)]
1413pub enum KnowledgeBaseType {
1414    /// defaults to PROMPT
1415    Unspecified = 0,
1416    /// knowledge base gets added directly to prompt (needs to be small enough!)
1417    Prompt = 1,
1418    /// knowledge base gets used via vector search on embeddings
1419    Embedding = 2,
1420}
1421impl KnowledgeBaseType {
1422    /// String value of the enum field names used in the ProtoBuf definition.
1423    ///
1424    /// The values are not transformed in any way and thus are considered stable
1425    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
1426    pub fn as_str_name(&self) -> &'static str {
1427        match self {
1428            Self::Unspecified => "KNOWLEDGE_BASE_TYPE_UNSPECIFIED",
1429            Self::Prompt => "KNOWLEDGE_BASE_TYPE_PROMPT",
1430            Self::Embedding => "KNOWLEDGE_BASE_TYPE_EMBEDDING",
1431        }
1432    }
1433    /// Creates an enum from field names used in the ProtoBuf definition.
1434    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
1435        match value {
1436            "KNOWLEDGE_BASE_TYPE_UNSPECIFIED" => Some(Self::Unspecified),
1437            "KNOWLEDGE_BASE_TYPE_PROMPT" => Some(Self::Prompt),
1438            "KNOWLEDGE_BASE_TYPE_EMBEDDING" => Some(Self::Embedding),
1439            _ => None,
1440        }
1441    }
1442}
/// Generated client implementations.
pub mod knowledge_base_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    // Wildcard import pulls in tonic's codegen prelude (Body, Bytes, StdError,
    // GrpcMethod, InterceptedService, ...), as is conventional for generated code.
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    /// KnowledgeBaseService provides AI-powered knowledge base management
    ///
    /// Cloneable wrapper around `tonic::client::Grpc` exposing one async
    /// method per RPC of `nominal.ai.v1.KnowledgeBaseService`.
    #[derive(Debug, Clone)]
    pub struct KnowledgeBaseServiceClient<T> {
        /// Underlying tonic gRPC machinery; `T` is the HTTP transport/service.
        inner: tonic::client::Grpc<T>,
    }
    impl KnowledgeBaseServiceClient<tonic::transport::Channel> {
        /// Attempt to create a new client by connecting to a given endpoint.
        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
        where
            D: TryInto<tonic::transport::Endpoint>,
            D::Error: Into<StdError>,
        {
            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
            Ok(Self::new(conn))
        }
    }
    impl<T> KnowledgeBaseServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::Body>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        /// Wraps an already-constructed transport/service `inner` in a client.
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        /// Like `new`, but routes all requests to the given `origin` URI.
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        /// Wraps the transport with `interceptor`, which runs on every outgoing
        /// request (commonly used to attach auth metadata or tracing headers).
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> KnowledgeBaseServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::Body>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::Body>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            KnowledgeBaseServiceClient::new(InterceptedService::new(inner, interceptor))
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// CreateOrUpdateKnowledgeBase creates a knowledge base in the workspace
        ///
        /// All RPC methods below follow the same unary-call pattern:
        /// wait for transport readiness, build a prost codec and the RPC path,
        /// tag the request with its `GrpcMethod`, then dispatch.
        pub async fn create_or_update_knowledge_base(
            &mut self,
            request: impl tonic::IntoRequest<super::CreateOrUpdateKnowledgeBaseRequest>,
        ) -> std::result::Result<
            tonic::Response<super::CreateOrUpdateKnowledgeBaseResponse>,
            tonic::Status,
        > {
            // Fail fast if the transport is not ready; surface transport errors
            // as a gRPC `unknown` status.
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            // Prost handles (de)serialization of the protobuf messages.
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/CreateOrUpdateKnowledgeBase",
            );
            let mut req = request.into_request();
            // Record the service/method name in the request extensions so
            // middleware (e.g. interceptors, tracing) can identify this RPC.
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.KnowledgeBaseService",
                        "CreateOrUpdateKnowledgeBase",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
        /// List returns all knowledge bases in the specified workspace
        pub async fn list(
            &mut self,
            request: impl tonic::IntoRequest<super::ListRequest>,
        ) -> std::result::Result<tonic::Response<super::ListResponse>, tonic::Status> {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/List",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "List"));
            self.inner.unary(req, path, codec).await
        }
        /// Delete removes a knowledge base by its RID
        pub async fn delete(
            &mut self,
            request: impl tonic::IntoRequest<super::DeleteRequest>,
        ) -> std::result::Result<tonic::Response<super::DeleteResponse>, tonic::Status> {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/Delete",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "Delete"));
            self.inner.unary(req, path, codec).await
        }
        /// GetBatch retrieves multiple knowledge bases by their RIDs
        pub async fn get_batch(
            &mut self,
            request: impl tonic::IntoRequest<super::GetBatchRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GetBatchResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/GetBatch",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new("nominal.ai.v1.KnowledgeBaseService", "GetBatch"),
                );
            self.inner.unary(req, path, codec).await
        }
        /// GenerateSummaryDescription generates a summary description for an attachment rid
        pub async fn generate_summary_description(
            &mut self,
            request: impl tonic::IntoRequest<super::GenerateSummaryDescriptionRequest>,
        ) -> std::result::Result<
            tonic::Response<super::GenerateSummaryDescriptionResponse>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/nominal.ai.v1.KnowledgeBaseService/GenerateSummaryDescription",
            );
            let mut req = request.into_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "nominal.ai.v1.KnowledgeBaseService",
                        "GenerateSummaryDescription",
                    ),
                );
            self.inner.unary(req, path, codec).await
        }
    }
}