use crate::protocols;

pub use protocols::{Annotated, TokenIdType};

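/// Engine type aliases for the OpenAI-compatible protocols. Each submodule
/// re-exports its request/response types and defines a unary (single
/// response) and a server-streaming (incremental response) engine alias.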
pub mod openai {
    use super::*;
    use dynamo_runtime::pipeline::{ServerStreamingEngine, UnaryEngine};

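    /// Text completions request/response types and engine aliases.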
    pub mod completions {
        use super::*;

        pub use protocols::openai::completions::{
            NvCreateCompletionRequest, NvCreateCompletionResponse,
        };

        /// One completion request in, one complete response out.
        pub type OpenAICompletionsUnaryEngine =
            UnaryEngine<NvCreateCompletionRequest, NvCreateCompletionResponse>;

        /// One completion request in, a stream of [`Annotated`] response chunks out.
        pub type OpenAICompletionsStreamingEngine =
            ServerStreamingEngine<NvCreateCompletionRequest, Annotated<NvCreateCompletionResponse>>;
    }

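    /// Chat completions request/response types and engine aliases; the
    /// streaming engine yields [`NvCreateChatCompletionStreamResponse`] chunks.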
    pub mod chat_completions {
        use super::*;

        pub use protocols::openai::chat_completions::{
            NvCreateChatCompletionRequest, NvCreateChatCompletionResponse,
            NvCreateChatCompletionStreamResponse,
        };

        /// One chat request in, one complete response out.
        pub type OpenAIChatCompletionsUnaryEngine =
            UnaryEngine<NvCreateChatCompletionRequest, NvCreateChatCompletionResponse>;

        /// One chat request in, a stream of [`Annotated`] stream-response chunks out.
        pub type OpenAIChatCompletionsStreamingEngine = ServerStreamingEngine<
            NvCreateChatCompletionRequest,
            Annotated<NvCreateChatCompletionStreamResponse>,
        >;
    }

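    /// Embeddings request/response types and engine aliases.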
    pub mod embeddings {
        use super::*;

        pub use protocols::openai::embeddings::{
            NvCreateEmbeddingRequest, NvCreateEmbeddingResponse,
        };

        /// One embedding request in, one complete response out.
        pub type OpenAIEmbeddingsUnaryEngine =
            UnaryEngine<NvCreateEmbeddingRequest, NvCreateEmbeddingResponse>;

        /// One embedding request in, a stream of [`Annotated`] response chunks out.
        pub type OpenAIEmbeddingsStreamingEngine =
            ServerStreamingEngine<NvCreateEmbeddingRequest, Annotated<NvCreateEmbeddingResponse>>;
    }
}

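/// Engine type aliases for generic (non-OpenAI) protocols.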
pub mod generic {
    use super::*;
    use dynamo_runtime::pipeline::{ServerStreamingEngine, UnaryEngine};

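    /// Tensor request/response types and engine aliases.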
    pub mod tensor {
        use super::*;

        pub use protocols::tensor::{NvCreateTensorRequest, NvCreateTensorResponse};

        /// One tensor request in, one complete response out.
        pub type TensorUnaryEngine = UnaryEngine<NvCreateTensorRequest, NvCreateTensorResponse>;

        /// One tensor request in, a stream of [`Annotated`] response chunks out.
        pub type TensorStreamingEngine =
            ServerStreamingEngine<NvCreateTensorRequest, Annotated<NvCreateTensorResponse>>;
    }
}