omni_llm_kit/
lib.rs
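
//! omni_llm_kit: a unified interface over OpenAI- and Anthropic-style LLM
//! APIs, backed by a reqwest HTTP client, with streaming completions.
//!
//! A minimal sketch of the intended flow, based on the tests at the bottom
//! of this file (names as re-exported below):
//!
//! ```ignore
//! let client = Arc::new(ReqwestClient::new());
//! let provider = OpenAiLanguageModelProvider::new(client);
//! let model = provider.create_language_model(/* model config */);
//! let mut stream = model.stream_completion(request).await?;
//! ```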

mod common;
mod http_client;
pub mod model;
mod models;
mod reqwest_client;
pub mod anthropic;
pub mod openai;
mod tool;

pub use http_client::*;
pub use model::*;
pub use models::*;
pub use reqwest_client::*;
pub use tool::*;
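
// Integration-style tests: they call live endpoints and read credentials from
// the environment (optionally via a .env file), so they need network access.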
#[cfg(test)]
mod tests {
    use std::sync::Arc;

    use futures_util::StreamExt;

    use crate::anthropic::AnthropicModelMode;
    use crate::model::{LanguageModelRequest, LanguageModelRequestMessage, MessageContent, Role};
    use crate::models::{AnthropicLanguageModelProvider, OpenAiLanguageModelProvider};
    use crate::openai::Model;
    use crate::{anthropic, reqwest_client, AnthropicSettings};

    #[tokio::test]
    async fn test_openai_language_model() {
        // Load API credentials from a .env file if one is present.
        dotenvy::dotenv().ok();
        let client = Arc::new(reqwest_client::ReqwestClient::new());
        let provider = OpenAiLanguageModelProvider::new(client);
        let model = provider.create_language_model(Model::Custom {
            name: "kimi-k2-turbo-preview".to_string(),
            display_name: Some("kimi-k2-turbo-preview".into()),
            max_tokens: 0,
            max_output_tokens: None,
            max_completion_tokens: None,
        });

        // Build a single-turn request with one user message.
        let req = LanguageModelRequest {
            messages: vec![LanguageModelRequestMessage {
                role: Role::User,
                content: vec![MessageContent::Text(
                    "Explain why 1 + 1 = 2. Think deeply.".into(),
                )],
                cache: false,
            }],
            ..Default::default()
        };
        // Drain the event stream, printing each event as it arrives.
        let mut stream = model.stream_completion(req).await.unwrap();
        while let Some(it) = stream.next().await {
            match it {
                Ok(event) => println!("Event: {:?}", event),
                Err(e) => eprintln!("Error: {:?}", e),
            }
        }
    }

    #[tokio::test]
    async fn test_anthropic_language_model() {
        dotenvy::dotenv().ok();

        // Read the endpoint and key from the environment and register the
        // settings globally so the provider can pick them up.
        let api_url = std::env::var("ANTHROPIC_API_BASE_URL")
            .expect("ANTHROPIC_API_BASE_URL must be set");
        let api_key = std::env::var("ANTHROPIC_API_KEY")
            .expect("ANTHROPIC_API_KEY must be set");
        let anthropic_settings = AnthropicSettings { api_url, api_key };
        global_registry::register_arc!(AnthropicSettings, anthropic_settings);

        let client = Arc::new(reqwest_client::ReqwestClient::new());
        let provider = AnthropicLanguageModelProvider::new(client);
        let model = provider.create_language_model(anthropic::Model::Custom {
            name: "moonshot-v1-8k".to_string(),
            display_name: Some("kimi-k2-turbo-preview".into()),
            tool_override: None,
            max_tokens: 0,
            max_output_tokens: None,
            default_temperature: None,
            extra_beta_headers: vec![],
            cache_configuration: None,
            // Request extended-thinking output with the provider's default budget.
            mode: AnthropicModelMode::Thinking { budget_tokens: None },
        });

        // Build a single-turn request with one user message.
        let req = LanguageModelRequest {
            messages: vec![LanguageModelRequestMessage {
                role: Role::User,
                content: vec![MessageContent::Text("who are you".into())],
                cache: false,
            }],
            ..Default::default()
        };

        // Drain the event stream, printing each event as it arrives.
        let mut stream = model.stream_completion(req).await.unwrap();
        while let Some(it) = stream.next().await {
            match it {
                Ok(event) => println!("Event: {:?}", event),
                Err(e) => eprintln!("Error: {:?}", e),
            }
        }
    }
}