// NOTE(review): removed 49 stray lines that contained only sequential line
// numbers (an artifact of file extraction) — they are not valid Rust.
use std::env;
use std::io::Write;

use futures::StreamExt;

use agentix::{LlmEvent, Request};
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// You can swap this to Provider::OpenAI, Provider::Anthropic, etc.
let api_key = env::var("DEEPSEEK_API_KEY")
.expect("DEEPSEEK_API_KEY must be set in your environment variables");
let http = reqwest::Client::new();
println!("Sending request to DeepSeek...");
let mut stream = Request::deepseek(api_key)
.system_prompt("You are a helpful and concise AI assistant.")
.user("Write a short haiku about Rust programming.")
.stream(&http)
.await?;
println!("\nResponse:");
// Process the stream of events
while let Some(event) = stream.next().await {
match event {
LlmEvent::Reasoning(r) => {
// Print reasoning in cyan if the model supports it (e.g., DeepSeek R1)
print!("\x1b[36m{r}\x1b[0m");
}
LlmEvent::Token(t) => {
// Print standard output tokens
print!("{t}");
}
LlmEvent::Done => {
break;
}
LlmEvent::Error(e) => {
eprintln!("\nError encountered: {e}");
}
_ => {
// Ignore other events like ToolCall or Usage for this basic example
}
}
}
println!("\n");
Ok(())
}