request_example/
request_example.rs
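
A minimal example of the Request client from service_utils_rs: it sends a POST
and a GET to https://jsonplaceholder.typicode.com, then streams a chat
completion from a local Ollama server.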

use std::{io::Write, time::Duration};

use futures_util::StreamExt;
use serde_json::json;
use service_utils_rs::{error::Result, utils::Request};
use tokio::time::sleep;

#[tokio::main]
async fn main() -> Result<()> {
    // Create a new Request instance
    let mut client = Request::new();

    // Set the base URL
    client.set_base_url("https://jsonplaceholder.typicode.com")?;

    // Set the default request headers
    let mut default_headers = Vec::new();
    default_headers.push(("Content-Type", "application/json".to_string()));
    client.set_default_headers(default_headers)?;

    // Define custom request headers
    let mut custom_headers = Vec::new();
    custom_headers.push(("Authorization", "Bearer some_token".to_string()));
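    // NOTE: these per-request headers are passed to each call below;
    // presumably the Request type merges them with the defaults set above,
    // but check the service_utils_rs implementation if precedence matters.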

    // Create a POST request body
    let body = json!({
        "title": "foo",
        "body": "bar",
        "userId": 1
    });

    // Send a POST request
    let response = client
        .post("/posts", &body, Some(custom_headers.clone()))
        .await?;

    println!("POST Response: {:?}", response.status());
    let response_body = response.text().await?;
    println!("Response Body: {}", response_body);

    // Send a GET request
    let response = client
        .get("/posts/1", None, Some(custom_headers.clone()))
        .await?;

    println!("GET Response: {:?}", response.status());
    let response_body = response.text().await?;
    println!("Response Body: {}", response_body);
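
    // The second argument to get() (None here) presumably holds query
    // parameters; this example does not use any.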

    // Set the base URL for the streaming request to a local Ollama server
    client.set_base_url("http://localhost:11434")?;
    let stream_headers = vec![("Content-Type", "application/json".to_string())];
    client.set_default_headers(stream_headers)?;

    // Build the Ollama chat request body
    let stream_body = json!({
        "model": "llama3.2",
        "stream": true,
        "messages": [
            {"role": "user", "content": "Hello, who are you?"}
        ]
    });
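
    // With "stream": true, Ollama's /api/chat endpoint returns
    // newline-delimited JSON: one object per line, each carrying a chunk of
    // message.content, plus a final object with "done": true.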
    let mut stream = client.post_stream("/api/chat", &stream_body, None).await?;

    println!("Streaming Response:");
    while let Some(chunk) = stream.next().await {
        let data = chunk?;
        // from_utf8_lossy avoids a panic if a chunk splits a UTF-8 character
        let s = String::from_utf8_lossy(&data);
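
        // NOTE: this assumes each chunk ends on a line boundary; a JSON
        // object split across two chunks would fail to parse below. A robust
        // client would buffer partial lines between chunks.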

        for line in s.lines().filter(|l| !l.trim().is_empty()) {
            match serde_json::from_str::<serde_json::Value>(line) {
                Ok(json) => {
                    // Print each content fragment as it arrives
                    if let Some(content) = json["message"]["content"].as_str() {
                        print!("{}", content);
                        std::io::stdout().flush().unwrap();
                    }
                    // The final object carries "done": true
                    if json["done"] == true {
                        println!();
                        break;
                    }
                }
                Err(err) => {
                    eprintln!("Parse error: {}", err);
                }
            }
        }

        // Optional: pause briefly so the stream isn't pulled faster than it
        // can be displayed
        sleep(Duration::from_millis(20)).await;
    }

    Ok(())
}

// cargo run --example request_example --features request
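// Requires a running Ollama server on localhost:11434 with the llama3.2
// model available locally (e.g. via `ollama pull llama3.2`).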