Request

Struct Request 

Source
pub struct Request { /* private fields */ }
Expand description

An HTTP request builder and executor with base URL and default headers.

Implementations§

Source§

impl Request

Source

pub fn new() -> Self

Create a new Request client.

Examples found in repository?
examples/llm_stream_req.rs (line 11)
9async fn main() -> Result<()> {
10    // 创建一个新的 Request 实例
11    let mut client = Request::new();
12
13    // 设置 Ollama 流式请求 base_url
14    client.set_base_url("http://localhost:11434")?;
15    let stream_headers = vec![("Content-Type", "application/json".to_string())];
16    client.set_default_headers(stream_headers)?;
17
18    // 构造 Ollama 请求体
19    let stream_body = json!({
20        "model": "llama3.2",
21        "stream": true,
22        "messages": [
23            {"role": "user", "content": "Hello, who are you?"}
24        ]
25    });
26
27    let mut stream = client.post_stream("api/chat", &stream_body, None).await?;
28
29    println!("Streaming Response:");
30    while let Some(chunk) = stream.next().await {
31        let data = chunk?;
32        let s = std::str::from_utf8(&data).unwrap();
33
34        for line in s.lines().filter(|l| !l.trim().is_empty()) {
35            match serde_json::from_str::<serde_json::Value>(line) {
36                Ok(json) => {
37                    if let Some(content) = json["message"]["content"].as_str() {
38                        print!("{}", content);
39                        std::io::stdout().flush().unwrap();
40                    }
41                    if json["done"] == true {
42                        println!();
43                        break;
44                    }
45                }
46                Err(err) => {
47                    eprintln!("Parse error: {}", err);
48                }
49            }
50        }
51
52        // 可选:稍作等待,避免拉取过快影响显示
53        sleep(Duration::from_millis(20)).await;
54    }
55
56    Ok(())
57}
More examples
Hide additional examples
examples/request_example.rs (line 11)
9async fn main() -> Result<()> {
10    // 创建一个新的 Request 实例
11    let mut client = Request::new();
12
13    // 设置 base_url
14    client.set_base_url("https://jsonplaceholder.typicode.com")?;
15
16    // 设置默认的请求头
17    let mut default_headers = Vec::new();
18    default_headers.push(("Content-Type", "application/json".to_string()));
19    client.set_default_headers(default_headers)?;
20
21    // 定义自定义请求头
22    let mut custom_headers = Vec::new();
23    custom_headers.push(("Authorization", "Bearer some_token".to_string()));
24
25    // 创建一个 POST 请求体
26    let body = json!({
27        "title": "foo",
28        "body": "bar",
29        "userId": 1
30    });
31
32    // 发送 POST 请求
33    let response = client
34        .post("/posts", &body, Some(custom_headers.clone()))
35        .await?;
36
37    println!("POST Response: {:?}", response.status());
38    let response_body = response.text().await?;
39    println!("Response Body: {}", response_body);
40
41    // 发送 GET 请求
42    let response = client
43        .get("/posts/1", None, Some(custom_headers.clone()))
44        .await?;
45
46    println!("GET Response: {:?}", response.status());
47    let response_body = response.text().await?;
48    println!("Response Body: {}", response_body);
49
50    // 设置 Ollama 流式请求 base_url
51    client.set_base_url("http://localhost:11434")?;
52    let stream_headers = vec![("Content-Type", "application/json".to_string())];
53    client.set_default_headers(stream_headers)?;
54
55    // 构造 Ollama 请求体
56    let stream_body = json!({
57        "model": "llama3.2",
58        "stream": true,
59        "messages": [
60            {"role": "user", "content": "Hello, who are you?"}
61        ]
62    });
63
64    let mut stream = client.post_stream("api/chat", &stream_body, None).await?;
65
66    println!("Streaming Response:");
67    while let Some(chunk) = stream.next().await {
68        let data = chunk?;
69        let s = std::str::from_utf8(&data).unwrap();
70
71        for line in s.lines().filter(|l| !l.trim().is_empty()) {
72            match serde_json::from_str::<serde_json::Value>(line) {
73                Ok(json) => {
74                    if let Some(content) = json["message"]["content"].as_str() {
75                        print!("{}", content);
76                        std::io::stdout().flush().unwrap();
77                    }
78                    if json["done"] == true {
79                        println!();
80                        break;
81                    }
82                }
83                Err(err) => {
84                    eprintln!("Parse error: {}", err);
85                }
86            }
87        }
88
89        // 可选:稍作等待,避免拉取过快影响显示
90        sleep(Duration::from_millis(20)).await;
91    }
92
93    Ok(())
94}
Source

pub fn with_timeout(timeout_sec: u64) -> Result<Self>

Source

pub fn set_base_url(&mut self, base_url: &str) -> Result<()>

Set the base URL for all requests.

Examples found in repository?
examples/llm_stream_req.rs (line 14)
9async fn main() -> Result<()> {
10    // 创建一个新的 Request 实例
11    let mut client = Request::new();
12
13    // 设置 Ollama 流式请求 base_url
14    client.set_base_url("http://localhost:11434")?;
15    let stream_headers = vec![("Content-Type", "application/json".to_string())];
16    client.set_default_headers(stream_headers)?;
17
18    // 构造 Ollama 请求体
19    let stream_body = json!({
20        "model": "llama3.2",
21        "stream": true,
22        "messages": [
23            {"role": "user", "content": "Hello, who are you?"}
24        ]
25    });
26
27    let mut stream = client.post_stream("api/chat", &stream_body, None).await?;
28
29    println!("Streaming Response:");
30    while let Some(chunk) = stream.next().await {
31        let data = chunk?;
32        let s = std::str::from_utf8(&data).unwrap();
33
34        for line in s.lines().filter(|l| !l.trim().is_empty()) {
35            match serde_json::from_str::<serde_json::Value>(line) {
36                Ok(json) => {
37                    if let Some(content) = json["message"]["content"].as_str() {
38                        print!("{}", content);
39                        std::io::stdout().flush().unwrap();
40                    }
41                    if json["done"] == true {
42                        println!();
43                        break;
44                    }
45                }
46                Err(err) => {
47                    eprintln!("Parse error: {}", err);
48                }
49            }
50        }
51
52        // 可选:稍作等待,避免拉取过快影响显示
53        sleep(Duration::from_millis(20)).await;
54    }
55
56    Ok(())
57}
More examples
Hide additional examples
examples/request_example.rs (line 14)
9async fn main() -> Result<()> {
10    // 创建一个新的 Request 实例
11    let mut client = Request::new();
12
13    // 设置 base_url
14    client.set_base_url("https://jsonplaceholder.typicode.com")?;
15
16    // 设置默认的请求头
17    let mut default_headers = Vec::new();
18    default_headers.push(("Content-Type", "application/json".to_string()));
19    client.set_default_headers(default_headers)?;
20
21    // 定义自定义请求头
22    let mut custom_headers = Vec::new();
23    custom_headers.push(("Authorization", "Bearer some_token".to_string()));
24
25    // 创建一个 POST 请求体
26    let body = json!({
27        "title": "foo",
28        "body": "bar",
29        "userId": 1
30    });
31
32    // 发送 POST 请求
33    let response = client
34        .post("/posts", &body, Some(custom_headers.clone()))
35        .await?;
36
37    println!("POST Response: {:?}", response.status());
38    let response_body = response.text().await?;
39    println!("Response Body: {}", response_body);
40
41    // 发送 GET 请求
42    let response = client
43        .get("/posts/1", None, Some(custom_headers.clone()))
44        .await?;
45
46    println!("GET Response: {:?}", response.status());
47    let response_body = response.text().await?;
48    println!("Response Body: {}", response_body);
49
50    // 设置 Ollama 流式请求 base_url
51    client.set_base_url("http://localhost:11434")?;
52    let stream_headers = vec![("Content-Type", "application/json".to_string())];
53    client.set_default_headers(stream_headers)?;
54
55    // 构造 Ollama 请求体
56    let stream_body = json!({
57        "model": "llama3.2",
58        "stream": true,
59        "messages": [
60            {"role": "user", "content": "Hello, who are you?"}
61        ]
62    });
63
64    let mut stream = client.post_stream("api/chat", &stream_body, None).await?;
65
66    println!("Streaming Response:");
67    while let Some(chunk) = stream.next().await {
68        let data = chunk?;
69        let s = std::str::from_utf8(&data).unwrap();
70
71        for line in s.lines().filter(|l| !l.trim().is_empty()) {
72            match serde_json::from_str::<serde_json::Value>(line) {
73                Ok(json) => {
74                    if let Some(content) = json["message"]["content"].as_str() {
75                        print!("{}", content);
76                        std::io::stdout().flush().unwrap();
77                    }
78                    if json["done"] == true {
79                        println!();
80                        break;
81                    }
82                }
83                Err(err) => {
84                    eprintln!("Parse error: {}", err);
85                }
86            }
87        }
88
89        // 可选:稍作等待,避免拉取过快影响显示
90        sleep(Duration::from_millis(20)).await;
91    }
92
93    Ok(())
94}
Source

pub fn set_default_headers( &mut self, headers: Vec<(&'static str, String)>, ) -> Result<()>

Set default headers to be applied on all requests.

Examples found in repository?
examples/llm_stream_req.rs (line 16)
9async fn main() -> Result<()> {
10    // 创建一个新的 Request 实例
11    let mut client = Request::new();
12
13    // 设置 Ollama 流式请求 base_url
14    client.set_base_url("http://localhost:11434")?;
15    let stream_headers = vec![("Content-Type", "application/json".to_string())];
16    client.set_default_headers(stream_headers)?;
17
18    // 构造 Ollama 请求体
19    let stream_body = json!({
20        "model": "llama3.2",
21        "stream": true,
22        "messages": [
23            {"role": "user", "content": "Hello, who are you?"}
24        ]
25    });
26
27    let mut stream = client.post_stream("api/chat", &stream_body, None).await?;
28
29    println!("Streaming Response:");
30    while let Some(chunk) = stream.next().await {
31        let data = chunk?;
32        let s = std::str::from_utf8(&data).unwrap();
33
34        for line in s.lines().filter(|l| !l.trim().is_empty()) {
35            match serde_json::from_str::<serde_json::Value>(line) {
36                Ok(json) => {
37                    if let Some(content) = json["message"]["content"].as_str() {
38                        print!("{}", content);
39                        std::io::stdout().flush().unwrap();
40                    }
41                    if json["done"] == true {
42                        println!();
43                        break;
44                    }
45                }
46                Err(err) => {
47                    eprintln!("Parse error: {}", err);
48                }
49            }
50        }
51
52        // 可选:稍作等待,避免拉取过快影响显示
53        sleep(Duration::from_millis(20)).await;
54    }
55
56    Ok(())
57}
More examples
Hide additional examples
examples/request_example.rs (line 19)
9async fn main() -> Result<()> {
10    // 创建一个新的 Request 实例
11    let mut client = Request::new();
12
13    // 设置 base_url
14    client.set_base_url("https://jsonplaceholder.typicode.com")?;
15
16    // 设置默认的请求头
17    let mut default_headers = Vec::new();
18    default_headers.push(("Content-Type", "application/json".to_string()));
19    client.set_default_headers(default_headers)?;
20
21    // 定义自定义请求头
22    let mut custom_headers = Vec::new();
23    custom_headers.push(("Authorization", "Bearer some_token".to_string()));
24
25    // 创建一个 POST 请求体
26    let body = json!({
27        "title": "foo",
28        "body": "bar",
29        "userId": 1
30    });
31
32    // 发送 POST 请求
33    let response = client
34        .post("/posts", &body, Some(custom_headers.clone()))
35        .await?;
36
37    println!("POST Response: {:?}", response.status());
38    let response_body = response.text().await?;
39    println!("Response Body: {}", response_body);
40
41    // 发送 GET 请求
42    let response = client
43        .get("/posts/1", None, Some(custom_headers.clone()))
44        .await?;
45
46    println!("GET Response: {:?}", response.status());
47    let response_body = response.text().await?;
48    println!("Response Body: {}", response_body);
49
50    // 设置 Ollama 流式请求 base_url
51    client.set_base_url("http://localhost:11434")?;
52    let stream_headers = vec![("Content-Type", "application/json".to_string())];
53    client.set_default_headers(stream_headers)?;
54
55    // 构造 Ollama 请求体
56    let stream_body = json!({
57        "model": "llama3.2",
58        "stream": true,
59        "messages": [
60            {"role": "user", "content": "Hello, who are you?"}
61        ]
62    });
63
64    let mut stream = client.post_stream("api/chat", &stream_body, None).await?;
65
66    println!("Streaming Response:");
67    while let Some(chunk) = stream.next().await {
68        let data = chunk?;
69        let s = std::str::from_utf8(&data).unwrap();
70
71        for line in s.lines().filter(|l| !l.trim().is_empty()) {
72            match serde_json::from_str::<serde_json::Value>(line) {
73                Ok(json) => {
74                    if let Some(content) = json["message"]["content"].as_str() {
75                        print!("{}", content);
76                        std::io::stdout().flush().unwrap();
77                    }
78                    if json["done"] == true {
79                        println!();
80                        break;
81                    }
82                }
83                Err(err) => {
84                    eprintln!("Parse error: {}", err);
85                }
86            }
87        }
88
89        // 可选:稍作等待,避免拉取过快影响显示
90        sleep(Duration::from_millis(20)).await;
91    }
92
93    Ok(())
94}
Source

pub async fn get( &self, endpoint: &str, query: Option<Vec<(String, String)>>, headers: Option<Vec<(&'static str, String)>>, ) -> Result<Response>

Send a GET request.

Examples found in repository?
examples/request_example.rs (line 43)
9async fn main() -> Result<()> {
10    // 创建一个新的 Request 实例
11    let mut client = Request::new();
12
13    // 设置 base_url
14    client.set_base_url("https://jsonplaceholder.typicode.com")?;
15
16    // 设置默认的请求头
17    let mut default_headers = Vec::new();
18    default_headers.push(("Content-Type", "application/json".to_string()));
19    client.set_default_headers(default_headers)?;
20
21    // 定义自定义请求头
22    let mut custom_headers = Vec::new();
23    custom_headers.push(("Authorization", "Bearer some_token".to_string()));
24
25    // 创建一个 POST 请求体
26    let body = json!({
27        "title": "foo",
28        "body": "bar",
29        "userId": 1
30    });
31
32    // 发送 POST 请求
33    let response = client
34        .post("/posts", &body, Some(custom_headers.clone()))
35        .await?;
36
37    println!("POST Response: {:?}", response.status());
38    let response_body = response.text().await?;
39    println!("Response Body: {}", response_body);
40
41    // 发送 GET 请求
42    let response = client
43        .get("/posts/1", None, Some(custom_headers.clone()))
44        .await?;
45
46    println!("GET Response: {:?}", response.status());
47    let response_body = response.text().await?;
48    println!("Response Body: {}", response_body);
49
50    // 设置 Ollama 流式请求 base_url
51    client.set_base_url("http://localhost:11434")?;
52    let stream_headers = vec![("Content-Type", "application/json".to_string())];
53    client.set_default_headers(stream_headers)?;
54
55    // 构造 Ollama 请求体
56    let stream_body = json!({
57        "model": "llama3.2",
58        "stream": true,
59        "messages": [
60            {"role": "user", "content": "Hello, who are you?"}
61        ]
62    });
63
64    let mut stream = client.post_stream("api/chat", &stream_body, None).await?;
65
66    println!("Streaming Response:");
67    while let Some(chunk) = stream.next().await {
68        let data = chunk?;
69        let s = std::str::from_utf8(&data).unwrap();
70
71        for line in s.lines().filter(|l| !l.trim().is_empty()) {
72            match serde_json::from_str::<serde_json::Value>(line) {
73                Ok(json) => {
74                    if let Some(content) = json["message"]["content"].as_str() {
75                        print!("{}", content);
76                        std::io::stdout().flush().unwrap();
77                    }
78                    if json["done"] == true {
79                        println!();
80                        break;
81                    }
82                }
83                Err(err) => {
84                    eprintln!("Parse error: {}", err);
85                }
86            }
87        }
88
89        // 可选:稍作等待,避免拉取过快影响显示
90        sleep(Duration::from_millis(20)).await;
91    }
92
93    Ok(())
94}
Source

pub async fn post( &self, endpoint: &str, body: &Value, headers: Option<Vec<(&'static str, String)>>, ) -> Result<Response>

Send a POST request with JSON body.

Examples found in repository?
examples/request_example.rs (line 34)
9async fn main() -> Result<()> {
10    // 创建一个新的 Request 实例
11    let mut client = Request::new();
12
13    // 设置 base_url
14    client.set_base_url("https://jsonplaceholder.typicode.com")?;
15
16    // 设置默认的请求头
17    let mut default_headers = Vec::new();
18    default_headers.push(("Content-Type", "application/json".to_string()));
19    client.set_default_headers(default_headers)?;
20
21    // 定义自定义请求头
22    let mut custom_headers = Vec::new();
23    custom_headers.push(("Authorization", "Bearer some_token".to_string()));
24
25    // 创建一个 POST 请求体
26    let body = json!({
27        "title": "foo",
28        "body": "bar",
29        "userId": 1
30    });
31
32    // 发送 POST 请求
33    let response = client
34        .post("/posts", &body, Some(custom_headers.clone()))
35        .await?;
36
37    println!("POST Response: {:?}", response.status());
38    let response_body = response.text().await?;
39    println!("Response Body: {}", response_body);
40
41    // 发送 GET 请求
42    let response = client
43        .get("/posts/1", None, Some(custom_headers.clone()))
44        .await?;
45
46    println!("GET Response: {:?}", response.status());
47    let response_body = response.text().await?;
48    println!("Response Body: {}", response_body);
49
50    // 设置 Ollama 流式请求 base_url
51    client.set_base_url("http://localhost:11434")?;
52    let stream_headers = vec![("Content-Type", "application/json".to_string())];
53    client.set_default_headers(stream_headers)?;
54
55    // 构造 Ollama 请求体
56    let stream_body = json!({
57        "model": "llama3.2",
58        "stream": true,
59        "messages": [
60            {"role": "user", "content": "Hello, who are you?"}
61        ]
62    });
63
64    let mut stream = client.post_stream("api/chat", &stream_body, None).await?;
65
66    println!("Streaming Response:");
67    while let Some(chunk) = stream.next().await {
68        let data = chunk?;
69        let s = std::str::from_utf8(&data).unwrap();
70
71        for line in s.lines().filter(|l| !l.trim().is_empty()) {
72            match serde_json::from_str::<serde_json::Value>(line) {
73                Ok(json) => {
74                    if let Some(content) = json["message"]["content"].as_str() {
75                        print!("{}", content);
76                        std::io::stdout().flush().unwrap();
77                    }
78                    if json["done"] == true {
79                        println!();
80                        break;
81                    }
82                }
83                Err(err) => {
84                    eprintln!("Parse error: {}", err);
85                }
86            }
87        }
88
89        // 可选:稍作等待,避免拉取过快影响显示
90        sleep(Duration::from_millis(20)).await;
91    }
92
93    Ok(())
94}
Source

pub async fn put( &self, endpoint: &str, body: &Value, headers: Option<Vec<(&'static str, String)>>, ) -> Result<Response>

Send a PUT request with JSON body.

Source

pub async fn delete( &self, endpoint: &str, headers: Option<Vec<(&'static str, String)>>, ) -> Result<Response>

Send a DELETE request.

Source

pub async fn post_stream( &self, endpoint: &str, body: &Value, headers: Option<Vec<(&'static str, String)>>, ) -> Result<ByteStream>

Send a streaming POST request and return the response stream.

Examples found in repository?
examples/llm_stream_req.rs (line 27)
9async fn main() -> Result<()> {
10    // 创建一个新的 Request 实例
11    let mut client = Request::new();
12
13    // 设置 Ollama 流式请求 base_url
14    client.set_base_url("http://localhost:11434")?;
15    let stream_headers = vec![("Content-Type", "application/json".to_string())];
16    client.set_default_headers(stream_headers)?;
17
18    // 构造 Ollama 请求体
19    let stream_body = json!({
20        "model": "llama3.2",
21        "stream": true,
22        "messages": [
23            {"role": "user", "content": "Hello, who are you?"}
24        ]
25    });
26
27    let mut stream = client.post_stream("api/chat", &stream_body, None).await?;
28
29    println!("Streaming Response:");
30    while let Some(chunk) = stream.next().await {
31        let data = chunk?;
32        let s = std::str::from_utf8(&data).unwrap();
33
34        for line in s.lines().filter(|l| !l.trim().is_empty()) {
35            match serde_json::from_str::<serde_json::Value>(line) {
36                Ok(json) => {
37                    if let Some(content) = json["message"]["content"].as_str() {
38                        print!("{}", content);
39                        std::io::stdout().flush().unwrap();
40                    }
41                    if json["done"] == true {
42                        println!();
43                        break;
44                    }
45                }
46                Err(err) => {
47                    eprintln!("Parse error: {}", err);
48                }
49            }
50        }
51
52        // 可选:稍作等待,避免拉取过快影响显示
53        sleep(Duration::from_millis(20)).await;
54    }
55
56    Ok(())
57}
More examples
Hide additional examples
examples/request_example.rs (line 64)
9async fn main() -> Result<()> {
10    // 创建一个新的 Request 实例
11    let mut client = Request::new();
12
13    // 设置 base_url
14    client.set_base_url("https://jsonplaceholder.typicode.com")?;
15
16    // 设置默认的请求头
17    let mut default_headers = Vec::new();
18    default_headers.push(("Content-Type", "application/json".to_string()));
19    client.set_default_headers(default_headers)?;
20
21    // 定义自定义请求头
22    let mut custom_headers = Vec::new();
23    custom_headers.push(("Authorization", "Bearer some_token".to_string()));
24
25    // 创建一个 POST 请求体
26    let body = json!({
27        "title": "foo",
28        "body": "bar",
29        "userId": 1
30    });
31
32    // 发送 POST 请求
33    let response = client
34        .post("/posts", &body, Some(custom_headers.clone()))
35        .await?;
36
37    println!("POST Response: {:?}", response.status());
38    let response_body = response.text().await?;
39    println!("Response Body: {}", response_body);
40
41    // 发送 GET 请求
42    let response = client
43        .get("/posts/1", None, Some(custom_headers.clone()))
44        .await?;
45
46    println!("GET Response: {:?}", response.status());
47    let response_body = response.text().await?;
48    println!("Response Body: {}", response_body);
49
50    // 设置 Ollama 流式请求 base_url
51    client.set_base_url("http://localhost:11434")?;
52    let stream_headers = vec![("Content-Type", "application/json".to_string())];
53    client.set_default_headers(stream_headers)?;
54
55    // 构造 Ollama 请求体
56    let stream_body = json!({
57        "model": "llama3.2",
58        "stream": true,
59        "messages": [
60            {"role": "user", "content": "Hello, who are you?"}
61        ]
62    });
63
64    let mut stream = client.post_stream("api/chat", &stream_body, None).await?;
65
66    println!("Streaming Response:");
67    while let Some(chunk) = stream.next().await {
68        let data = chunk?;
69        let s = std::str::from_utf8(&data).unwrap();
70
71        for line in s.lines().filter(|l| !l.trim().is_empty()) {
72            match serde_json::from_str::<serde_json::Value>(line) {
73                Ok(json) => {
74                    if let Some(content) = json["message"]["content"].as_str() {
75                        print!("{}", content);
76                        std::io::stdout().flush().unwrap();
77                    }
78                    if json["done"] == true {
79                        println!();
80                        break;
81                    }
82                }
83                Err(err) => {
84                    eprintln!("Parse error: {}", err);
85                }
86            }
87        }
88
89        // 可选:稍作等待,避免拉取过快影响显示
90        sleep(Duration::from_millis(20)).await;
91    }
92
93    Ok(())
94}

Trait Implementations§

Source§

impl Debug for Request

Source§

fn fmt(&self, f: &mut Formatter<'_>) -> Result

Formats the value using the given formatter. Read more

Auto Trait Implementations§

Blanket Implementations§

Source§

impl<T> Any for T
where T: 'static + ?Sized,

Source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
Source§

impl<T> Borrow<T> for T
where T: ?Sized,

Source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
Source§

impl<T> BorrowMut<T> for T
where T: ?Sized,

Source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
Source§

impl<T> From<T> for T

Source§

fn from(t: T) -> T

Returns the argument unchanged.

Source§

impl<T> Instrument for T

Source§

fn instrument(self, span: Span) -> Instrumented<Self>

Instruments this type with the provided Span, returning an Instrumented wrapper. Read more
Source§

fn in_current_span(self) -> Instrumented<Self>

Instruments this type with the current Span, returning an Instrumented wrapper. Read more
Source§

impl<T, U> Into<U> for T
where U: From<T>,

Source§

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

Source§

impl<T> IntoEither for T

Source§

fn into_either(self, into_left: bool) -> Either<Self, Self>

Converts self into a Left variant of Either<Self, Self> if into_left is true. Converts self into a Right variant of Either<Self, Self> otherwise. Read more
Source§

fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
where F: FnOnce(&Self) -> bool,

Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true. Converts self into a Right variant of Either<Self, Self> otherwise. Read more
Source§

impl<T> Pointable for T

Source§

const ALIGN: usize

The alignment of the pointer.
Source§

type Init = T

The type for initializers.
Source§

unsafe fn init(init: <T as Pointable>::Init) -> usize

Initializes a value with the given initializer. Read more
Source§

unsafe fn deref<'a>(ptr: usize) -> &'a T

Dereferences the given pointer. Read more
Source§

unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut T

Mutably dereferences the given pointer. Read more
Source§

unsafe fn drop(ptr: usize)

Drops the object pointed to by the given pointer. Read more
Source§

impl<T> PolicyExt for T
where T: ?Sized,

Source§

fn and<P, B, E>(self, other: P) -> And<T, P>
where T: Policy<B, E>, P: Policy<B, E>,

Create a new Policy that returns Action::Follow only if self and other return Action::Follow. Read more
Source§

fn or<P, B, E>(self, other: P) -> Or<T, P>
where T: Policy<B, E>, P: Policy<B, E>,

Create a new Policy that returns Action::Follow if either self or other returns Action::Follow. Read more
Source§

impl<T> Same for T

Source§

type Output = T

Should always be Self
Source§

impl<T, U> TryFrom<U> for T
where U: Into<T>,

Source§

type Error = Infallible

The type returned in the event of a conversion error.
Source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
Source§

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

Source§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
Source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.
Source§

impl<V, T> VZip<V> for T
where V: MultiLane<T>,

Source§

fn vzip(self) -> V

Source§

impl<T> WithSubscriber for T

Source§

fn with_subscriber<S>(self, subscriber: S) -> WithDispatch<Self>
where S: Into<Dispatch>,

Attaches the provided Subscriber to this type, returning a WithDispatch wrapper. Read more
Source§

fn with_current_subscriber(self) -> WithDispatch<Self>

Attaches the current default Subscriber to this type, returning a WithDispatch wrapper. Read more
Source§

impl<G1, G2> Within<G2> for G1
where G2: Contains<G1>,

Source§

fn is_within(&self, b: &G2) -> bool

Source§

impl<T> ErasedDestructor for T
where T: 'static,