pub struct Request { /* private fields */ }
Expand description
An HTTP request builder and executor with base URL and default headers.
Implementations§
Source§impl Request
impl Request
Sourcepub fn new() -> Self
pub fn new() -> Self
Create a new Request client.
Examples found in repository?
examples/llm_stream_req.rs (line 11)
9async fn main() -> Result<()> {
10 // 创建一个新的 Request 实例
11 let mut client = Request::new();
12
13 // 设置 Ollama 流式请求 base_url
14 client.set_base_url("http://localhost:11434")?;
15 let stream_headers = vec![("Content-Type", "application/json".to_string())];
16 client.set_default_headers(stream_headers)?;
17
18 // 构造 Ollama 请求体
19 let stream_body = json!({
20 "model": "llama3.2",
21 "stream": true,
22 "messages": [
23 {"role": "user", "content": "Hello, who are you?"}
24 ]
25 });
26
27 let mut stream = client.post_stream("api/chat", &stream_body, None).await?;
28
29 println!("Streaming Response:");
30 while let Some(chunk) = stream.next().await {
31 let data = chunk?;
32 let s = std::str::from_utf8(&data).unwrap();
33
34 for line in s.lines().filter(|l| !l.trim().is_empty()) {
35 match serde_json::from_str::<serde_json::Value>(line) {
36 Ok(json) => {
37 if let Some(content) = json["message"]["content"].as_str() {
38 print!("{}", content);
39 std::io::stdout().flush().unwrap();
40 }
41 if json["done"] == true {
42 println!();
43 break;
44 }
45 }
46 Err(err) => {
47 eprintln!("Parse error: {}", err);
48 }
49 }
50 }
51
52 // 可选:稍作等待,避免拉取过快影响显示
53 sleep(Duration::from_millis(20)).await;
54 }
55
56 Ok(())
57}
More examples
examples/request_example.rs (line 11)
9async fn main() -> Result<()> {
10 // 创建一个新的 Request 实例
11 let mut client = Request::new();
12
13 // 设置 base_url
14 client.set_base_url("https://jsonplaceholder.typicode.com")?;
15
16 // 设置默认的请求头
17 let mut default_headers = Vec::new();
18 default_headers.push(("Content-Type", "application/json".to_string()));
19 client.set_default_headers(default_headers)?;
20
21 // 定义自定义请求头
22 let mut custom_headers = Vec::new();
23 custom_headers.push(("Authorization", "Bearer some_token".to_string()));
24
25 // 创建一个 POST 请求体
26 let body = json!({
27 "title": "foo",
28 "body": "bar",
29 "userId": 1
30 });
31
32 // 发送 POST 请求
33 let response = client
34 .post("/posts", &body, Some(custom_headers.clone()))
35 .await?;
36
37 println!("POST Response: {:?}", response.status());
38 let response_body = response.text().await?;
39 println!("Response Body: {}", response_body);
40
41 // 发送 GET 请求
42 let response = client
43 .get("/posts/1", None, Some(custom_headers.clone()))
44 .await?;
45
46 println!("GET Response: {:?}", response.status());
47 let response_body = response.text().await?;
48 println!("Response Body: {}", response_body);
49
50 // 设置 Ollama 流式请求 base_url
51 client.set_base_url("http://localhost:11434")?;
52 let stream_headers = vec![("Content-Type", "application/json".to_string())];
53 client.set_default_headers(stream_headers)?;
54
55 // 构造 Ollama 请求体
56 let stream_body = json!({
57 "model": "llama3.2",
58 "stream": true,
59 "messages": [
60 {"role": "user", "content": "Hello, who are you?"}
61 ]
62 });
63
64 let mut stream = client.post_stream("api/chat", &stream_body, None).await?;
65
66 println!("Streaming Response:");
67 while let Some(chunk) = stream.next().await {
68 let data = chunk?;
69 let s = std::str::from_utf8(&data).unwrap();
70
71 for line in s.lines().filter(|l| !l.trim().is_empty()) {
72 match serde_json::from_str::<serde_json::Value>(line) {
73 Ok(json) => {
74 if let Some(content) = json["message"]["content"].as_str() {
75 print!("{}", content);
76 std::io::stdout().flush().unwrap();
77 }
78 if json["done"] == true {
79 println!();
80 break;
81 }
82 }
83 Err(err) => {
84 eprintln!("Parse error: {}", err);
85 }
86 }
87 }
88
89 // 可选:稍作等待,避免拉取过快影响显示
90 sleep(Duration::from_millis(20)).await;
91 }
92
93 Ok(())
94}
pub fn with_timeout(timeout_sec: u64) -> Result<Self>
Create a new Request client with a custom request timeout, in seconds.
Sourcepub fn set_base_url(&mut self, base_url: &str) -> Result<()>
pub fn set_base_url(&mut self, base_url: &str) -> Result<()>
Set the base URL for all requests.
Examples found in repository?
examples/llm_stream_req.rs (line 14)
9async fn main() -> Result<()> {
10 // 创建一个新的 Request 实例
11 let mut client = Request::new();
12
13 // 设置 Ollama 流式请求 base_url
14 client.set_base_url("http://localhost:11434")?;
15 let stream_headers = vec![("Content-Type", "application/json".to_string())];
16 client.set_default_headers(stream_headers)?;
17
18 // 构造 Ollama 请求体
19 let stream_body = json!({
20 "model": "llama3.2",
21 "stream": true,
22 "messages": [
23 {"role": "user", "content": "Hello, who are you?"}
24 ]
25 });
26
27 let mut stream = client.post_stream("api/chat", &stream_body, None).await?;
28
29 println!("Streaming Response:");
30 while let Some(chunk) = stream.next().await {
31 let data = chunk?;
32 let s = std::str::from_utf8(&data).unwrap();
33
34 for line in s.lines().filter(|l| !l.trim().is_empty()) {
35 match serde_json::from_str::<serde_json::Value>(line) {
36 Ok(json) => {
37 if let Some(content) = json["message"]["content"].as_str() {
38 print!("{}", content);
39 std::io::stdout().flush().unwrap();
40 }
41 if json["done"] == true {
42 println!();
43 break;
44 }
45 }
46 Err(err) => {
47 eprintln!("Parse error: {}", err);
48 }
49 }
50 }
51
52 // 可选:稍作等待,避免拉取过快影响显示
53 sleep(Duration::from_millis(20)).await;
54 }
55
56 Ok(())
57}
More examples
examples/request_example.rs (line 14)
9async fn main() -> Result<()> {
10 // 创建一个新的 Request 实例
11 let mut client = Request::new();
12
13 // 设置 base_url
14 client.set_base_url("https://jsonplaceholder.typicode.com")?;
15
16 // 设置默认的请求头
17 let mut default_headers = Vec::new();
18 default_headers.push(("Content-Type", "application/json".to_string()));
19 client.set_default_headers(default_headers)?;
20
21 // 定义自定义请求头
22 let mut custom_headers = Vec::new();
23 custom_headers.push(("Authorization", "Bearer some_token".to_string()));
24
25 // 创建一个 POST 请求体
26 let body = json!({
27 "title": "foo",
28 "body": "bar",
29 "userId": 1
30 });
31
32 // 发送 POST 请求
33 let response = client
34 .post("/posts", &body, Some(custom_headers.clone()))
35 .await?;
36
37 println!("POST Response: {:?}", response.status());
38 let response_body = response.text().await?;
39 println!("Response Body: {}", response_body);
40
41 // 发送 GET 请求
42 let response = client
43 .get("/posts/1", None, Some(custom_headers.clone()))
44 .await?;
45
46 println!("GET Response: {:?}", response.status());
47 let response_body = response.text().await?;
48 println!("Response Body: {}", response_body);
49
50 // 设置 Ollama 流式请求 base_url
51 client.set_base_url("http://localhost:11434")?;
52 let stream_headers = vec![("Content-Type", "application/json".to_string())];
53 client.set_default_headers(stream_headers)?;
54
55 // 构造 Ollama 请求体
56 let stream_body = json!({
57 "model": "llama3.2",
58 "stream": true,
59 "messages": [
60 {"role": "user", "content": "Hello, who are you?"}
61 ]
62 });
63
64 let mut stream = client.post_stream("api/chat", &stream_body, None).await?;
65
66 println!("Streaming Response:");
67 while let Some(chunk) = stream.next().await {
68 let data = chunk?;
69 let s = std::str::from_utf8(&data).unwrap();
70
71 for line in s.lines().filter(|l| !l.trim().is_empty()) {
72 match serde_json::from_str::<serde_json::Value>(line) {
73 Ok(json) => {
74 if let Some(content) = json["message"]["content"].as_str() {
75 print!("{}", content);
76 std::io::stdout().flush().unwrap();
77 }
78 if json["done"] == true {
79 println!();
80 break;
81 }
82 }
83 Err(err) => {
84 eprintln!("Parse error: {}", err);
85 }
86 }
87 }
88
89 // 可选:稍作等待,避免拉取过快影响显示
90 sleep(Duration::from_millis(20)).await;
91 }
92
93 Ok(())
94}
Source§pub fn set_default_headers(
&mut self,
headers: Vec<(&'static str, String)>,
) -> Result<()>
pub fn set_default_headers( &mut self, headers: Vec<(&'static str, String)>, ) -> Result<()>
Set default headers to be applied on all requests.
Examples found in repository?
examples/llm_stream_req.rs (line 16)
9async fn main() -> Result<()> {
10 // 创建一个新的 Request 实例
11 let mut client = Request::new();
12
13 // 设置 Ollama 流式请求 base_url
14 client.set_base_url("http://localhost:11434")?;
15 let stream_headers = vec![("Content-Type", "application/json".to_string())];
16 client.set_default_headers(stream_headers)?;
17
18 // 构造 Ollama 请求体
19 let stream_body = json!({
20 "model": "llama3.2",
21 "stream": true,
22 "messages": [
23 {"role": "user", "content": "Hello, who are you?"}
24 ]
25 });
26
27 let mut stream = client.post_stream("api/chat", &stream_body, None).await?;
28
29 println!("Streaming Response:");
30 while let Some(chunk) = stream.next().await {
31 let data = chunk?;
32 let s = std::str::from_utf8(&data).unwrap();
33
34 for line in s.lines().filter(|l| !l.trim().is_empty()) {
35 match serde_json::from_str::<serde_json::Value>(line) {
36 Ok(json) => {
37 if let Some(content) = json["message"]["content"].as_str() {
38 print!("{}", content);
39 std::io::stdout().flush().unwrap();
40 }
41 if json["done"] == true {
42 println!();
43 break;
44 }
45 }
46 Err(err) => {
47 eprintln!("Parse error: {}", err);
48 }
49 }
50 }
51
52 // 可选:稍作等待,避免拉取过快影响显示
53 sleep(Duration::from_millis(20)).await;
54 }
55
56 Ok(())
57}More examples
examples/request_example.rs (line 19)
9async fn main() -> Result<()> {
10 // 创建一个新的 Request 实例
11 let mut client = Request::new();
12
13 // 设置 base_url
14 client.set_base_url("https://jsonplaceholder.typicode.com")?;
15
16 // 设置默认的请求头
17 let mut default_headers = Vec::new();
18 default_headers.push(("Content-Type", "application/json".to_string()));
19 client.set_default_headers(default_headers)?;
20
21 // 定义自定义请求头
22 let mut custom_headers = Vec::new();
23 custom_headers.push(("Authorization", "Bearer some_token".to_string()));
24
25 // 创建一个 POST 请求体
26 let body = json!({
27 "title": "foo",
28 "body": "bar",
29 "userId": 1
30 });
31
32 // 发送 POST 请求
33 let response = client
34 .post("/posts", &body, Some(custom_headers.clone()))
35 .await?;
36
37 println!("POST Response: {:?}", response.status());
38 let response_body = response.text().await?;
39 println!("Response Body: {}", response_body);
40
41 // 发送 GET 请求
42 let response = client
43 .get("/posts/1", None, Some(custom_headers.clone()))
44 .await?;
45
46 println!("GET Response: {:?}", response.status());
47 let response_body = response.text().await?;
48 println!("Response Body: {}", response_body);
49
50 // 设置 Ollama 流式请求 base_url
51 client.set_base_url("http://localhost:11434")?;
52 let stream_headers = vec![("Content-Type", "application/json".to_string())];
53 client.set_default_headers(stream_headers)?;
54
55 // 构造 Ollama 请求体
56 let stream_body = json!({
57 "model": "llama3.2",
58 "stream": true,
59 "messages": [
60 {"role": "user", "content": "Hello, who are you?"}
61 ]
62 });
63
64 let mut stream = client.post_stream("api/chat", &stream_body, None).await?;
65
66 println!("Streaming Response:");
67 while let Some(chunk) = stream.next().await {
68 let data = chunk?;
69 let s = std::str::from_utf8(&data).unwrap();
70
71 for line in s.lines().filter(|l| !l.trim().is_empty()) {
72 match serde_json::from_str::<serde_json::Value>(line) {
73 Ok(json) => {
74 if let Some(content) = json["message"]["content"].as_str() {
75 print!("{}", content);
76 std::io::stdout().flush().unwrap();
77 }
78 if json["done"] == true {
79 println!();
80 break;
81 }
82 }
83 Err(err) => {
84 eprintln!("Parse error: {}", err);
85 }
86 }
87 }
88
89 // 可选:稍作等待,避免拉取过快影响显示
90 sleep(Duration::from_millis(20)).await;
91 }
92
93 Ok(())
94}
Source§pub async fn get(
&self,
endpoint: &str,
query: Option<Vec<(String, String)>>,
headers: Option<Vec<(&'static str, String)>>,
) -> Result<Response>
pub async fn get( &self, endpoint: &str, query: Option<Vec<(String, String)>>, headers: Option<Vec<(&'static str, String)>>, ) -> Result<Response>
Send a GET request.
Examples found in repository?
examples/request_example.rs (line 43)
9async fn main() -> Result<()> {
10 // 创建一个新的 Request 实例
11 let mut client = Request::new();
12
13 // 设置 base_url
14 client.set_base_url("https://jsonplaceholder.typicode.com")?;
15
16 // 设置默认的请求头
17 let mut default_headers = Vec::new();
18 default_headers.push(("Content-Type", "application/json".to_string()));
19 client.set_default_headers(default_headers)?;
20
21 // 定义自定义请求头
22 let mut custom_headers = Vec::new();
23 custom_headers.push(("Authorization", "Bearer some_token".to_string()));
24
25 // 创建一个 POST 请求体
26 let body = json!({
27 "title": "foo",
28 "body": "bar",
29 "userId": 1
30 });
31
32 // 发送 POST 请求
33 let response = client
34 .post("/posts", &body, Some(custom_headers.clone()))
35 .await?;
36
37 println!("POST Response: {:?}", response.status());
38 let response_body = response.text().await?;
39 println!("Response Body: {}", response_body);
40
41 // 发送 GET 请求
42 let response = client
43 .get("/posts/1", None, Some(custom_headers.clone()))
44 .await?;
45
46 println!("GET Response: {:?}", response.status());
47 let response_body = response.text().await?;
48 println!("Response Body: {}", response_body);
49
50 // 设置 Ollama 流式请求 base_url
51 client.set_base_url("http://localhost:11434")?;
52 let stream_headers = vec![("Content-Type", "application/json".to_string())];
53 client.set_default_headers(stream_headers)?;
54
55 // 构造 Ollama 请求体
56 let stream_body = json!({
57 "model": "llama3.2",
58 "stream": true,
59 "messages": [
60 {"role": "user", "content": "Hello, who are you?"}
61 ]
62 });
63
64 let mut stream = client.post_stream("api/chat", &stream_body, None).await?;
65
66 println!("Streaming Response:");
67 while let Some(chunk) = stream.next().await {
68 let data = chunk?;
69 let s = std::str::from_utf8(&data).unwrap();
70
71 for line in s.lines().filter(|l| !l.trim().is_empty()) {
72 match serde_json::from_str::<serde_json::Value>(line) {
73 Ok(json) => {
74 if let Some(content) = json["message"]["content"].as_str() {
75 print!("{}", content);
76 std::io::stdout().flush().unwrap();
77 }
78 if json["done"] == true {
79 println!();
80 break;
81 }
82 }
83 Err(err) => {
84 eprintln!("Parse error: {}", err);
85 }
86 }
87 }
88
89 // 可选:稍作等待,避免拉取过快影响显示
90 sleep(Duration::from_millis(20)).await;
91 }
92
93 Ok(())
94}
Source§pub async fn post(
&self,
endpoint: &str,
body: &Value,
headers: Option<Vec<(&'static str, String)>>,
) -> Result<Response>
pub async fn post( &self, endpoint: &str, body: &Value, headers: Option<Vec<(&'static str, String)>>, ) -> Result<Response>
Send a POST request with JSON body.
Examples found in repository?
examples/request_example.rs (line 34)
9async fn main() -> Result<()> {
10 // 创建一个新的 Request 实例
11 let mut client = Request::new();
12
13 // 设置 base_url
14 client.set_base_url("https://jsonplaceholder.typicode.com")?;
15
16 // 设置默认的请求头
17 let mut default_headers = Vec::new();
18 default_headers.push(("Content-Type", "application/json".to_string()));
19 client.set_default_headers(default_headers)?;
20
21 // 定义自定义请求头
22 let mut custom_headers = Vec::new();
23 custom_headers.push(("Authorization", "Bearer some_token".to_string()));
24
25 // 创建一个 POST 请求体
26 let body = json!({
27 "title": "foo",
28 "body": "bar",
29 "userId": 1
30 });
31
32 // 发送 POST 请求
33 let response = client
34 .post("/posts", &body, Some(custom_headers.clone()))
35 .await?;
36
37 println!("POST Response: {:?}", response.status());
38 let response_body = response.text().await?;
39 println!("Response Body: {}", response_body);
40
41 // 发送 GET 请求
42 let response = client
43 .get("/posts/1", None, Some(custom_headers.clone()))
44 .await?;
45
46 println!("GET Response: {:?}", response.status());
47 let response_body = response.text().await?;
48 println!("Response Body: {}", response_body);
49
50 // 设置 Ollama 流式请求 base_url
51 client.set_base_url("http://localhost:11434")?;
52 let stream_headers = vec![("Content-Type", "application/json".to_string())];
53 client.set_default_headers(stream_headers)?;
54
55 // 构造 Ollama 请求体
56 let stream_body = json!({
57 "model": "llama3.2",
58 "stream": true,
59 "messages": [
60 {"role": "user", "content": "Hello, who are you?"}
61 ]
62 });
63
64 let mut stream = client.post_stream("api/chat", &stream_body, None).await?;
65
66 println!("Streaming Response:");
67 while let Some(chunk) = stream.next().await {
68 let data = chunk?;
69 let s = std::str::from_utf8(&data).unwrap();
70
71 for line in s.lines().filter(|l| !l.trim().is_empty()) {
72 match serde_json::from_str::<serde_json::Value>(line) {
73 Ok(json) => {
74 if let Some(content) = json["message"]["content"].as_str() {
75 print!("{}", content);
76 std::io::stdout().flush().unwrap();
77 }
78 if json["done"] == true {
79 println!();
80 break;
81 }
82 }
83 Err(err) => {
84 eprintln!("Parse error: {}", err);
85 }
86 }
87 }
88
89 // 可选:稍作等待,避免拉取过快影响显示
90 sleep(Duration::from_millis(20)).await;
91 }
92
93 Ok(())
94}
Source§pub async fn put(
&self,
endpoint: &str,
body: &Value,
headers: Option<Vec<(&'static str, String)>>,
) -> Result<Response>
pub async fn put( &self, endpoint: &str, body: &Value, headers: Option<Vec<(&'static str, String)>>, ) -> Result<Response>
Send a PUT request with JSON body.
Sourcepub async fn delete(
&self,
endpoint: &str,
headers: Option<Vec<(&'static str, String)>>,
) -> Result<Response>
pub async fn delete( &self, endpoint: &str, headers: Option<Vec<(&'static str, String)>>, ) -> Result<Response>
Send a DELETE request.
Sourcepub async fn post_stream(
&self,
endpoint: &str,
body: &Value,
headers: Option<Vec<(&'static str, String)>>,
) -> Result<ByteStream>
pub async fn post_stream( &self, endpoint: &str, body: &Value, headers: Option<Vec<(&'static str, String)>>, ) -> Result<ByteStream>
Send a streaming POST request and return the response stream.
Examples found in repository?
examples/llm_stream_req.rs (line 27)
9async fn main() -> Result<()> {
10 // 创建一个新的 Request 实例
11 let mut client = Request::new();
12
13 // 设置 Ollama 流式请求 base_url
14 client.set_base_url("http://localhost:11434")?;
15 let stream_headers = vec![("Content-Type", "application/json".to_string())];
16 client.set_default_headers(stream_headers)?;
17
18 // 构造 Ollama 请求体
19 let stream_body = json!({
20 "model": "llama3.2",
21 "stream": true,
22 "messages": [
23 {"role": "user", "content": "Hello, who are you?"}
24 ]
25 });
26
27 let mut stream = client.post_stream("api/chat", &stream_body, None).await?;
28
29 println!("Streaming Response:");
30 while let Some(chunk) = stream.next().await {
31 let data = chunk?;
32 let s = std::str::from_utf8(&data).unwrap();
33
34 for line in s.lines().filter(|l| !l.trim().is_empty()) {
35 match serde_json::from_str::<serde_json::Value>(line) {
36 Ok(json) => {
37 if let Some(content) = json["message"]["content"].as_str() {
38 print!("{}", content);
39 std::io::stdout().flush().unwrap();
40 }
41 if json["done"] == true {
42 println!();
43 break;
44 }
45 }
46 Err(err) => {
47 eprintln!("Parse error: {}", err);
48 }
49 }
50 }
51
52 // 可选:稍作等待,避免拉取过快影响显示
53 sleep(Duration::from_millis(20)).await;
54 }
55
56 Ok(())
57}
More examples
examples/request_example.rs (line 64)
9async fn main() -> Result<()> {
10 // 创建一个新的 Request 实例
11 let mut client = Request::new();
12
13 // 设置 base_url
14 client.set_base_url("https://jsonplaceholder.typicode.com")?;
15
16 // 设置默认的请求头
17 let mut default_headers = Vec::new();
18 default_headers.push(("Content-Type", "application/json".to_string()));
19 client.set_default_headers(default_headers)?;
20
21 // 定义自定义请求头
22 let mut custom_headers = Vec::new();
23 custom_headers.push(("Authorization", "Bearer some_token".to_string()));
24
25 // 创建一个 POST 请求体
26 let body = json!({
27 "title": "foo",
28 "body": "bar",
29 "userId": 1
30 });
31
32 // 发送 POST 请求
33 let response = client
34 .post("/posts", &body, Some(custom_headers.clone()))
35 .await?;
36
37 println!("POST Response: {:?}", response.status());
38 let response_body = response.text().await?;
39 println!("Response Body: {}", response_body);
40
41 // 发送 GET 请求
42 let response = client
43 .get("/posts/1", None, Some(custom_headers.clone()))
44 .await?;
45
46 println!("GET Response: {:?}", response.status());
47 let response_body = response.text().await?;
48 println!("Response Body: {}", response_body);
49
50 // 设置 Ollama 流式请求 base_url
51 client.set_base_url("http://localhost:11434")?;
52 let stream_headers = vec![("Content-Type", "application/json".to_string())];
53 client.set_default_headers(stream_headers)?;
54
55 // 构造 Ollama 请求体
56 let stream_body = json!({
57 "model": "llama3.2",
58 "stream": true,
59 "messages": [
60 {"role": "user", "content": "Hello, who are you?"}
61 ]
62 });
63
64 let mut stream = client.post_stream("api/chat", &stream_body, None).await?;
65
66 println!("Streaming Response:");
67 while let Some(chunk) = stream.next().await {
68 let data = chunk?;
69 let s = std::str::from_utf8(&data).unwrap();
70
71 for line in s.lines().filter(|l| !l.trim().is_empty()) {
72 match serde_json::from_str::<serde_json::Value>(line) {
73 Ok(json) => {
74 if let Some(content) = json["message"]["content"].as_str() {
75 print!("{}", content);
76 std::io::stdout().flush().unwrap();
77 }
78 if json["done"] == true {
79 println!();
80 break;
81 }
82 }
83 Err(err) => {
84 eprintln!("Parse error: {}", err);
85 }
86 }
87 }
88
89 // 可选:稍作等待,避免拉取过快影响显示
90 sleep(Duration::from_millis(20)).await;
91 }
92
93 Ok(())
94}
Trait Implementations§
Auto Trait Implementations§
impl Freeze for Request
impl !RefUnwindSafe for Request
impl Send for Request
impl Sync for Request
impl Unpin for Request
impl !UnwindSafe for Request
Blanket Implementations§
Source§impl<T> BorrowMut<T> for T
where
T: ?Sized,
impl<T> BorrowMut<T> for T
where
T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value. Read more
Source§impl<T> Instrument for T
impl<T> Instrument for T
Source§fn instrument(self, span: Span) -> Instrumented<Self>
fn instrument(self, span: Span) -> Instrumented<Self>
Source§fn in_current_span(self) -> Instrumented<Self>
fn in_current_span(self) -> Instrumented<Self>
Source§impl<T> IntoEither for T
impl<T> IntoEither for T
Source§fn into_either(self, into_left: bool) -> Either<Self, Self>
fn into_either(self, into_left: bool) -> Either<Self, Self>
Converts
self into a Left variant of Either<Self, Self>
if into_left is true.
Converts self into a Right variant of Either<Self, Self>
otherwise. Read more
Source§fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
Converts
self into a Left variant of Either<Self, Self>
if into_left(&self) returns true.
Converts self into a Right variant of Either<Self, Self>
otherwise. Read more