soda_pool/lib.rs

#![warn(clippy::unwrap_used)]
#![cfg_attr(coverage_nightly, feature(coverage_attribute))]
#![cfg_attr(docsrs, feature(doc_auto_cfg))]

//! This crate provides an automatically managed connection pool for [tonic]'s
//! [`Channel`](tonic::transport::Channel)s. It offers a simpler-to-use interface
//! by automatically updating the pool with new channels based on DNS resolution,
//! detecting failed connections, and exposing methods to report broken endpoints.
//!
//! For a more detailed explanation of the pool's internal workings, please see
//! the [implementation details](#implementation-details).
//!
//! # Usage
//!
//! Although this crate is designed to be used by gRPC clients generated with the
//! [`soda-pool-build`] crate, it can also be used directly.
//!
//! ```no_run
//! # use std::error::Error;
//! use soda_pool::{ChannelPoolBuilder, EndpointTemplate};
//! use tonic::transport::Channel;
//! use url::Url;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn Error>> {
//!     let url = Url::parse("http://grpc.example.com:50051")?;
//!     let endpoint_template = EndpointTemplate::new(url)?;
//!     let pool = ChannelPoolBuilder::new(endpoint_template).build();
//!
//!     // Get a channel from the pool
//!     let Some((ip_address, channel)) = pool.get_channel().await else {
//!         Err(NoChannelAppError)?
//!     };
//!
//!     // Use the channel to make a gRPC request
//!     let result = make_request(channel).await;
//!     match result {
//!         Ok(_) => println!("Request succeeded"),
//!         Err(ref e) => {
//!             // Note: an error may also come from the gRPC server itself.
//!             // Examine the error and decide whether to report the endpoint as broken.
//!             println!("Request failed: {:?}. Reporting as broken", e);
//!             pool.report_broken(ip_address).await;
//!         }
//!     }
//!     Ok(())
//! }
//!
//! # #[derive(Debug)]
//! // Placeholder for application-specific error handling.
//! struct NoChannelAppError;
//! # impl Error for NoChannelAppError {}
//! # impl std::fmt::Display for NoChannelAppError {
//! #     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
//! #         write!(f, "No channel available")
//! #     }
//! # }
//!
//! // Placeholder for the actual gRPC request.
//! async fn make_request(channel: Channel) -> Result<(), Box<dyn Error>> {
//!     Ok(())
//! }
//! ```
//!
//! # Feature flags
//!
//! - `tls`: Enables TLS options for the [`EndpointTemplate`].
//!
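//! For example, the feature can be enabled in `Cargo.toml`. This is only a
//! sketch: the published crate name (`soda-pool`) and the placeholder version
//! are assumptions to check against the actual release.
//!
//! ```toml
//! [dependencies]
//! # Placeholder version; pin to the release you actually use.
//! soda-pool = { version = "*", features = ["tls"] }
//! ```
//!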
//! # Implementation details
//!
//! On creation of the pool, two background tasks are started:
//! 1. A task that periodically resolves the DNS name and updates the pool with
//!    new channels.
//!
//!    The frequency of this check can be configured with the
//!    [`ChannelPoolBuilder::dns_interval`](crate::ChannelPoolBuilder::dns_interval)
//!    method (see the sketch after this list). The default is 5 seconds.
//!
//! 2. A task that periodically checks connections previously marked as broken
//!    and tries to reconnect to them.
//!
//!    The frequency of this check is calculated on a per-IP basis following an
//!    exponential backoff algorithm. The backoff time ranges from 1 to
//!    approximately 60 seconds.
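//!
//! As a sketch of the configuration mentioned in item 1 (the values are
//! hypothetical, and the `Duration` parameter is an assumption to verify
//! against `ChannelPoolBuilder`'s documentation):
//!
//! ```ignore
//! use std::time::Duration;
//!
//! // Check DNS every 10 seconds instead of the default 5.
//! let pool = ChannelPoolBuilder::new(endpoint_template)
//!     .dns_interval(Duration::from_secs(10))
//!     .build();
//! ```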
//!
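//! The exact backoff schedule used by item 2 is an internal detail. Purely as
//! an illustration of the general shape (not the crate's actual formula), an
//! exponential backoff bounded to roughly that range can be computed like this:
//!
//! ```
//! use std::time::Duration;
//!
//! // Illustrative only: double the delay on each failed attempt, capped at 60s.
//! fn backoff_delay(failed_attempts: u32) -> Duration {
//!     Duration::from_secs(2u64.saturating_pow(failed_attempts).clamp(1, 60))
//! }
//!
//! assert_eq!(backoff_delay(0), Duration::from_secs(1));
//! assert_eq!(backoff_delay(10), Duration::from_secs(60));
//! ```
//!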
//! These tasks are shared between all clones of the pool and continue to run
//! until the last clone is dropped.
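//!
//! For example (a sketch only; it assumes the pool returned by
//! `ChannelPoolBuilder::build` implements `Clone` and `Send`, as the sharing
//! behaviour described above implies):
//!
//! ```ignore
//! // Clones share the same background DNS-refresh and recovery tasks.
//! let pool = ChannelPoolBuilder::new(endpoint_template).build();
//! let worker_pool = pool.clone();
//!
//! tokio::spawn(async move {
//!     if let Some((_addr, channel)) = worker_pool.get_channel().await {
//!         // Issue requests with `channel` here.
//!     }
//! });
//!
//! // `pool` is still usable here; the background tasks stop only after the
//! // last clone (including `worker_pool`) has been dropped.
//! ```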
mod pool;
pub use pool::*;

mod dns;

mod broken_endpoints;

mod endpoint_template;
pub use endpoint_template::*;

mod retry;
pub use retry::*;

#[doc(hidden)]
pub mod deps;

mod macros;
mod ready_channels;