// soda_pool/lib.rs
1#![warn(clippy::unwrap_used)]
2#![cfg_attr(coverage_nightly, feature(coverage_attribute))]
3#![cfg_attr(docsrs, feature(doc_auto_cfg))]
4
//! This crate provides an automatically managed connection pool for
//! [tonic]'s [`Channel`](tonic::transport::Channel)s. It provides a
//! simpler-to-use interface by automatically updating the pool with new
//! channels based on DNS resolution, detecting failed connections, and
//! exposing methods to report broken endpoints.
10//!
//! For a more detailed explanation of the internal workings of the pool,
//! please see the [implementation details](#implementation-details).
13//!
14//! # Usage
15//!
//! Although this crate is designed to be used by gRPC clients generated by
//! the [`soda-pool-build`] crate, it can also be used directly.
18//!
//! ```no_run
//! # use std::error::Error;
//! use soda_pool::{ChannelPool, EndpointTemplate, ManagedChannelPoolBuilder};
//! use tonic::transport::Channel;
//! use url::Url;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn Error>> {
//!     let url = Url::parse("http://grpc.example.com:50051")?;
//!     let endpoint_template = EndpointTemplate::new(url)?;
//!     let pool = ManagedChannelPoolBuilder::new(endpoint_template).build();
//!
//!     // Get a channel from the pool.
//!     let Some((ip_address, channel)) = pool.get_channel().await else {
//!         Err(NoChannelAppError)?
//!     };
//!
//!     // Use the channel to make a gRPC request.
//!     let result = make_request(channel).await;
//!     match result {
//!         Ok(_) => println!("Request succeeded"),
//!         Err(ref e) => {
//!             // Warning: an error can also be returned from the gRPC server.
//!             // Examine the error and decide whether to report the endpoint
//!             // as broken.
//!             println!("Request failed: {:?}. Reporting as broken", e);
//!             pool.report_broken(ip_address).await;
//!         }
//!     }
//!     Ok(())
//! }
//!
//! # #[derive(Debug)]
//! // Placeholder for application-specific error handling.
//! struct NoChannelAppError;
//! # impl Error for NoChannelAppError {}
//! # impl std::fmt::Display for NoChannelAppError {
//! #     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
//! #         write!(f, "No channel available")
//! #     }
//! # }
//!
//! // Placeholder for the actual gRPC request.
//! async fn make_request(channel: Channel) -> Result<(), Box<dyn Error>> {
//!     return Ok(())
//! }
//! ```
65//!
66//! # Feature flags
67//!
//! - `tls`: Enables TLS options for the [`EndpointTemplate`].
//! - `mock`: Enables the mock implementation of the [`ChannelPool`] trait for
//!   testing purposes.
71//!
72//! # Implementation details
73//!
//! On creation of the pool, two background tasks are started:
//! 1. A task that periodically resolves the DNS name and updates the pool with
//!    new channels.
//!
//!    The frequency of this check can be configured using the
//!    [`ManagedChannelPoolBuilder::dns_interval`] method. The default is 5
//!    seconds.
//!
//! 2. A task that periodically checks connections previously marked as broken
//!    and tries to reconnect them.
//!
//!    The frequency of this check is calculated on a per-IP basis following
//!    the exponential backoff algorithm. The backoff time will range from 1 to
//!    approximately 60 seconds.
//!
//! Those tasks are shared between all clones of the pool and continue to run
//! until the last copy is dropped.
91
// Pool implementation; its public items (e.g. the builder and the
// `ChannelPool` trait referenced in the crate docs) are re-exported at the
// crate root.
mod pool;
pub use pool::*;

// Internal: DNS resolution used by the background refresh task described in
// the crate-level docs.
mod dns;

// Internal: bookkeeping for endpoints reported broken (used by the reconnect
// background task described in the crate-level docs).
mod broken_endpoints;

// `EndpointTemplate` and related configuration, re-exported at the crate root.
mod endpoint_template;
pub use endpoint_template::*;

// Retry-related public types, re-exported at the crate root.
mod retry;
pub use retry::*;

// Hidden from rustdoc: re-exported dependencies — presumably for code
// generated by `soda-pool-build` (see crate docs); not a stable public API.
#[doc(hidden)]
pub mod deps;

// Internal helper macros and channel-readiness tracking.
mod macros;
mod ready_channels;

// Mock `ChannelPool` implementation for tests, gated behind the `mock`
// feature flag.
#[cfg(feature = "mock")]
pub mod mock;