pub struct Monitor { /* private fields */ }
Expand description
A monitor for coordinating and managing a collection of workers.
Implementations§
Source§impl Monitor
impl Monitor
Source§ pub fn register<Req, S, P, Ctx>(self, worker: Worker<Ready<S, P>>) -> Self where
S: Service<Request<Req, Ctx>> + Send + 'static,
S::Future: Send,
S::Error: Send + Sync + 'static + Into<BoxDynError>,
P: Backend<Request<Req, Ctx>> + Send + 'static,
P::Stream: Unpin + Send + 'static,
P::Layer: Layer<S> + Send,
<P::Layer as Layer<S>>::Service: Service<Request<Req, Ctx>> + Send,
<<P::Layer as Layer<S>>::Service as Service<Request<Req, Ctx>>>::Future: Send,
<<P::Layer as Layer<S>>::Service as Service<Request<Req, Ctx>>>::Error: Send + Sync + Into<BoxDynError>,
Req: Send + 'static,
Ctx: Send + 'static,
pub fn register<Req, S, P, Ctx>(self, worker: Worker<Ready<S, P>>) -> Self where
S: Service<Request<Req, Ctx>> + Send + 'static,
S::Future: Send,
S::Error: Send + Sync + 'static + Into<BoxDynError>,
P: Backend<Request<Req, Ctx>> + Send + 'static,
P::Stream: Unpin + Send + 'static,
P::Layer: Layer<S> + Send,
<P::Layer as Layer<S>>::Service: Service<Request<Req, Ctx>> + Send,
<<P::Layer as Layer<S>>::Service as Service<Request<Req, Ctx>>>::Future: Send,
<<P::Layer as Layer<S>>::Service as Service<Request<Req, Ctx>>>::Error: Send + Sync + Into<BoxDynError>,
Req: Send + 'static,
Ctx: Send + 'static,
Registers a single instance of a Worker
Source§ pub fn register_with_count<Req, S, P, Ctx>(
self,
count: usize,
worker: Worker<Ready<S, P>>,
) -> Self where
S: Service<Request<Req, Ctx>> + Send + 'static + Clone,
S::Future: Send,
S::Error: Send + Sync + 'static + Into<BoxDynError>,
P: Backend<Request<Req, Ctx>> + Send + 'static + Clone,
P::Stream: Unpin + Send + 'static,
P::Layer: Layer<S> + Send,
<P::Layer as Layer<S>>::Service: Service<Request<Req, Ctx>> + Send,
<<P::Layer as Layer<S>>::Service as Service<Request<Req, Ctx>>>::Future: Send,
<<P::Layer as Layer<S>>::Service as Service<Request<Req, Ctx>>>::Error: Send + Sync + Into<BoxDynError>,
Req: Send + 'static,
Ctx: Send + 'static,
👎 Deprecated since 0.6.0: Consider using the .register method instead, as workers now offer concurrency by default.
pub fn register_with_count<Req, S, P, Ctx>(
self,
count: usize,
worker: Worker<Ready<S, P>>,
) -> Self where
S: Service<Request<Req, Ctx>> + Send + 'static + Clone,
S::Future: Send,
S::Error: Send + Sync + 'static + Into<BoxDynError>,
P: Backend<Request<Req, Ctx>> + Send + 'static + Clone,
P::Stream: Unpin + Send + 'static,
P::Layer: Layer<S> + Send,
<P::Layer as Layer<S>>::Service: Service<Request<Req, Ctx>> + Send,
<<P::Layer as Layer<S>>::Service as Service<Request<Req, Ctx>>>::Future: Send,
<<P::Layer as Layer<S>>::Service as Service<Request<Req, Ctx>>>::Error: Send + Sync + Into<BoxDynError>,
Req: Send + 'static,
Ctx: Send + 'static,
👎 Deprecated since 0.6.0: Consider using the .register method instead, as workers now offer concurrency by default.
Source§ pub async fn run_with_signal<S>(self, signal: S) -> Result<()>
pub async fn run_with_signal<S>(self, signal: S) -> Result<()>
Runs the monitor and all its registered workers until they have all completed or a shutdown signal is received.
§Arguments
signal - A Future that resolves when a shutdown signal is received.
§Errors
If the monitor fails to shutdown gracefully, an std::io::Error
will be returned.
§Remarks
If a timeout has been set using the Monitor::shutdown_timeout
method, the monitor
will wait for all workers to complete up to the timeout duration before exiting.
If the timeout is reached and workers have not completed, the monitor will exit forcefully.
Source§impl Monitor
impl Monitor
Source§ pub fn new() -> Self
pub fn new() -> Self
Creates a new monitor instance.
§Returns
A new monitor instance, with an empty collection of workers.
Source§ pub fn shutdown_timeout(self, duration: Duration) -> Self
pub fn shutdown_timeout(self, duration: Duration) -> Self
Source§ pub fn with_terminator(
self,
fut: impl Future<Output = ()> + Send + 'static,
) -> Self
pub fn with_terminator( self, fut: impl Future<Output = ()> + Send + 'static, ) -> Self
Sets a future that will start being polled when the monitor’s shutdown process starts.
After shutdown has been initiated, the terminator
future will be run, and if it completes
before all tasks are completed the shutdown process will complete, thus finishing the
shutdown even if there are outstanding tasks. This can be useful for using a timeout or
signal (or combination) to force a full shutdown even if one or more tasks are taking
longer than expected to finish.