qsu/rt.rs
//! Server application wrapper runtime.
//!
//! # Overview
//! The _qsu_ runtime lives in a layer between the actual server application
//! and the "service subsystems" that server applications can integrate
//! against:
//!
//! <pre>
//! +---------------------+
//! | Server Application  |  (HTTP server, Matrix server, etc)
//! +---------------------+
//! |       qsu rt        |
//! +---------------------+
//! |  Platform service   |  (Windows Service subsystem, systemd, etc)
//! |      subsystem      |
//! +---------------------+
//! </pre>
//!
//! The primary goal of the _qsu_ runtime is to provide a consistent interface
//! that is mapped to whatever service subsystem it is actually running on.
//! This includes the case of no special subsystem at all, such as when running
//! the server application as a regular foreground process (which is common
//! during development).
//!
//! # How server applications are implemented and used
//! _qsu_ needs to know what kind of runtime the server application expects.
//! Server applications pick the runtime type by implementing a trait, of which
//! there are currently three recognized types:
//! - [`ServiceHandler`] is used for "non-async" server applications.
//! - [`TokioServiceHandler`] is used for server applications that run under
//!   the tokio executor.
//! - [`RocketServiceHandler`] is for server applications that are built on top
//!   of the Rocket HTTP framework.
//!
//! Each of these traits has three methods:
//! - `init()` is for initializing the service.
//! - `run()` is for running the actual server application.
//! - `shutdown()` is for shutting down the server application.
//!
//! The exact signatures of these trait methods differ between the runtime
//! types.
//!
//! Once a service trait has been implemented, the application creates a
//! [`RunCtx`] object and calls its [`run()`](RunCtx::run()) method, passing in
//! a service implementation object (see the example below).
//!
//! Note: Only one service wrapper may be initialized per process.
//!
//! ## Service Handler semantics
//! When a handler is run through the [`RunCtx`], the handler's `init()` method
//! is called first. If it returns `Ok(())`, the `run()` method is called.
//!
//! The handler's `shutdown()` will be called regardless of whether `init()` or
//! `run()` was successful (the only precondition for `shutdown()` to be called
//! is that `init()` was called).
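//!
//! ## Example
//! A minimal sketch of a non-async service, assuming the crate is used as
//! `qsu` and that this `rt` module is reachable as `qsu::rt`; the `MyError`
//! and `MyService` names are placeholders:
//!
//! ```no_run
//! use qsu::rt::{InitCtx, RunCtx, RunEnv, ServiceHandler, SvcEvt, TermCtx};
//!
//! #[derive(Debug)]
//! struct MyError(String);
//!
//! struct MyService;
//!
//! impl ServiceHandler for MyService {
//!   type AppErr = MyError;
//!
//!   fn init(&mut self, ictx: InitCtx) -> Result<(), MyError> {
//!     // Allocate resources, load configuration, bind sockets, etc.
//!     ictx.report(Some("initialized".into()));
//!     Ok(())
//!   }
//!
//!   fn run(&mut self, _re: &RunEnv) -> Result<(), MyError> {
//!     // Run the actual server application until it is told to shut down.
//!     Ok(())
//!   }
//!
//!   fn shutdown(&mut self, _tctx: TermCtx) -> Result<(), MyError> {
//!     // Release resources.
//!     Ok(())
//!   }
//! }
//!
//! let svcevt_handler: Box<dyn FnMut(SvcEvt) + Send> =
//!   Box::new(|_evt: SvcEvt| { /* react to service events */ });
//! let rt_handler: Box<dyn ServiceHandler<AppErr = MyError> + Send> =
//!   Box::new(MyService);
//! let _res = RunCtx::new("myservice").run_sync(svcevt_handler, rt_handler);
//! ```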
//!
//! # Argument parser
//! _qsu_ offers an [argument parser](crate::argp::ArgParser), which can
//! abstract away much of the runtime management and service registration.

mod nosvc;
mod rttype;
mod signals;

#[cfg(all(target_os = "linux", feature = "systemd"))]
#[cfg_attr(docsrs, doc(cfg(feature = "systemd")))]
mod systemd;

#[cfg(windows)]
pub mod winsvc;

use std::sync::{
  atomic::{AtomicU32, Ordering},
  Arc, OnceLock
};

#[cfg(any(feature = "tokio", feature = "rocket"))]
use async_trait::async_trait;

#[cfg(feature = "tokio")]
use tokio::runtime;

use tokio::sync::broadcast;

#[cfg(all(target_os = "linux", feature = "systemd"))]
use sd_notify::NotifyState;


use crate::{err::CbErr, lumberjack::LumberJack};

/// Whether the server application is running as a foreground process or under
/// a service subsystem.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum RunAs {
  Foreground,
  SvcSubsys
}

/// Keep track of whether the service application is running as a foreground
/// process or running under a service subsystem.
static RUNAS: OnceLock<RunAs> = OnceLock::new();

/// Return how the current process is being run, or `None` if the runtime has
/// not been initialized yet.
pub fn runas() -> Option<RunAs> {
  RUNAS.get().copied()
}


/// The runtime environment.
///
/// Can be used by the application callbacks to determine whether the
/// application is running as a service.
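///
/// A handler's `run()` callback might, for instance, log how it is being run
/// (a minimal sketch; the `qsu::rt` module path is assumed):
///
/// ```no_run
/// # fn demo(re: &qsu::rt::RunEnv) {
/// match re {
///   qsu::rt::RunEnv::Foreground => println!("running in the foreground"),
///   qsu::rt::RunEnv::Service(name) => println!("running as service {name:?}")
/// }
/// # }
/// ```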
#[derive(Debug, Clone)]
pub enum RunEnv {
  /// Running as a foreground process.
  Foreground,

  /// Running as a service.
  ///
  /// If the service subsystem has named services, this will contain the
  /// service name.
  Service(Option<String>)
}

/// Used to pass an optional message to the service subsystem whenever a
/// startup or shutdown checkpoint has been reached.
pub enum StateMsg {
  Ref(&'static str),
  Owned(String)
}

impl From<&'static str> for StateMsg {
  fn from(msg: &'static str) -> Self {
    Self::Ref(msg)
  }
}

impl From<String> for StateMsg {
  fn from(msg: String) -> Self {
    Self::Owned(msg)
  }
}

impl AsRef<str> for StateMsg {
  fn as_ref(&self) -> &str {
    match self {
      Self::Ref(s) => s,
      Self::Owned(s) => s
    }
  }
}


/// Report the current startup/shutdown state to the platform service
/// subsystem.
pub(crate) trait StateReporter {
  fn starting(&self, checkpoint: u32, msg: Option<StateMsg>);

  fn started(&self);

  fn stopping(&self, checkpoint: u32, msg: Option<StateMsg>);

  fn stopped(&self);
}


/// Context passed to the `init()` service application callback.
///
/// This context can be used to query whether the service application is
/// running as a foreground process or running within a service subsystem.
///
/// It can also be used to report progress status back to the service
/// subsystem, on platforms that support it.
pub struct InitCtx {
  re: RunEnv,
  sr: Arc<dyn StateReporter + Send + Sync>,
  cnt: Arc<AtomicU32>
}

impl InitCtx {
  /// Return the runtime environment, which identifies whether the service
  /// application is running as a foreground process or a system service.
  #[must_use]
  pub fn runenv(&self) -> RunEnv {
    self.re.clone()
  }

  /// Report startup state to the system service manager.
  ///
  /// For foreground processes, and for services that do not support startup
  /// state notifications, this method has no effect.
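  ///
  /// A minimal sketch of reporting a named checkpoint from a handler's
  /// `init()` callback (the `qsu::rt` module path is assumed):
  ///
  /// ```no_run
  /// # fn demo(ictx: qsu::rt::InitCtx) {
  /// // Reported checkpoints are forwarded to the service subsystem; this is
  /// // a no-op when running as a regular foreground process.
  /// ictx.report(Some("configuration loaded".into()));
  /// # }
  /// ```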
  pub fn report(&self, status: Option<StateMsg>) {
    let checkpoint = self.cnt.fetch_add(1, Ordering::SeqCst);
    if let Some(ref msg) = status {
      tracing::trace!(
        "Reached init checkpoint {checkpoint}; {}",
        msg.as_ref()
      );
    } else {
      tracing::trace!("Reached init checkpoint {checkpoint}");
    }
    self.sr.starting(checkpoint, status);
  }
}


/// Context passed to the `shutdown()` service application callback.
///
/// This context can be used to query whether the service application is
/// running as a foreground process or running within a service subsystem.
///
/// It can also be used to report progress status back to the service
/// subsystem, on platforms that support it.
pub struct TermCtx {
  re: RunEnv,
  sr: Arc<dyn StateReporter + Send + Sync>,
  cnt: Arc<AtomicU32>
}

impl TermCtx {
  /// Return the runtime environment, which identifies whether the service
  /// application is running as a foreground process or a system service.
  #[must_use]
  pub fn runenv(&self) -> RunEnv {
    self.re.clone()
  }

  /// Report shutdown state to the system service manager.
  ///
  /// For foreground processes, and for services that do not support shutdown
  /// state notifications, this method has no effect.
  pub fn report(&self, status: Option<StateMsg>) {
    let checkpoint = self.cnt.fetch_add(1, Ordering::SeqCst);
    if let Some(ref msg) = status {
      tracing::trace!(
        "Reached term checkpoint {checkpoint}; {}",
        msg.as_ref()
      );
    } else {
      tracing::trace!("Reached term checkpoint {checkpoint}");
    }
    self.sr.stopping(checkpoint, status);
  }
}


/// "Synchronous" (non-`async`) server application.
///
/// Implement this for an object that wraps a server application that does not
/// use an async runtime.
pub trait ServiceHandler {
  /// Application-defined error type.
  type AppErr;

  /// Implement to handle service application initialization.
  ///
  /// # Errors
  /// An application-defined error returned from the callback will be wrapped
  /// in [`CbErr::App`] and returned to the application.
  fn init(&mut self, ictx: InitCtx) -> Result<(), Self::AppErr>;

  /// Implement to run the service application.
  ///
  /// # Errors
  /// An application-defined error returned from the callback will be wrapped
  /// in [`CbErr::App`] and returned to the application.
  fn run(&mut self, re: &RunEnv) -> Result<(), Self::AppErr>;

  /// Implement to handle service application termination.
  ///
  /// # Errors
  /// An application-defined error returned from the callback will be wrapped
  /// in [`CbErr::App`] and returned to the application.
  fn shutdown(&mut self, tctx: TermCtx) -> Result<(), Self::AppErr>;
}


/// `async` server application built on the tokio runtime.
///
/// Implement this for an object that wraps a server application that uses
/// tokio as an async runtime.
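///
/// A minimal sketch of an implementation (assuming the application depends on
/// the `async_trait` crate directly and that the `qsu::rt` module path is
/// used; `MyTokioService` is a placeholder):
///
/// ```no_run
/// use async_trait::async_trait;
/// use qsu::rt::{InitCtx, RunEnv, TermCtx, TokioServiceHandler};
///
/// struct MyTokioService;
///
/// #[async_trait]
/// impl TokioServiceHandler for MyTokioService {
///   type AppErr = String;
///
///   async fn init(&mut self, ictx: InitCtx) -> Result<(), String> {
///     ictx.report(Some("initialized".into()));
///     Ok(())
///   }
///
///   async fn run(&mut self, _re: &RunEnv) -> Result<(), String> {
///     // Drive the async server application here.
///     Ok(())
///   }
///
///   async fn shutdown(&mut self, _tctx: TermCtx) -> Result<(), String> {
///     Ok(())
///   }
/// }
/// ```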
#[cfg(feature = "tokio")]
#[cfg_attr(docsrs, doc(cfg(feature = "tokio")))]
#[async_trait]
pub trait TokioServiceHandler {
  /// Application-defined error type.
  type AppErr;

  /// Implement to handle service application initialization.
  async fn init(&mut self, ictx: InitCtx) -> Result<(), Self::AppErr>;

  /// Implement to run the service application.
  async fn run(&mut self, re: &RunEnv) -> Result<(), Self::AppErr>;

  /// Implement to handle service application termination.
  async fn shutdown(&mut self, tctx: TermCtx) -> Result<(), Self::AppErr>;
}


/// Rocket server application handler.
///
/// While Rocket is built on top of tokio, it \[Rocket\] wants to initialize
/// tokio itself.
///
/// There are two major ways to write Rocket services using qsu. The first is
/// to let qsu manage the server application's `Rocket` instances: the
/// application creates the `Rocket` instances in
/// `RocketServiceHandler::init()` and returns them, _qsu_ ignites these
/// rockets and passes them to `RocketServiceHandler::run()`, and the
/// application is responsible for launching the rockets at that point.
///
/// The other way is to manage the `Rocket` instances entirely in application
/// code (by not returning any rocket instances from `init()`).
///
/// Allowing _qsu_ to manage the `Rocket` instances will cause _qsu_ to request
/// graceful shutdown of all `Rocket` instances once a `SvcEvt::Shutdown` is
/// sent by the runtime.
///
/// It is recommended that `ctrlc` shutdown and termination signals are
/// disabled in each `Rocket` instance's configuration, allowing the _qsu_
/// runtime to be responsible for initiating the `Rocket` shutdown.
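///
/// A minimal sketch of the first approach, where qsu manages the `Rocket`
/// instances (the `qsu::rt` module path and the use of `String` as the
/// application error type are assumptions made for this example):
///
/// ```no_run
/// use async_trait::async_trait;
/// use qsu::rt::{InitCtx, RocketServiceHandler, RunEnv, TermCtx};
///
/// struct MyRocketService;
///
/// #[async_trait]
/// impl RocketServiceHandler for MyRocketService {
///   type AppErr = String;
///
///   async fn init(
///     &mut self,
///     _ictx: InitCtx
///   ) -> Result<Vec<rocket::Rocket<rocket::Build>>, String> {
///     // Hand the Rocket instance(s) to qsu; it will ignite them and pass
///     // them back to `run()`.
///     Ok(vec![rocket::build()])
///   }
///
///   async fn run(
///     &mut self,
///     rockets: Vec<rocket::Rocket<rocket::Ignite>>,
///     _re: &RunEnv
///   ) -> Result<(), String> {
///     // Launch the ignited rocket(s) and await them.
///     for rocket in rockets {
///       rocket.launch().await.map_err(|e| e.to_string())?;
///     }
///     Ok(())
///   }
///
///   async fn shutdown(&mut self, _tctx: TermCtx) -> Result<(), String> {
///     Ok(())
///   }
/// }
/// ```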
#[cfg(feature = "rocket")]
#[cfg_attr(docsrs, doc(cfg(feature = "rocket")))]
#[async_trait]
pub trait RocketServiceHandler {
  /// Application-defined error type.
  type AppErr;

  /// Rocket service initialization.
  ///
  /// The returned `Rocket`s will be ignited and their shutdown handlers will
  /// be triggered on shutdown.
  async fn init(
    &mut self,
    ictx: InitCtx
  ) -> Result<Vec<rocket::Rocket<rocket::Build>>, Self::AppErr>;

  /// Server application main entry point.
  ///
  /// If the `init()` trait method returned `Rocket<Build>` instances to the
  /// qsu runtime, it will ignite these and pass them to `run()`. It is the
  /// responsibility of this method to launch the rockets and await them.
  async fn run(
    &mut self,
    rockets: Vec<rocket::Rocket<rocket::Ignite>>,
    re: &RunEnv
  ) -> Result<(), Self::AppErr>;

  /// Implement to handle service application termination.
  async fn shutdown(&mut self, tctx: TermCtx) -> Result<(), Self::AppErr>;
}


/// The means through which the service termination happened.
#[derive(Copy, Clone, Debug)]
pub enum Demise {
  /// On unixy platforms, this indicates that the termination was initiated
  /// via a `SIGINT` signal. When running as a foreground process on Windows,
  /// this indicates that Ctrl+C was issued.
  Interrupted,

  /// On unixy platforms, this indicates that the termination was initiated
  /// via the `SIGTERM` signal.
  ///
  /// On Windows, running as a service, this indicates that the service
  /// subsystem requested the service to be shut down. Running as a foreground
  /// process, this means that Ctrl+Break was issued or that the console
  /// window was closed.
  Terminated,

  /// Reached the end of the service application without any external requests
  /// to terminate.
  ReachedEnd
}

/// User signals (`SIGUSR1`/`SIGUSR2` on unixy platforms).
#[derive(Copy, Clone, Debug)]
pub enum UserSig {
  /// SIGUSR1
  Sig1,

  /// SIGUSR2
  Sig2
}


/// Event notifications that originate from the service subsystem that is
/// controlling the server application.
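///
/// A typical service event handler closure, of the kind that is passed to
/// [`RunCtx::run_sync()`] and friends, might dispatch on these events like
/// this (a minimal sketch; the `qsu::rt` module path is assumed):
///
/// ```no_run
/// use qsu::rt::SvcEvt;
///
/// let svcevt_handler: Box<dyn FnMut(SvcEvt) + Send> =
///   Box::new(|evt: SvcEvt| match evt {
///     SvcEvt::ReloadConf => { /* reread the configuration */ }
///     SvcEvt::Shutdown(demise) => {
///       // Signal the server application to begin an orderly shutdown.
///       println!("shutting down: {demise:?}");
///     }
///     _ => {}
///   });
/// ```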
#[derive(Copy, Clone, Debug)]
pub enum SvcEvt {
  /// User events.
  ///
  /// These will be generated on unixy platforms if the process receives
  /// SIGUSR1 or SIGUSR2.
  User(UserSig),

  /// Service subsystem has requested that the server application should pause
  /// its operations.
  ///
  /// Only the Windows service subsystem will emit these events.
  Pause,

  /// Service subsystem has requested that the server application should
  /// resume its operations.
  ///
  /// Only the Windows service subsystem will emit these events.
  Resume,

  /// Service subsystem has requested that the service's configuration should
  /// be reread.
  ///
  /// On unixy platforms this is triggered by SIGHUP, and is unsupported on
  /// Windows.
  ReloadConf,

  /// The service application is terminating. The `Demise` value indicates
  /// the reason for the shutdown.
  Shutdown(Demise)
}


/// The server application runtime type.
// large_enum_variant isn't relevant here because only one instance of this is
// ever created for a process.
#[allow(clippy::large_enum_variant, clippy::module_name_repetitions)]
pub enum SrvAppRt<ApEr> {
  /// A plain non-async (sometimes referred to as "blocking") server
  /// application.
  Sync {
    svcevt_handler: Box<dyn FnMut(SvcEvt) + Send>,
    rt_handler: Box<dyn ServiceHandler<AppErr = ApEr> + Send>
  },

  /// Initialize a tokio runtime, and call each application handler from an
  /// async context.
  #[cfg(feature = "tokio")]
  #[cfg_attr(docsrs, doc(cfg(feature = "tokio")))]
  Tokio {
    rtbldr: Option<runtime::Builder>,
    svcevt_handler: Box<dyn FnMut(SvcEvt) + Send>,
    rt_handler: Box<dyn TokioServiceHandler<AppErr = ApEr> + Send>
  },

  /// Allow Rocket to initialize the tokio runtime, and call each application
  /// handler from an async context.
  #[cfg(feature = "rocket")]
  #[cfg_attr(docsrs, doc(cfg(feature = "rocket")))]
  Rocket {
    svcevt_handler: Box<dyn FnMut(SvcEvt) + Send>,
    rt_handler: Box<dyn RocketServiceHandler<AppErr = ApEr> + Send>
  }
}


/// Service runner context.
pub struct RunCtx {
  service: bool,
  svcname: String,
  log_init: bool,
  test_mode: bool
}

impl RunCtx {
  /// Run as a systemd service.
  #[cfg(all(target_os = "linux", feature = "systemd"))]
  fn systemd<ApEr>(self, st: SrvAppRt<ApEr>) -> Result<(), CbErr<ApEr>>
  where
    ApEr: Send + std::fmt::Debug
  {
    LumberJack::default().set_init(self.log_init).init()?;

    tracing::debug!("Running service '{}'", self.svcname);

    let sr = Arc::new(systemd::ServiceReporter {});

    let re = RunEnv::Service(Some(self.svcname.clone()));

    match st {
      SrvAppRt::Sync {
        svcevt_handler,
        rt_handler
      } => rttype::sync_main(rttype::SyncMainParams {
        re,
        svcevt_handler,
        rt_handler,
        sr,
        svcevt_ch: None,
        test_mode: self.test_mode
      }),
      #[cfg(feature = "tokio")]
      SrvAppRt::Tokio {
        rtbldr,
        svcevt_handler,
        rt_handler
      } => rttype::tokio_main(
        rtbldr,
        rttype::TokioMainParams {
          re,
          svcevt_handler,
          rt_handler,
          sr,
          svcevt_ch: None
        }
      ),
      #[cfg(feature = "rocket")]
      SrvAppRt::Rocket {
        svcevt_handler,
        rt_handler
      } => rttype::rocket_main(rttype::RocketMainParams {
        re,
        svcevt_handler,
        rt_handler,
        sr,
        svcevt_ch: None
      })
    }
  }

  /// Run as a Windows service.
  #[cfg(windows)]
  fn winsvc<ApEr>(self, st: SrvAppRt<ApEr>) -> Result<(), CbErr<ApEr>>
  where
    ApEr: Send + 'static + std::fmt::Debug
  {
    winsvc::run(&self.svcname, st)?;

    Ok(())
  }

  /// Run as a foreground server.
  fn foreground<ApEr>(self, st: SrvAppRt<ApEr>) -> Result<(), CbErr<ApEr>>
  where
    ApEr: Send + std::fmt::Debug
  {
    LumberJack::default().set_init(self.log_init).init()?;

    tracing::debug!("Running service '{}'", self.svcname);

    let sr = Arc::new(nosvc::ServiceReporter {});

    match st {
      SrvAppRt::Sync {
        svcevt_handler,
        rt_handler
      } => rttype::sync_main(rttype::SyncMainParams {
        re: RunEnv::Foreground,
        svcevt_handler,
        rt_handler,
        sr,
        svcevt_ch: None,
        test_mode: self.test_mode
      }),

      #[cfg(feature = "tokio")]
      SrvAppRt::Tokio {
        rtbldr,
        svcevt_handler,
        rt_handler
      } => rttype::tokio_main(
        rtbldr,
        rttype::TokioMainParams {
          re: RunEnv::Foreground,
          svcevt_handler,
          rt_handler,
          sr,
          svcevt_ch: None
        }
      ),

      #[cfg(feature = "rocket")]
      SrvAppRt::Rocket {
        svcevt_handler,
        rt_handler
      } => rttype::rocket_main(rttype::RocketMainParams {
        re: RunEnv::Foreground,
        svcevt_handler,
        rt_handler,
        sr,
        svcevt_ch: None
      })
    }
  }
}

impl RunCtx {
  /// Create a new service running context.
  #[must_use]
  pub fn new(name: &str) -> Self {
    Self {
      service: false,
      svcname: name.into(),
      log_init: true,
      test_mode: false
    }
  }

  /// Enable test mode.
  ///
  /// This method is intended for tests only.
  ///
  /// qsu performs a few global initializations that will fail if run
  /// repeatedly within the same process. This causes problems when running
  /// tests, because Rust may run tests in threads within the same process.
  #[doc(hidden)]
  #[must_use]
  pub const fn test_mode(mut self) -> Self {
    self.log_init = false;
    self.test_mode = true;
    self
  }

  /// Enable or disable logging/tracing initialization.
  ///
  /// This is useful in tests because tests may run in different threads within
  /// the same process, causing the log/tracing initialization to panic.
  #[doc(hidden)]
  #[must_use]
  pub const fn log_init(mut self, flag: bool) -> Self {
    self.log_init = flag;
    self
  }

  /// Reference version of [`RunCtx::log_init()`].
  #[doc(hidden)]
  pub fn log_init_ref(&mut self, flag: bool) -> &mut Self {
    self.log_init = flag;
    self
  }

  /// Mark this run context to run under the operating system's service
  /// subsystem, if one is available on this platform.
  #[must_use]
  pub const fn service(mut self) -> Self {
    self.service = true;
    self
  }

  /// Mark this run context to run under the operating system's service
  /// subsystem, if one is available on this platform.
  pub fn service_ref(&mut self) -> &mut Self {
    self.service = true;
    self
  }

  /// Return `true` if this run context has been marked to run under a service
  /// subsystem.
  #[must_use]
  pub const fn is_service(&self) -> bool {
    self.service
  }

  /// Launch the application.
  ///
  /// If this `RunCtx` has been marked as a _service_ then it will perform the
  /// appropriate service subsystem integration before running the actual
  /// server application code.
  ///
  /// This function must only be called from the main thread of the process,
  /// and must be called before any other threads are started.
  ///
  /// # Errors
  /// [`CbErr::App`] is returned, containing an application-specific error, if
  /// an application callback returned an error. [`CbErr::Lib`] indicates that
  /// an error occurred in the qsu runtime.
  pub fn run<ApEr>(self, st: SrvAppRt<ApEr>) -> Result<(), CbErr<ApEr>>
  where
    ApEr: Send + 'static + std::fmt::Debug
  {
    if self.service {
      let _ = RUNAS.set(RunAs::SvcSubsys);

      #[cfg(all(target_os = "linux", feature = "systemd"))]
      self.systemd(st)?;

      #[cfg(windows)]
      self.winsvc(st)?;

      // ToDo: We should check for other platforms here (like macOS/launchd)
    } else {
      let _ = RUNAS.set(RunAs::Foreground);

      // Do not run against any specific service subsystem. Despite its name,
      // this isn't necessarily running as a foreground process; some service
      // subsystems do not make a distinction. Perhaps a better mental model
      // is that certain service subsystems expect to run regular "foreground"
      // processes.
      self.foreground(st)?;
    }

    Ok(())
  }

  /// Convenience method around [`Self::run()`] using [`SrvAppRt::Sync`].
  #[allow(clippy::missing_errors_doc)]
  pub fn run_sync<ApEr>(
    self,
    svcevt_handler: Box<dyn FnMut(SvcEvt) + Send>,
    rt_handler: Box<dyn ServiceHandler<AppErr = ApEr> + Send>
  ) -> Result<(), CbErr<ApEr>>
  where
    ApEr: Send + 'static + std::fmt::Debug
  {
    self.run(SrvAppRt::Sync {
      svcevt_handler,
      rt_handler
    })
  }

  /// Convenience method around [`Self::run()`] using [`SrvAppRt::Tokio`].
  #[cfg(feature = "tokio")]
  #[cfg_attr(docsrs, doc(cfg(feature = "tokio")))]
  #[allow(clippy::missing_errors_doc)]
  pub fn run_tokio<ApEr>(
    self,
    rtbldr: Option<runtime::Builder>,
    svcevt_handler: Box<dyn FnMut(SvcEvt) + Send>,
    rt_handler: Box<dyn TokioServiceHandler<AppErr = ApEr> + Send>
  ) -> Result<(), CbErr<ApEr>>
  where
    ApEr: Send + 'static + std::fmt::Debug
  {
    self.run(SrvAppRt::Tokio {
      rtbldr,
      svcevt_handler,
      rt_handler
    })
  }

  /// Convenience method around [`Self::run()`] using [`SrvAppRt::Rocket`].
  #[cfg(feature = "rocket")]
  #[cfg_attr(docsrs, doc(cfg(feature = "rocket")))]
  #[allow(clippy::missing_errors_doc)]
  pub fn run_rocket<ApEr>(
    self,
    svcevt_handler: Box<dyn FnMut(SvcEvt) + Send>,
    rt_handler: Box<dyn RocketServiceHandler<AppErr = ApEr> + Send>
  ) -> Result<(), CbErr<ApEr>>
  where
    ApEr: Send + 'static + std::fmt::Debug
  {
    self.run(SrvAppRt::Rocket {
      svcevt_handler,
      rt_handler
    })
  }
}

/// Internal thread used to run the service event handler.
#[tracing::instrument(name = "svcevtthread", skip_all)]
fn svcevt_thread(
  mut rx: broadcast::Receiver<SvcEvt>,
  mut evt_handler: Box<dyn FnMut(SvcEvt) + Send>
) {
  while let Ok(msg) = rx.blocking_recv() {
    tracing::debug!("Received {:?}", msg);

    #[cfg(all(target_os = "linux", feature = "systemd"))]
    if matches!(msg, SvcEvt::ReloadConf) {
      //
      // Reload has been requested -- report RELOADING=1 and MONOTONIC_USEC to
      // systemd before calling the application callback.
      //
      let ts =
        nix::time::clock_gettime(nix::time::ClockId::CLOCK_MONOTONIC).unwrap();
      let s = format!(
        "RELOADING=1\nMONOTONIC_USEC={}{:06}",
        ts.tv_sec(),
        ts.tv_nsec() / 1000
      );
      tracing::trace!("Sending notification to systemd: {}", s);

      let custom = NotifyState::Custom(&s);
      if let Err(e) = sd_notify::notify(false, &[custom]) {
        log::error!(
          "Unable to send RELOADING=1 notification to systemd; {}",
          e
        );
      }
    }

    //
    // Call the application callback.
    //
    evt_handler(msg);

    #[cfg(all(target_os = "linux", feature = "systemd"))]
    if matches!(msg, SvcEvt::ReloadConf) {
      //
      // This is a reload; report READY=1 to systemd after the application
      // callback has been called.
      //
      tracing::trace!("Sending notification to systemd: READY=1");
      if let Err(e) = sd_notify::notify(false, &[NotifyState::Ready]) {
        log::error!("Unable to send READY=1 notification to systemd; {}", e);
      }
    }

    // If the event message was a shutdown (which includes termination), break
    // out of the loop so the thread will terminate.
    if let SvcEvt::Shutdown(_) = msg {
      tracing::debug!("Terminating thread");
      // Break out of the loop when service shutdown has been requested.
      break;
    }
  }
}

795// vim: set ft=rust et sw=2 ts=2 sts=2 cinoptions=2 tw=79 :