hyperi-rustlib 2.7.1

Opinionated, drop-in Rust toolkit for production services at scale. The patterns from blog posts as actual code: 8-layer config cascade, structured logging with PII masking, Prometheus + OpenTelemetry, Kafka/gRPC transports, tiered disk-spillover, adaptive worker pools, graceful shutdown.
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
// Project:   hyperi-rustlib
// File:      src/lib.rs
// Purpose:   Main library entry point and public API exports
// Language:  Rust
//
// License:   FSL-1.1-ALv2
// Copyright: (c) 2026 HYPERI PTY LIMITED

//! # hyperi-rustlib
//!
//! There's plenty of sage advice out there about how to run Rust services in
//! production at scale — config cascades, structured logging, masking secrets,
//! multi-backend secrets management, Prometheus, OpenTelemetry, Kafka transports,
//! tiered disk-spillover sinks, adaptive worker pools, graceful shutdown — but
//! almost none of it as code you can just install and use.
//!
//! **This is that code.** Opinionated, drop-in, working out of the box. The
//! patterns from blog posts as an actual library — not a framework you
//! assemble from twenty crates and a weekend.
//!
//! Built as the foundation for HyperI's PB/hr data services. Generic enough
//! that you don't need to be at HyperI to use it.
//!
//! ## Quick Start
//!
//! ```rust,no_run
//! use hyperi_rustlib::{env, config, logger, metrics};
//!
//! fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     // Detect runtime environment
//!     let environment = env::Environment::detect();
//!     println!("Running in: {:?}", environment);
//!
//!     // Initialise logger (respects LOG_LEVEL env var)
//!     logger::setup_default()?;
//!
//!     // Load configuration with 8-layer cascade
//!     config::setup(config::ConfigOptions {
//!         env_prefix: "MYAPP".into(),
//!         ..Default::default()
//!     })?;
//!
//!     // Access config
//!     let cfg = config::get();
//!     let db_host = cfg.get_string("database.host").unwrap_or_default();
//!
//!     // Create metrics
//!     let metrics_mgr = metrics::MetricsManager::new("myapp");
//!     let _counter = metrics_mgr.counter("requests_total", "Total requests processed");
//!
//!     tracing::info!(db_host = %db_host, "Application started");
//!     Ok(())
//! }
//! ```
//!
//! See `docs/CORE-PILLARS.md` in the repository for the auto-wiring architecture.

#![deny(unsafe_code)]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![warn(clippy::pedantic)]
// Crate-wide hard-deny for the most pernicious async footgun. Holding a
// sync `Mutex`/`RwLock` guard across `.await` is the canonical way to
// deadlock a tokio runtime under contention. The lint catches this at
// compile time. Integration tests that legitimately serialise via a
// sync mutex opt out file-locally with `#![allow(...)]` and a comment
// explaining the safety reasoning.
#![deny(clippy::await_holding_lock)]
#![warn(rustdoc::broken_intra_doc_links)]
#![warn(rustdoc::private_intra_doc_links)]
#![warn(rustdoc::invalid_codeblock_attributes)]
#![warn(rustdoc::bare_urls)]
#![allow(clippy::module_name_repetitions)]
#![allow(clippy::doc_markdown)] // Allow brand names without backticks
#![allow(clippy::cast_precision_loss)] // Metrics values are fine with f64 precision
#![allow(clippy::missing_panics_doc)] // MVP does not require exhaustive docs
#![allow(clippy::missing_errors_doc)] // MVP does not require exhaustive docs
#![allow(clippy::double_must_use)] // Return types already marked must_use
#![allow(clippy::unused_async)] // Async for future compatibility
#![allow(clippy::redundant_closure_for_method_calls)] // Clearer with explicit closure
#![allow(clippy::result_large_err)] // figment::Error is large by design
#![allow(clippy::needless_pass_by_value)]
// API cleaner with owned values
// Test code allowances - unwrap is acceptable in tests for cleaner assertions
#![cfg_attr(test, allow(clippy::unwrap_used))]
#![cfg_attr(test, allow(clippy::field_reassign_with_default))]

// Core modules (always available, no feature gate).
pub mod env;
pub mod kafka_config;
pub mod sensitive;

// Feature-gated modules.
//
// Each module below compiles only when the named Cargo feature is enabled.
// The accompanying `#[cfg_attr(docsrs, doc(cfg(...)))]` is a docs.rs-only
// annotation: it renders a "requires feature ..." banner in the generated
// documentation (enabled by `#![cfg_attr(docsrs, feature(doc_cfg))]` at the
// top of this file) and is inert in ordinary builds.

#[cfg(feature = "runtime")]
#[cfg_attr(docsrs, doc(cfg(feature = "runtime")))]
pub mod runtime;

#[cfg(feature = "shutdown")]
#[cfg_attr(docsrs, doc(cfg(feature = "shutdown")))]
pub mod shutdown;

#[cfg(feature = "health")]
#[cfg_attr(docsrs, doc(cfg(feature = "health")))]
pub mod health;

#[cfg(feature = "config")]
#[cfg_attr(docsrs, doc(cfg(feature = "config")))]
pub mod config;

#[cfg(feature = "logger")]
#[cfg_attr(docsrs, doc(cfg(feature = "logger")))]
pub mod logger;

// Shared by both metrics backends: enabling either the `metrics` or the
// `otel-metrics` feature pulls this module in.
#[cfg(any(feature = "metrics", feature = "otel-metrics"))]
#[cfg_attr(docsrs, doc(cfg(any(feature = "metrics", feature = "otel-metrics"))))]
pub mod metrics;

#[cfg(feature = "otel-tracing")]
#[cfg_attr(docsrs, doc(cfg(feature = "otel-tracing")))]
pub mod otel_tracing;

#[cfg(feature = "transport")]
#[cfg_attr(docsrs, doc(cfg(feature = "transport")))]
pub mod transport;

// NOTE: feature name (`http`) and module name (`http_client`) differ; the
// server side has its own `http-server` feature below.
#[cfg(feature = "http")]
#[cfg_attr(docsrs, doc(cfg(feature = "http")))]
pub mod http_client;

#[cfg(feature = "http-server")]
#[cfg_attr(docsrs, doc(cfg(feature = "http-server")))]
pub mod http_server;

#[cfg(feature = "database")]
#[cfg_attr(docsrs, doc(cfg(feature = "database")))]
pub mod database;

#[cfg(feature = "cache")]
#[cfg_attr(docsrs, doc(cfg(feature = "cache")))]
pub mod cache;

#[cfg(feature = "spool")]
#[cfg_attr(docsrs, doc(cfg(feature = "spool")))]
pub mod spool;

#[cfg(feature = "tiered-sink")]
#[cfg_attr(docsrs, doc(cfg(feature = "tiered-sink")))]
pub mod tiered_sink;

#[cfg(feature = "secrets")]
#[cfg_attr(docsrs, doc(cfg(feature = "secrets")))]
pub mod secrets;

#[cfg(feature = "directory-config")]
#[cfg_attr(docsrs, doc(cfg(feature = "directory-config")))]
pub mod directory_config;

#[cfg(feature = "memory")]
#[cfg_attr(docsrs, doc(cfg(feature = "memory")))]
pub mod memory;

#[cfg(feature = "scaling")]
#[cfg_attr(docsrs, doc(cfg(feature = "scaling")))]
pub mod scaling;

// One module serving three related worker features; enabling any of them
// pulls the module in.
#[cfg(any(feature = "worker-pool", feature = "worker-batch", feature = "worker"))]
#[cfg_attr(
    docsrs,
    doc(cfg(any(feature = "worker-pool", feature = "worker-batch", feature = "worker")))
)]
pub mod worker;

#[cfg(feature = "cli")]
#[cfg_attr(docsrs, doc(cfg(feature = "cli")))]
pub mod cli;

#[cfg(feature = "top")]
#[cfg_attr(docsrs, doc(cfg(feature = "top")))]
pub mod top;

#[cfg(feature = "io")]
#[cfg_attr(docsrs, doc(cfg(feature = "io")))]
pub mod io;

#[cfg(feature = "dlq")]
#[cfg_attr(docsrs, doc(cfg(feature = "dlq")))]
pub mod dlq;

// NOTE: feature name (`output-file`) and module name (`output`) differ.
#[cfg(feature = "output-file")]
#[cfg_attr(docsrs, doc(cfg(feature = "output-file")))]
pub mod output;

#[cfg(feature = "expression")]
#[cfg_attr(docsrs, doc(cfg(feature = "expression")))]
pub mod expression;

#[cfg(feature = "deployment")]
#[cfg_attr(docsrs, doc(cfg(feature = "deployment")))]
pub mod deployment;

#[cfg(feature = "version-check")]
#[cfg_attr(docsrs, doc(cfg(feature = "version-check")))]
pub mod version_check;

#[cfg(feature = "concurrency")]
#[cfg_attr(docsrs, doc(cfg(feature = "concurrency")))]
pub mod concurrency;

// Re-export common types at crate root, so downstream code can write e.g.
// `hyperi_rustlib::Config` instead of `hyperi_rustlib::config::Config`.
//
// Each re-export mirrors the feature gate of its source module. Some are
// gated on a *narrower* sub-feature than the module itself (for example
// `config-reload` / `config-postgres` under the `config` module, or
// `secrets-vault` / `secrets-aws` under `secrets`), so a given type only
// appears at the root when its specific sub-feature is enabled.

// Always-available re-exports (core modules).
pub use env::{Environment, RuntimeContext, runtime_context};
pub use kafka_config::{
    DfeSource, KafkaConfigError, KafkaConfigResult, ServiceRole, TOPIC_SUFFIX_LAND,
    TOPIC_SUFFIX_LOAD, config_from_file, config_from_properties_str,
};
pub use sensitive::SensitiveString;

// Feature-gated re-exports, in the same order as the module declarations.
#[cfg(feature = "runtime")]
#[cfg_attr(docsrs, doc(cfg(feature = "runtime")))]
pub use runtime::RuntimePaths;

#[cfg(feature = "health")]
#[cfg_attr(docsrs, doc(cfg(feature = "health")))]
pub use health::{HealthRegistry, HealthStatus};

#[cfg(feature = "config")]
#[cfg_attr(docsrs, doc(cfg(feature = "config")))]
pub use config::{Config, ConfigError, ConfigOptions};

#[cfg(feature = "config")]
#[cfg_attr(docsrs, doc(cfg(feature = "config")))]
pub use config::flat_env::{ApplyFlatEnv, EnvVarDoc, Normalize};

#[cfg(feature = "config-reload")]
#[cfg_attr(docsrs, doc(cfg(feature = "config-reload")))]
pub use config::reloader::{ConfigReloader, ReloaderConfig};

#[cfg(feature = "config-reload")]
#[cfg_attr(docsrs, doc(cfg(feature = "config-reload")))]
pub use config::shared::SharedConfig;

#[cfg(feature = "config-postgres")]
#[cfg_attr(docsrs, doc(cfg(feature = "config-postgres")))]
pub use config::postgres::{
    FallbackMode, PostgresConfig, PostgresConfigError, PostgresConfigSource,
};

#[cfg(feature = "logger")]
#[cfg_attr(docsrs, doc(cfg(feature = "logger")))]
pub use logger::{
    LogFormat, LoggerError, LoggerOptions, SecurityEvent, SecurityOutcome, ThrottleConfig,
};

#[cfg(any(feature = "metrics", feature = "otel-metrics"))]
#[cfg_attr(docsrs, doc(cfg(any(feature = "metrics", feature = "otel-metrics"))))]
pub use metrics::{DfeMetrics, MetricsConfig, MetricsError, MetricsManager};

// OTel-specific config types only exist under the `otel-metrics` feature.
#[cfg(feature = "otel-metrics")]
#[cfg_attr(docsrs, doc(cfg(feature = "otel-metrics")))]
pub use metrics::{OtelMetricsConfig, OtelProtocol};

#[cfg(feature = "transport")]
#[cfg_attr(docsrs, doc(cfg(feature = "transport")))]
pub use transport::{
    CommitToken, Message, PayloadFormat, SendResult, Transport, TransportConfig, TransportError,
    TransportResult, TransportType,
};

#[cfg(feature = "http-server")]
#[cfg_attr(docsrs, doc(cfg(feature = "http-server")))]
pub use http_server::{HttpServer, HttpServerConfig, HttpServerError};

#[cfg(feature = "spool")]
#[cfg_attr(docsrs, doc(cfg(feature = "spool")))]
pub use spool::{Spool, SpoolConfig, SpoolError};

#[cfg(feature = "tiered-sink")]
#[cfg_attr(docsrs, doc(cfg(feature = "tiered-sink")))]
pub use tiered_sink::{
    CircuitBreaker, CircuitState, CompressionCodec, DrainStrategy, OrderingMode, Sink, SinkError,
    TieredSink, TieredSinkConfig, TieredSinkError,
};

#[cfg(feature = "secrets")]
#[cfg_attr(docsrs, doc(cfg(feature = "secrets")))]
pub use secrets::{
    CacheConfig, FileProvider, RotationEvent, SecretMetadata, SecretProvider, SecretSource,
    SecretValue, SecretsConfig, SecretsError, SecretsManager, SecretsResult,
};

// Backend-specific secrets providers, each behind its own sub-feature.
#[cfg(feature = "secrets-vault")]
#[cfg_attr(docsrs, doc(cfg(feature = "secrets-vault")))]
pub use secrets::{OpenBaoAuth, OpenBaoConfig, OpenBaoProvider};

#[cfg(feature = "secrets-aws")]
#[cfg_attr(docsrs, doc(cfg(feature = "secrets-aws")))]
pub use secrets::{AwsConfig, AwsProvider};

#[cfg(feature = "directory-config")]
#[cfg_attr(docsrs, doc(cfg(feature = "directory-config")))]
pub use directory_config::{
    ChangeEvent, ChangeOperation, DirectoryConfigError, DirectoryConfigResult,
    DirectoryConfigStore, DirectoryConfigStoreConfig, WriteMode, WriteResult,
};

#[cfg(feature = "memory")]
#[cfg_attr(docsrs, doc(cfg(feature = "memory")))]
pub use memory::{MemoryGuard, MemoryGuardConfig, MemoryPressure, detect_memory_limit};

#[cfg(feature = "scaling")]
#[cfg_attr(docsrs, doc(cfg(feature = "scaling")))]
pub use scaling::{
    ComponentSnapshot, GateType, PressureSnapshot, RateWindow, ScalingComponent, ScalingPressure,
    ScalingPressureConfig,
};

#[cfg(any(feature = "worker-pool", feature = "worker-batch", feature = "worker"))]
#[cfg_attr(
    docsrs,
    doc(cfg(any(feature = "worker-pool", feature = "worker-batch", feature = "worker")))
)]
pub use worker::{
    AccumulatorConfig, AccumulatorFull, AdaptiveWorkerPool, BatchAccumulator, BatchDrainer,
    BatchPipeline, BatchProcessor, PipelineStats, PipelineStatsSnapshot, ScalingDecision,
    ScalingInput, WorkerPoolConfig,
};

#[cfg(feature = "cli")]
#[cfg_attr(docsrs, doc(cfg(feature = "cli")))]
pub use cli::{CliError, CommonArgs, StandardCommand, VersionInfo};

#[cfg(feature = "cli-service")]
#[cfg_attr(docsrs, doc(cfg(feature = "cli-service")))]
pub use cli::{DfeApp, ServiceRuntime};

#[cfg(feature = "io")]
#[cfg_attr(docsrs, doc(cfg(feature = "io")))]
pub use io::{AsyncNdjsonWriter, FileWriterConfig, NdjsonWriter, RotationPeriod};

#[cfg(feature = "dlq")]
#[cfg_attr(docsrs, doc(cfg(feature = "dlq")))]
pub use dlq::{Dlq, DlqBackend, DlqConfig, DlqEntry, DlqError, DlqMode, DlqSource, FileDlqConfig};

// DLQ backend configs, each behind its own sub-feature.
#[cfg(feature = "dlq-kafka")]
#[cfg_attr(docsrs, doc(cfg(feature = "dlq-kafka")))]
pub use dlq::{DlqRouting, KafkaDlqConfig};

#[cfg(feature = "dlq-http")]
#[cfg_attr(docsrs, doc(cfg(feature = "dlq-http")))]
pub use dlq::HttpDlqConfig;

#[cfg(feature = "dlq-redis")]
#[cfg_attr(docsrs, doc(cfg(feature = "dlq-redis")))]
pub use dlq::RedisDlqConfig;

#[cfg(feature = "output-file")]
#[cfg_attr(docsrs, doc(cfg(feature = "output-file")))]
pub use output::{FileOutput, FileOutputConfig, OutputError};

#[cfg(feature = "expression")]
#[cfg_attr(docsrs, doc(cfg(feature = "expression")))]
pub use expression::{
    ALLOWED_FUNCTIONS, DISALLOWED_FUNCTIONS, ExpressionError, ExpressionResult, build_context,
    compile, evaluate, evaluate_condition, validate,
};

#[cfg(feature = "deployment")]
#[cfg_attr(docsrs, doc(cfg(feature = "deployment")))]
pub use deployment::{
    ContractMismatch, DeploymentContract, DeploymentError, HealthContract, KedaConfig, KedaContract,
};

#[cfg(feature = "version-check")]
#[cfg_attr(docsrs, doc(cfg(feature = "version-check")))]
pub use version_check::{VersionCheck, VersionCheckConfig, VersionCheckResponse};

#[cfg(feature = "concurrency")]
#[cfg_attr(docsrs, doc(cfg(feature = "concurrency")))]
pub use concurrency::{
    Actor, ActorConfig, ActorError, ActorHandle, ActorJoinHandle, BackgroundSink,
    BackgroundSinkConfig, BackgroundSinkHandle, DrainError, Overflow, PeriodicTask, PeriodicWorker,
    SinkDrain, TickError,
};

/// Library version string.
///
/// Resolved at compile time from this crate's `Cargo.toml` `version` field,
/// via the `CARGO_PKG_VERSION` environment variable that Cargo sets for
/// every build.
pub const VERSION: &str = env!("CARGO_PKG_VERSION");

/// Initialise all library components with default settings.
///
/// This is a convenience function that:
/// 1. Sets up the logger with auto-detection (see [`logger::setup_default`])
/// 2. Loads configuration, using `env_prefix` as the prefix for
///    environment-variable overrides (see [`config::setup`])
///
/// Note: unlike the crate-level Quick Start example, this does *not* detect
/// the runtime environment; call [`Environment::detect`] yourself if you
/// need the detected environment.
///
/// # Errors
///
/// Returns an error if logger or config initialisation fails.
#[cfg(all(feature = "config", feature = "logger"))]
pub fn init(env_prefix: &str) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    // Logger first, so diagnostics emitted during config loading have
    // somewhere to go.
    logger::setup_default()?;
    config::setup(config::ConfigOptions {
        env_prefix: env_prefix.to_string(),
        ..Default::default()
    })?;
    Ok(())
}