// hadb_io/src/lib.rs
1//! Shared S3/retry/upload infrastructure for the hadb ecosystem.
2//!
3//! `hadb-io` provides the common infrastructure layer used by all hadb replication
4//! engines (walrust-core, graphstream) and product crates (haqlite, hakuzu):
5//!
6//! - **Retry** — Exponential backoff with full jitter, error classification, circuit breaker
7//! - **S3** — Client helpers for S3-compatible storage (feature-gated)
8//! - **ObjectStore** — Rich storage trait for bulk data operations + S3Backend
9//! - **Webhook** — HTTP POST notifications with HMAC-SHA256 signing
10//! - **Retention** — GFS (Grandfather/Father/Son) snapshot rotation
11//! - **Config** — Shared configuration types (S3, webhook, cache, duration parsing)
12
13pub mod config;
14pub mod retention;
15pub mod retry;
16pub mod storage;
17pub mod uploader;
18pub mod webhook;
19
20#[cfg(feature = "s3")]
21pub mod s3;
22
23// Re-export AWS SDK crates so consumers don't need direct dependencies
24#[cfg(feature = "s3")]
25pub use aws_sdk_s3;
26
27// Re-export primary types for convenience
28pub use config::{CacheConfig, S3Config, WebhookConfig};
29pub use config::parse_duration_string;
30pub use retention::{RetentionPlan, RetentionPolicy, SnapshotEntry, Tier, analyze_retention};
31pub use retry::{
32    CircuitBreaker, CircuitState, ErrorKind, OnCircuitOpen, RetryConfig, RetryOutcome,
33    RetryPolicy, classify_error, is_retryable,
34};
35pub use storage::ObjectStore;
36pub use uploader::{ConcurrentUploader, UploadHandler, UploadMessage, UploaderStats, spawn_uploader};
37pub use webhook::{WebhookEvent, WebhookPayload, WebhookSender, compute_hmac_signature};
38
39#[cfg(feature = "s3")]
40pub use storage::S3Backend;