adaptive_pipeline_domain/value_objects/
encryption_benchmark.rs

// /////////////////////////////////////////////////////////////////////////////
// Adaptive Pipeline
// Copyright (c) 2025 Michael Gardner, A Bit of Help, Inc.
// SPDX-License-Identifier: BSD-3-Clause
// See LICENSE file in the project root.
// /////////////////////////////////////////////////////////////////////////////

//! # Encryption Benchmark Value Object
//!
//! This module provides the [`EncryptionBenchmark`] value object for capturing
//! and analyzing encryption performance metrics in the adaptive pipeline
//! system. It enables performance monitoring, algorithm comparison, and
//! optimization decisions based on real-world benchmarks.
//!
//! ## Features
//!
//! - **Performance Metrics**: Comprehensive capture of throughput, latency,
//!   memory, and CPU usage
//! - **Algorithm Comparison**: Standardized benchmarking across different
//!   encryption algorithms
//! - **Temporal Tracking**: RFC3339-compliant timestamp recording for trend
//!   analysis
//! - **Serialization Support**: Full serde compatibility for persistence and
//!   transmission
//! - **Immutable Design**: Value object semantics with snapshot-based
//!   performance data
//!
//! ## Architecture
//!
//! The `EncryptionBenchmark` follows Domain-Driven Design principles as a value
//! object, representing an immutable snapshot of encryption performance at a
//! specific point in time. It integrates with the pipeline's monitoring and
//! optimization systems to guide algorithm selection and resource allocation
//! decisions.
//!
//! ## Usage Examples
//!
//! ### Creating Benchmark Results
//!
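//! A minimal sketch of recording a benchmark after timing an encryption run.
//! The import paths and the `Aes256Gcm` variant name are assumptions based on
//! this module's own imports; adjust them to the crate's actual re-exports.
//!
//! ```rust,ignore
//! use std::time::Duration;
//!
//! use adaptive_pipeline_domain::services::encryption_service::EncryptionAlgorithm;
//! use adaptive_pipeline_domain::value_objects::encryption_benchmark::EncryptionBenchmark;
//!
//! // Hypothetical measurements gathered while encrypting a 100 MB file.
//! let benchmark = EncryptionBenchmark::new(
//!     EncryptionAlgorithm::Aes256Gcm, // assumed variant name
//!     250.0,                          // throughput in MB/s
//!     Duration::from_millis(8),       // end-to-end latency
//!     64.0,                           // peak memory in MB
//!     45.0,                           // CPU utilization in percent
//!     100.0,                          // file size in MB
//! );
//!
//! assert!(benchmark.throughput_mbps > 200.0);
//! ```
//!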
//! ### Comparing Algorithm Performance
//!
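//! A sketch of ranking two candidate benchmarks by sustained throughput;
//! `aes_benchmark` and `chacha_benchmark` are placeholders for benchmarks
//! recorded as shown above.
//!
//! ```rust,ignore
//! use adaptive_pipeline_domain::value_objects::encryption_benchmark::EncryptionBenchmark;
//!
//! fn faster<'a>(
//!     a: &'a EncryptionBenchmark,
//!     b: &'a EncryptionBenchmark,
//! ) -> &'a EncryptionBenchmark {
//!     // Prefer the benchmark with the higher sustained throughput.
//!     if a.throughput_mbps >= b.throughput_mbps { a } else { b }
//! }
//!
//! let winner = faster(&aes_benchmark, &chacha_benchmark);
//! println!("selected algorithm: {:?}", winner.algorithm);
//! ```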
//!
//! ## Performance Analysis
//!
//! ### Throughput Analysis
//!
//! Throughput measurements provide insight into sustained data processing
//! rates; a rough classification sketch follows the list:
//!
//! - **High Throughput (>200 MB/s)**: Excellent for large file processing
//! - **Medium Throughput (50-200 MB/s)**: Good for general-purpose encryption
//! - **Low Throughput (<50 MB/s)**: May indicate CPU bottleneck or algorithm
//!   inefficiency
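//!
//! A minimal sketch of mapping a measured throughput onto these bands; the
//! band boundaries mirror the list above and the tier names are illustrative,
//! not part of the API.
//!
//! ```rust,ignore
//! /// Hypothetical helper: classify sustained throughput in MB/s.
//! fn throughput_tier(throughput_mbps: f64) -> &'static str {
//!     if throughput_mbps > 200.0 {
//!         "high"
//!     } else if throughput_mbps >= 50.0 {
//!         "medium"
//!     } else {
//!         "low"
//!     }
//! }
//!
//! assert_eq!(throughput_tier(250.0), "high");
//! assert_eq!(throughput_tier(42.0), "low");
//! ```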
//!
//! ### Latency Considerations
//!
//! Latency affects responsiveness in interactive scenarios:
//!
//! - **Low Latency (<10ms)**: Suitable for real-time processing
//! - **Medium Latency (10-100ms)**: Acceptable for batch processing
//! - **High Latency (>100ms)**: May require optimization or algorithm change
//!
//! ### Resource Utilization
//!
//! Memory and CPU usage inform resource allocation decisions:
//!
//! - **Memory Usage**: Higher usage may indicate buffering or inefficient
//!   implementation
//! - **CPU Usage**: Should correlate with throughput; high CPU with low
//!   throughput indicates inefficiency
//!
//! ### Efficiency Metrics
//!
//! Calculate efficiency ratios for informed decisions:
//!
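//! A sketch of two illustrative ratios derived from the recorded fields:
//! throughput per CPU percentage point and throughput per MB of peak memory.
//! The field names come from [`EncryptionBenchmark`]; the ratios themselves
//! are examples, not part of the API.
//!
//! ```rust,ignore
//! use adaptive_pipeline_domain::value_objects::encryption_benchmark::EncryptionBenchmark;
//!
//! fn cpu_efficiency(b: &EncryptionBenchmark) -> f64 {
//!     // MB/s delivered per percentage point of CPU used.
//!     b.throughput_mbps / b.cpu_usage_percent.max(f64::EPSILON)
//! }
//!
//! fn memory_efficiency(b: &EncryptionBenchmark) -> f64 {
//!     // MB/s delivered per MB of peak memory consumed.
//!     b.throughput_mbps / b.memory_usage_mb.max(f64::EPSILON)
//! }
//! ```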
//!
//! ## Serialization and Persistence
//!
//! ### JSON Serialization
//!
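//! The struct derives `Serialize`/`Deserialize`, so it round-trips through
//! `serde_json`. A minimal sketch, assuming `serde_json` is available as a
//! dependency and `benchmark` is a previously recorded value:
//!
//! ```rust,ignore
//! use adaptive_pipeline_domain::value_objects::encryption_benchmark::EncryptionBenchmark;
//!
//! let json = serde_json::to_string_pretty(&benchmark).expect("serialize");
//! let restored: EncryptionBenchmark = serde_json::from_str(&json).expect("deserialize");
//! assert_eq!(restored.throughput_mbps, benchmark.throughput_mbps);
//! ```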
//!
//! ### Database Storage
//!
//! Benchmark data maps well to relational databases (PostgreSQL-style DDL
//! shown below, with indexes created separately):
//!
//! ```sql
//! CREATE TABLE encryption_benchmarks (
//!     id SERIAL PRIMARY KEY,
//!     algorithm VARCHAR(50) NOT NULL,
//!     throughput_mbps DOUBLE PRECISION NOT NULL,
//!     latency_ms INTEGER NOT NULL,
//!     memory_usage_mb DOUBLE PRECISION NOT NULL,
//!     cpu_usage_percent DOUBLE PRECISION NOT NULL,
//!     file_size_mb DOUBLE PRECISION NOT NULL,
//!     timestamp TIMESTAMP WITH TIME ZONE NOT NULL
//! );
//!
//! CREATE INDEX idx_algorithm ON encryption_benchmarks (algorithm);
//! CREATE INDEX idx_timestamp ON encryption_benchmarks (timestamp);
//! ```
//!
//! ### Time Series Analysis
//!
//! The RFC3339-compliant timestamp enables trend analysis; a small sorting
//! sketch follows the list:
//!
//! - Track performance changes over time
//! - Identify performance regressions
//! - Correlate performance with system changes
//! - Compare algorithm performance across versions
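//!
//! A minimal sketch of ordering recorded benchmarks by timestamp and comparing
//! the oldest and newest throughput figures; `load_benchmarks()` is a
//! hypothetical loader standing in for whatever persistence layer is used.
//!
//! ```rust,ignore
//! use adaptive_pipeline_domain::value_objects::encryption_benchmark::EncryptionBenchmark;
//!
//! let mut history: Vec<EncryptionBenchmark> = load_benchmarks(); // hypothetical loader
//! history.sort_by_key(|b| b.timestamp);
//!
//! if let (Some(first), Some(last)) = (history.first(), history.last()) {
//!     let delta = last.throughput_mbps - first.throughput_mbps;
//!     println!("throughput change over the series: {delta:+.1} MB/s");
//! }
//! ```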
use crate::services::datetime_serde;
use crate::services::encryption_service::EncryptionAlgorithm;
use serde::{Deserialize, Serialize};
use std::time::Duration;

/// Encryption performance benchmark results for algorithm comparison and
/// optimization.
///
/// `EncryptionBenchmark` is a value object that captures comprehensive
/// performance metrics for encryption operations, enabling data-driven
/// decisions about algorithm selection, resource allocation, and system
/// optimization in the adaptive pipeline.
///
/// ## Key Features
///
/// - **Comprehensive Metrics**: Captures throughput, latency, memory usage, and
///   CPU utilization
/// - **Algorithm Tracking**: Associates metrics with specific encryption
///   algorithms
/// - **Temporal Context**: Includes RFC3339-compliant timestamp for trend
///   analysis
/// - **Immutable Snapshot**: Represents performance at a specific point in time
/// - **Serialization Ready**: Full serde support for persistence and data
///   exchange
///
/// ## Performance Metrics
///
/// - `throughput_mbps`: Data processing rate in megabytes per second
/// - `latency`: Time delay from operation start to completion
/// - `memory_usage_mb`: Peak memory consumption during encryption
/// - `cpu_usage_percent`: CPU utilization percentage during operation
/// - `file_size_mb`: Size of the file being encrypted (for context)
///
/// ## Usage Patterns
///
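/// A common pattern is to record one benchmark per encryption run and keep the
/// history grouped by algorithm. A minimal sketch, assuming the constructor
/// below and an illustrative `Aes256Gcm` variant name:
///
/// ```rust,ignore
/// use std::collections::HashMap;
/// use std::time::Duration;
///
/// let mut history: HashMap<String, Vec<EncryptionBenchmark>> = HashMap::new();
///
/// let run = EncryptionBenchmark::new(
///     EncryptionAlgorithm::Aes256Gcm, // assumed variant name
///     180.0,                          // MB/s
///     Duration::from_millis(12),      // latency
///     48.0,                           // peak memory in MB
///     60.0,                           // CPU percent
///     512.0,                          // file size in MB
/// );
/// history.entry("aes-256-gcm".to_string()).or_default().push(run); // illustrative key
/// ```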
///
/// ## Integration with Monitoring
///
/// The benchmark integrates with the pipeline's monitoring and optimization
/// systems:
///
/// - **Performance Tracking**: Historical performance data for trend analysis
/// - **Algorithm Selection**: Data-driven algorithm choice based on workload
///   characteristics
/// - **Resource Planning**: Memory and CPU requirement estimation
/// - **Optimization Feedback**: Performance regression detection and
///   optimization validation
///
/// ## Thread Safety
///
/// `EncryptionBenchmark` is thread-safe through immutability. All fields are
/// read-only after construction, making it safe to share across threads without
/// synchronization.
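///
/// A sketch of sharing one recorded benchmark across threads via `Arc`,
/// relying only on the value never being mutated after construction;
/// `benchmark` stands in for a previously recorded value:
///
/// ```rust,ignore
/// use std::sync::Arc;
/// use std::thread;
///
/// let shared = Arc::new(benchmark); // recorded earlier
/// let handle = {
///     let shared = Arc::clone(&shared);
///     thread::spawn(move || shared.throughput_mbps)
/// };
/// assert_eq!(handle.join().unwrap(), shared.throughput_mbps);
/// ```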
///
/// ## Cross-Language Compatibility
///
/// The benchmark data structure maps well to other languages:
///
/// - **JSON**: Direct serialization for REST APIs and configuration
/// - **Go**: Struct with similar field types and JSON tags
/// - **Python**: Dataclass or NamedTuple with datetime handling
/// - **Database**: Relational table with appropriate column types
///
/// ## Performance Considerations
///
/// - Lightweight value object with minimal memory overhead
/// - Efficient serialization through serde derive macros
/// - Immutable design eliminates defensive copying
/// - Timestamp generation uses UTC to avoid timezone complexity
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EncryptionBenchmark {
    /// The encryption algorithm that was benchmarked
    pub algorithm: EncryptionAlgorithm,

    /// Data processing throughput in megabytes per second
    pub throughput_mbps: f64,

    /// Operation latency from start to completion
    pub latency: Duration,

    /// Peak memory usage during encryption in megabytes
    pub memory_usage_mb: f64,

    /// CPU utilization percentage during the operation
    pub cpu_usage_percent: f64,

    /// Size of the file being encrypted in megabytes (for context)
    pub file_size_mb: f64,

    /// RFC3339-compliant timestamp when the benchmark was recorded
    #[serde(with = "datetime_serde")]
    pub timestamp: chrono::DateTime<chrono::Utc>,
}

impl EncryptionBenchmark {
    /// Creates a new encryption benchmark with the specified performance
    /// metrics.
    ///
    /// The benchmark captures a snapshot of encryption performance at the
    /// current time, providing comprehensive metrics for algorithm
    /// comparison and optimization decisions.
    ///
    /// # Arguments
    ///
    /// * `algorithm` - The encryption algorithm that was benchmarked
    /// * `throughput_mbps` - Data processing rate in megabytes per second
    /// * `latency` - Operation latency from start to completion
    /// * `memory_usage_mb` - Peak memory consumption during encryption
    /// * `cpu_usage_percent` - CPU utilization percentage during operation
    /// * `file_size_mb` - Size of the file being encrypted (for context)
    ///
    /// # Returns
    ///
    /// A new `EncryptionBenchmark` instance with the current UTC timestamp.
    ///
    /// # Examples
    ///
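    /// A minimal sketch; the `ChaCha20Poly1305` variant name is assumed:
    ///
    /// ```rust,ignore
    /// use std::time::Duration;
    ///
    /// let benchmark = EncryptionBenchmark::new(
    ///     EncryptionAlgorithm::ChaCha20Poly1305, // assumed variant name
    ///     175.0,                                 // MB/s
    ///     Duration::from_millis(15),             // latency
    ///     32.0,                                  // peak memory in MB
    ///     55.0,                                  // CPU percent
    ///     256.0,                                 // file size in MB
    /// );
    /// assert!(benchmark.timestamp <= chrono::Utc::now());
    /// ```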
    ///
    /// # Performance Metrics Guidelines
    ///
    /// - **Throughput**: Measure sustained data processing rate, not peak burst
    /// - **Latency**: Include full operation time from API call to completion
    /// - **Memory**: Capture peak usage, not average or final usage
    /// - **CPU**: Measure during active encryption, not including I/O wait
    /// - **File Size**: Provide context for performance scaling analysis
    pub fn new(
        algorithm: EncryptionAlgorithm,
        throughput_mbps: f64,
        latency: Duration,
        memory_usage_mb: f64,
        cpu_usage_percent: f64,
        file_size_mb: f64,
    ) -> Self {
        Self {
            algorithm,
            throughput_mbps,
            latency,
            memory_usage_mb,
            cpu_usage_percent,
            file_size_mb,
            timestamp: chrono::Utc::now(),
        }
    }
}