// nntp_proxy/constants.rs
//! Constants used throughout the NNTP proxy
//!
//! This module centralizes magic numbers and configuration values
//! to improve maintainability and reduce duplication.

use std::time::Duration;
/// Buffer size constants
///
/// All buffer sizes are carefully chosen for NNTP workloads:
/// - Commands are small (< 512 bytes)
/// - Articles range from 1KB to 768KB typically (99th percentile)
/// - Pooled buffers use aligned sizes for optimal memory access
pub mod buffer {
    // Buffer pool configuration

    /// Page size for memory alignment (4KB = standard OS page)
    /// Used in compile-time assertions below to verify alignment.
    #[allow(dead_code)] // Used in const assertions which the compiler doesn't track
    const PAGE_SIZE: usize = 4096;

    /// Size of each pooled buffer (724KB, page-aligned)
    /// Tuned for typical Usenet article sizes (average ~725KB)
    /// Aligned to 4KB page boundaries for optimal memory access
    pub const POOL: usize = 724 * 1024;

    /// Number of buffers in the buffer pool
    /// Sized for ~25 concurrent connections with one buffer each
    /// Total memory: 25 × 724KB ≈ 18MB
    pub const POOL_COUNT: usize = 25;

    /// BufReader capacity for client command parsing (64KB)
    /// Large enough to handle any NNTP command line without multiple reads
    /// Sized for efficient line-based reading with minimal syscalls
    pub const READER_CAPACITY: usize = 64 * 1024;

    // Command and response limits

    /// Maximum command line size (512 bytes)
    /// NNTP commands are typically small: "ARTICLE <msgid@example.com>"
    pub const COMMAND: usize = 512;

    /// Maximum size for a single response (2MB)
    /// Increased to handle larger articles, prevents memory exhaustion
    pub const RESPONSE_MAX: usize = 2 * 1024 * 1024;

    /// Initial capacity for response accumulation buffers (8KB, page-aligned)
    /// Sized for typical status lines and small responses
    pub const RESPONSE_INITIAL: usize = 8192;

    // Streaming configuration

    /// Chunk size for streaming responses (724KB, page-aligned)
    /// Defined as `POOL` so the single-read optimization (one pooled buffer
    /// per streamed chunk) holds by construction and cannot silently diverge.
    pub const STREAM_CHUNK: usize = POOL;

    // Compile-time validation
    // `% PAGE_SIZE == 0` is used instead of `usize::is_multiple_of` so these
    // assertions compile on toolchains older than Rust 1.87.

    /// Verify pool buffer is page-aligned at compile time
    const _POOL_ALIGNED: () = assert!(POOL % PAGE_SIZE == 0, "POOL must be page-aligned");

    /// Verify stream chunk is page-aligned at compile time
    const _CHUNK_ALIGNED: () = assert!(
        STREAM_CHUNK % PAGE_SIZE == 0,
        "STREAM_CHUNK must be page-aligned"
    );
}
/// Socket buffer size constants
pub mod socket {
    /// TCP socket receive buffer size for high-throughput transfers (16MB)
    pub const HIGH_THROUGHPUT_RECV_BUFFER: usize = 16 * 1024 * 1024;

    /// TCP socket send buffer size for high-throughput transfers (16MB)
    /// Defined as the receive size so send/receive symmetry holds by construction.
    pub const HIGH_THROUGHPUT_SEND_BUFFER: usize = HIGH_THROUGHPUT_RECV_BUFFER;

    /// TCP socket receive buffer size for connection pools (7.25MB)
    /// Sized to hold roughly ten 724KB streaming chunks for efficient streaming
    pub const POOL_RECV_BUFFER: usize = 7 * 1024 * 1024 + 256 * 1024;

    /// TCP socket send buffer size for connection pools (7.25MB)
    /// Defined as the receive size so send/receive symmetry holds by construction.
    pub const POOL_SEND_BUFFER: usize = POOL_RECV_BUFFER;
}
/// Timeout constants
pub mod timeout {
    use super::Duration;

    /// Timeout for reading responses from backend servers
    pub const BACKEND_READ: Duration = Duration::from_secs(30);

    /// Timeout for executing a command on backend
    /// Longer than BACKEND_READ so a single slow read does not abort the command.
    pub const COMMAND_EXECUTION: Duration = Duration::from_secs(60);

    /// Connection timeout for backend connections
    pub const CONNECTION: Duration = Duration::from_secs(10);

    /// Timeout for adaptive precheck queries (STAT/HEAD)
    /// If a backend doesn't respond within this time, treat as 430 (missing)
    /// This prevents slow backends from blocking all client connections
    pub const PRECHECK_QUERY: Duration = Duration::from_secs(2);
}
/// Connection pool constants
pub mod pool {
    use super::Duration;

    /// Default maximum connections per backend pool
    pub const DEFAULT_MAX_CONNECTIONS: usize = 10;

    /// Default minimum idle connections to maintain
    pub const DEFAULT_MIN_IDLE: usize = 2;

    /// Connection pool timeout for getting a connection
    pub const GET_TIMEOUT_SECS: u64 = 5;

    /// Buffer size for TCP peek during health checks
    /// Only 1 byte needed to detect if connection is readable/closed
    pub const TCP_PEEK_BUFFER_SIZE: usize = 1;

    /// Health check timeout - how long to wait for DATE command response
    pub const HEALTH_CHECK_TIMEOUT: Duration = Duration::from_secs(2);

    /// Buffer size for reading health check responses
    pub const HEALTH_CHECK_BUFFER_SIZE: usize = 512;

    /// DATE command bytes sent during health check
    pub const DATE_COMMAND: &[u8] = b"DATE\r\n";

    /// Expected prefix of DATE command response (NNTP 111 response code)
    pub const EXPECTED_DATE_RESPONSE_PREFIX: &str = "111 ";

    /// Minimum recommended keep-alive interval in seconds
    /// Values below this may cause excessive health check traffic
    pub const MIN_RECOMMENDED_KEEPALIVE_SECS: u64 = 30;

    /// Maximum recommended keep-alive interval in seconds (5 minutes)
    /// Values above this may not detect stale connections quickly enough
    pub const MAX_RECOMMENDED_KEEPALIVE_SECS: u64 = 300;

    /// Maximum number of idle connections to check per health check cycle
    /// Checking too many at once can temporarily starve the pool
    pub const MAX_CONNECTIONS_PER_HEALTH_CHECK_CYCLE: usize = 5;

    /// Timeout when attempting to get a connection for health checking (milliseconds)
    /// Short timeout to avoid blocking if pool is busy
    pub const HEALTH_CHECK_POOL_TIMEOUT_MS: u64 = 100;
}
/// Per-command routing constants
pub mod per_command_routing {
    /// Number of chunks to read ahead when checking for response terminator
    pub const TERMINATOR_LOOKAHEAD_CHUNKS: usize = 4;

    /// Maximum number of bytes to check for spanning terminator
    /// (a CRLF.CRLF terminator split across chunk boundaries)
    pub const MAX_TERMINATOR_SPAN_CHECK: usize = 9;
}
/// Session and metrics constants
pub mod session {
    /// Flush incremental metrics every N commands for long-running sessions
    ///
    /// Prevents metrics from accumulating indefinitely without being recorded.
    /// Value of 100 balances between:
    /// - Frequent enough to avoid significant data loss on crashes
    /// - Infrequent enough to avoid performance overhead
    pub const METRICS_FLUSH_INTERVAL: u32 = 100;
}
/// Display strings for user metrics and logging
pub mod user {
    /// Display name for anonymous/unauthenticated users
    ///
    /// Used as HashMap key and display value for users who haven't authenticated.
    /// The `<anonymous>` format is chosen to:
    /// - Sort first in alphabetical listings (< comes before letters)
    /// - Be clearly distinguished from actual usernames
    /// - Be consistent across all metrics and logging
    pub const ANONYMOUS: &str = "<anonymous>";
}
#[cfg(test)]
#[allow(clippy::assertions_on_constants)]
mod tests {
    use super::*;

    /// Verify buffer sizes satisfy the page-alignment and single-read invariants.
    #[test]
    fn test_buffer_alignment() {
        // Pool buffer should be page-aligned (4KB boundaries)
        assert_eq!(buffer::POOL % 4096, 0, "POOL must be page-aligned");

        // Stream chunk should be page-aligned
        assert_eq!(
            buffer::STREAM_CHUNK % 4096,
            0,
            "STREAM_CHUNK must be page-aligned"
        );

        // Pool and chunk should match for single-read optimization
        assert_eq!(
            buffer::POOL,
            buffer::STREAM_CHUNK,
            "POOL and STREAM_CHUNK should match"
        );
    }

    /// Verify absolute buffer sizes and the ordering relationships between them.
    #[test]
    fn test_buffer_sizes() {
        // Pool buffer should be 724KB (optimal for 725KB average articles)
        assert_eq!(buffer::POOL, 724 * 1024);

        // Reader capacity should be large enough for any command
        assert!(buffer::READER_CAPACITY >= buffer::COMMAND);
        assert_eq!(buffer::READER_CAPACITY, 64 * 1024);

        // Response max should be larger than pool for safety margin
        assert!(buffer::RESPONSE_MAX > buffer::POOL);

        // Basic size relationships
        // (POOL >= STREAM_CHUNK is already covered by the equality check
        // in test_buffer_alignment, so it is not repeated here.)
        assert!(buffer::RESPONSE_INITIAL >= buffer::COMMAND);
        assert!(buffer::RESPONSE_MAX > buffer::RESPONSE_INITIAL);
    }

    /// Verify socket buffers are sized relative to the streaming chunk and symmetric.
    #[test]
    fn test_socket_buffer_ratios() {
        // Socket buffers should be ~10x the streaming chunk size for optimal throughput
        let expected_min_buffer = buffer::STREAM_CHUNK * 10;
        assert!(socket::POOL_RECV_BUFFER >= expected_min_buffer);
        assert!(socket::POOL_SEND_BUFFER >= expected_min_buffer);

        // High-throughput buffers should be larger than pool buffers
        assert!(socket::HIGH_THROUGHPUT_RECV_BUFFER > socket::POOL_RECV_BUFFER);
        assert!(socket::HIGH_THROUGHPUT_SEND_BUFFER > socket::POOL_SEND_BUFFER);

        // Buffers should be symmetric (send == receive)
        assert_eq!(
            socket::HIGH_THROUGHPUT_RECV_BUFFER,
            socket::HIGH_THROUGHPUT_SEND_BUFFER
        );
    }

    /// Verify total pooled-buffer memory stays near the documented ~18MB budget.
    #[test]
    fn test_pool_memory_footprint() {
        // Calculate total pool memory
        let total_memory = buffer::POOL * buffer::POOL_COUNT;

        // Should be approximately 18MB
        let expected_mb = 18;
        let actual_mb = total_memory / (1024 * 1024);

        assert!(
            actual_mb >= expected_mb - 1 && actual_mb <= expected_mb + 1,
            "Pool memory should be ~{}MB, got {}MB",
            expected_mb,
            actual_mb
        );
    }

    /// Verify the timeout hierarchy: command execution > backend read > connection.
    #[test]
    fn test_timeouts() {
        // Command execution timeout should be longer than backend read timeout
        assert!(timeout::COMMAND_EXECUTION > timeout::BACKEND_READ);

        // Backend read timeout should be longer than connection timeout
        assert!(timeout::BACKEND_READ > timeout::CONNECTION);

        // All timeouts should be non-zero
        assert!(timeout::BACKEND_READ.as_secs() > 0);
    }

    /// Verify keep-alive bounds and health-check pacing constants are sane.
    #[test]
    fn test_health_check_constraints() {
        // Keepalive interval should be within recommended bounds
        assert!(pool::MIN_RECOMMENDED_KEEPALIVE_SECS > 0);
        assert!(pool::MAX_RECOMMENDED_KEEPALIVE_SECS > pool::MIN_RECOMMENDED_KEEPALIVE_SECS);

        // Health check pool timeout should be short
        assert!(
            pool::HEALTH_CHECK_POOL_TIMEOUT_MS < 1000,
            "Health check timeout should be < 1s"
        );

        // Connection health check cycle limit should be reasonable
        assert!(pool::MAX_CONNECTIONS_PER_HEALTH_CHECK_CYCLE > 0);
        assert!(
            pool::MAX_CONNECTIONS_PER_HEALTH_CHECK_CYCLE <= 10,
            "Should not check too many at once"
        );
    }
}