sentinel_proxy/rate_limit.rs

//! Rate limiting using pingora-limits
//!
//! This module provides efficient per-route, per-client rate limiting using
//! Pingora's optimized rate limiting primitives. Supports both local (single-instance)
//! and distributed (Redis-backed) rate limiting.
//!
//! # Local Rate Limiting
//!
//! Uses `pingora_limits::rate::Rate` for efficient in-memory rate limiting.
//! Suitable for single-instance deployments.
//!
//! # Distributed Rate Limiting
//!
//! Uses Redis sorted sets for sliding window rate limiting across multiple instances.
//! Requires the `distributed-rate-limit` feature.
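//!
//! # Example
//!
//! A minimal local-backend sketch (kept as an `ignore` block because it is illustrative
//! rather than a verified doctest; the route id, client IP, and limit values are arbitrary):
//!
//! ```ignore
//! // Stand-in for a real `HeaderAccessor` implementation.
//! struct NoHeaders;
//! impl HeaderAccessor for NoHeaders {
//!     fn get_header(&self, _name: &str) -> Option<String> {
//!         None
//!     }
//! }
//!
//! let manager = RateLimitManager::new();
//! manager.register_route(
//!     "api",
//!     RateLimitConfig {
//!         max_rps: 100,
//!         ..Default::default()
//!     },
//! );
//!
//! let result = manager.check("api", "203.0.113.7", "/api/users", Option::<&NoHeaders>::None);
//! if !result.allowed {
//!     // Reject with `result.status_code` (429 by default) and surface
//!     // `result.limit`, `result.remaining`, and `result.reset_at` as headers.
//! }
//! ```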

use dashmap::DashMap;
use parking_lot::RwLock;
use pingora_limits::rate::Rate;
use std::sync::Arc;
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use tracing::{debug, trace, warn};

use sentinel_config::{RateLimitAction, RateLimitBackend, RateLimitKey};

#[cfg(feature = "distributed-rate-limit")]
use crate::distributed_rate_limit::RedisRateLimiter;

/// Rate limiter outcome
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum RateLimitOutcome {
    /// Request is allowed
    Allowed,
    /// Request is rate limited
    Limited,
}

/// Detailed rate limit check result from a pool
#[derive(Debug, Clone)]
pub struct RateLimitCheckInfo {
    /// Whether the request is allowed or limited
    pub outcome: RateLimitOutcome,
    /// Current request count in the window
    pub current_count: i64,
    /// Maximum requests allowed per window
    pub limit: u32,
    /// Remaining requests in current window (0 if over limit)
    pub remaining: u32,
    /// Unix timestamp (seconds) when the window resets
    pub reset_at: u64,
}

/// Rate limiter configuration
#[derive(Debug, Clone)]
pub struct RateLimitConfig {
    /// Maximum requests per second
    pub max_rps: u32,
    /// Burst size
    pub burst: u32,
    /// Key type for bucketing
    pub key: RateLimitKey,
    /// Action when limited
    pub action: RateLimitAction,
    /// HTTP status code to return when limited
    pub status_code: u16,
    /// Custom message
    pub message: Option<String>,
    /// Backend for rate limiting (local or distributed)
    pub backend: RateLimitBackend,
    /// Maximum delay in milliseconds for Delay action
    pub max_delay_ms: u64,
}

impl Default for RateLimitConfig {
    fn default() -> Self {
        Self {
            max_rps: 100,
            burst: 10,
            key: RateLimitKey::ClientIp,
            action: RateLimitAction::Reject,
            status_code: 429,
            message: None,
            backend: RateLimitBackend::Local,
            max_delay_ms: 5000,
        }
    }
}

/// Per-key rate limiter using pingora-limits Rate
///
/// Uses a sliding window algorithm with 1-second granularity.
struct KeyRateLimiter {
    /// The rate limiter instance (tracks requests in current window)
    rate: Rate,
    /// Maximum requests per window
    max_requests: isize,
}

impl KeyRateLimiter {
    fn new(max_rps: u32) -> Self {
        Self {
            rate: Rate::new(Duration::from_secs(1)),
            max_requests: max_rps as isize,
        }
    }

    /// Check if a request should be allowed
    fn check(&self) -> RateLimitOutcome {
        // Rate::observe() records the request and returns the observed count in the current window
        let curr_count = self.rate.observe(&(), 1);

        if curr_count > self.max_requests {
            RateLimitOutcome::Limited
        } else {
            RateLimitOutcome::Allowed
        }
    }
}

/// Backend type for rate limiting
pub enum RateLimitBackendType {
    /// Local in-memory backend
    Local {
        /// Rate limiters by key (e.g., client IP -> limiter)
        limiters: DashMap<String, Arc<KeyRateLimiter>>,
    },
    /// Distributed Redis backend
    #[cfg(feature = "distributed-rate-limit")]
    Distributed {
        /// Redis rate limiter
        redis: Arc<RedisRateLimiter>,
        /// Local fallback
        local_fallback: DashMap<String, Arc<KeyRateLimiter>>,
    },
}

/// Thread-safe rate limiter pool managing multiple rate limiters by key
pub struct RateLimiterPool {
    /// Backend for rate limiting
    backend: RateLimitBackendType,
    /// Configuration
    config: RwLock<RateLimitConfig>,
}

/// Get current unix timestamp in seconds
fn current_unix_timestamp() -> u64 {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap_or(Duration::ZERO)
        .as_secs()
}

/// Calculate window reset timestamp (next second boundary for 1-second windows)
fn calculate_reset_timestamp() -> u64 {
    current_unix_timestamp() + 1
}

impl RateLimiterPool {
    /// Create a new rate limiter pool with the given configuration (local backend)
    pub fn new(config: RateLimitConfig) -> Self {
        Self {
            backend: RateLimitBackendType::Local {
                limiters: DashMap::new(),
            },
            config: RwLock::new(config),
        }
    }

    /// Create a new rate limiter pool with a distributed Redis backend
    #[cfg(feature = "distributed-rate-limit")]
    pub fn with_redis(config: RateLimitConfig, redis: Arc<RedisRateLimiter>) -> Self {
        Self {
            backend: RateLimitBackendType::Distributed {
                redis,
                local_fallback: DashMap::new(),
            },
            config: RwLock::new(config),
        }
    }

    /// Check if a request should be rate limited (synchronous, local only)
    ///
    /// Returns detailed rate limit information including remaining quota.
    /// For distributed backends, this falls back to local limiting.
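    ///
    /// # Example
    ///
    /// A small sketch (an `ignore` block; the key and limit values are illustrative):
    ///
    /// ```ignore
    /// let pool = RateLimiterPool::new(RateLimitConfig {
    ///     max_rps: 10,
    ///     ..Default::default()
    /// });
    /// let info = pool.check("203.0.113.7");
    /// assert_eq!(info.outcome, RateLimitOutcome::Allowed);
    /// assert_eq!(info.remaining, 9); // one of the 10 allowed requests consumed
    /// ```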
    pub fn check(&self, key: &str) -> RateLimitCheckInfo {
        let config = self.config.read();
        let max_rps = config.max_rps;
        drop(config);

        let limiters = match &self.backend {
            RateLimitBackendType::Local { limiters } => limiters,
            #[cfg(feature = "distributed-rate-limit")]
            RateLimitBackendType::Distributed { local_fallback, .. } => local_fallback,
        };

        // Get or create limiter for this key
        let limiter = limiters
            .entry(key.to_string())
            .or_insert_with(|| Arc::new(KeyRateLimiter::new(max_rps)))
            .clone();

        let outcome = limiter.check();
        let count = limiter.rate.observe(&(), 0); // Get current count without incrementing
        let remaining = if count >= max_rps as isize {
            0
        } else {
            (max_rps as isize - count) as u32
        };

        RateLimitCheckInfo {
            outcome,
            current_count: count as i64,
            limit: max_rps,
            remaining,
            reset_at: calculate_reset_timestamp(),
        }
    }

    /// Check if a request should be rate limited (async, supports distributed backends)
    ///
    /// Returns detailed rate limit information including remaining quota.
    #[cfg(feature = "distributed-rate-limit")]
    pub async fn check_async(&self, key: &str) -> RateLimitCheckInfo {
        let config = self.config.read();
        let max_rps = config.max_rps;
        drop(config);

        match &self.backend {
            RateLimitBackendType::Local { .. } => self.check(key),
            RateLimitBackendType::Distributed {
                redis,
                local_fallback,
            } => {
                // Try Redis first
                match redis.check(key).await {
                    Ok((outcome, count)) => {
                        let remaining = if count >= max_rps as i64 {
                            0
                        } else {
                            (max_rps as i64 - count) as u32
                        };
                        RateLimitCheckInfo {
                            outcome,
                            current_count: count,
                            limit: max_rps,
                            remaining,
                            reset_at: calculate_reset_timestamp(),
                        }
                    }
                    Err(e) => {
                        warn!(
                            error = %e,
                            key = key,
                            "Redis rate limit check failed, falling back to local"
                        );
                        redis.mark_unhealthy();

                        // Fallback to local
                        if redis.fallback_enabled() {
                            let limiter = local_fallback
                                .entry(key.to_string())
                                .or_insert_with(|| Arc::new(KeyRateLimiter::new(max_rps)))
                                .clone();

                            let outcome = limiter.check();
                            let count = limiter.rate.observe(&(), 0);
                            let remaining = if count >= max_rps as isize {
                                0
                            } else {
                                (max_rps as isize - count) as u32
                            };
                            RateLimitCheckInfo {
                                outcome,
                                current_count: count as i64,
                                limit: max_rps,
                                remaining,
                                reset_at: calculate_reset_timestamp(),
                            }
                        } else {
                            // Fail open if no fallback
                            RateLimitCheckInfo {
                                outcome: RateLimitOutcome::Allowed,
                                current_count: 0,
                                limit: max_rps,
                                remaining: max_rps,
                                reset_at: calculate_reset_timestamp(),
                            }
                        }
                    }
                }
            }
        }
    }

    /// Check if this pool uses a distributed backend
    pub fn is_distributed(&self) -> bool {
        match &self.backend {
            RateLimitBackendType::Local { .. } => false,
            #[cfg(feature = "distributed-rate-limit")]
            RateLimitBackendType::Distributed { .. } => true,
        }
    }

    /// Get the rate limit key from request context
    pub fn extract_key(
        &self,
        client_ip: &str,
        path: &str,
        route_id: &str,
        headers: Option<&impl HeaderAccessor>,
    ) -> String {
        let config = self.config.read();
        match &config.key {
            RateLimitKey::ClientIp => client_ip.to_string(),
            RateLimitKey::Path => path.to_string(),
            RateLimitKey::Route => route_id.to_string(),
            RateLimitKey::ClientIpAndPath => format!("{}:{}", client_ip, path),
            RateLimitKey::Header(header_name) => headers
                .and_then(|h| h.get_header(header_name))
                .unwrap_or_else(|| "unknown".to_string()),
        }
    }

    /// Get the action to take when rate limited
    pub fn action(&self) -> RateLimitAction {
        self.config.read().action.clone()
    }

    /// Get the HTTP status code for rate limit responses
    pub fn status_code(&self) -> u16 {
        self.config.read().status_code
    }

    /// Get the custom message for rate limit responses
    pub fn message(&self) -> Option<String> {
        self.config.read().message.clone()
    }

    /// Get the maximum delay in milliseconds for Delay action
    pub fn max_delay_ms(&self) -> u64 {
        self.config.read().max_delay_ms
    }

    /// Update the configuration
    pub fn update_config(&self, config: RateLimitConfig) {
        *self.config.write() = config;
        // Clear existing limiters so they get recreated with new config
        self.clear_local_limiters();
    }

    /// Clear local limiters (for config updates)
    fn clear_local_limiters(&self) {
        match &self.backend {
            RateLimitBackendType::Local { limiters } => limiters.clear(),
            #[cfg(feature = "distributed-rate-limit")]
            RateLimitBackendType::Distributed { local_fallback, .. } => local_fallback.clear(),
        }
    }

    /// Get the number of local limiter entries
    fn local_limiter_count(&self) -> usize {
        match &self.backend {
            RateLimitBackendType::Local { limiters } => limiters.len(),
            #[cfg(feature = "distributed-rate-limit")]
            RateLimitBackendType::Distributed { local_fallback, .. } => local_fallback.len(),
        }
    }

    /// Clean up expired entries (call periodically)
    pub fn cleanup(&self) {
        // Remove entries that haven't been accessed recently
        // In practice, Rate handles its own window cleanup, so this is mainly
        // for memory management when many unique keys are seen
        let max_entries = 100_000; // Prevent unbounded growth

        let limiters = match &self.backend {
            RateLimitBackendType::Local { limiters } => limiters,
            #[cfg(feature = "distributed-rate-limit")]
            RateLimitBackendType::Distributed { local_fallback, .. } => local_fallback,
        };

        if limiters.len() > max_entries {
            let entries_before = limiters.len();
            // Simple eviction: clear half
            let to_remove: Vec<_> = limiters
                .iter()
                .take(max_entries / 2)
                .map(|e| e.key().clone())
                .collect();

            for key in to_remove {
                limiters.remove(&key);
            }

            debug!(
                entries_before,
                entries_after = limiters.len(),
                "Rate limiter pool cleanup completed"
            );
        }
    }
}

/// Trait for accessing headers (allows abstracting over different header types)
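///
/// # Example
///
/// An illustrative implementation over a plain `HashMap` (an `ignore` block; real callers
/// would typically wrap their HTTP library's header map instead):
///
/// ```ignore
/// use std::collections::HashMap;
///
/// struct MapHeaders(HashMap<String, String>);
///
/// impl HeaderAccessor for MapHeaders {
///     fn get_header(&self, name: &str) -> Option<String> {
///         self.0.get(name).cloned()
///     }
/// }
/// ```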
pub trait HeaderAccessor {
    fn get_header(&self, name: &str) -> Option<String>;
}

/// Route-level rate limiter manager
pub struct RateLimitManager {
    /// Per-route rate limiter pools
    route_limiters: DashMap<String, Arc<RateLimiterPool>>,
    /// Global rate limiter (optional)
    global_limiter: Option<Arc<RateLimiterPool>>,
}

impl RateLimitManager {
    /// Create a new rate limit manager
    pub fn new() -> Self {
        Self {
            route_limiters: DashMap::new(),
            global_limiter: None,
        }
    }

    /// Create a new rate limit manager with a global rate limit
    pub fn with_global_limit(max_rps: u32, burst: u32) -> Self {
        let config = RateLimitConfig {
            max_rps,
            burst,
            key: RateLimitKey::ClientIp,
            action: RateLimitAction::Reject,
            status_code: 429,
            message: None,
            backend: RateLimitBackend::Local,
            max_delay_ms: 5000,
        };
        Self {
            route_limiters: DashMap::new(),
            global_limiter: Some(Arc::new(RateLimiterPool::new(config))),
        }
    }

    /// Register a rate limiter for a route
    pub fn register_route(&self, route_id: &str, config: RateLimitConfig) {
        trace!(
            route_id = route_id,
            max_rps = config.max_rps,
            burst = config.burst,
            key = ?config.key,
            "Registering rate limiter for route"
        );

        self.route_limiters
            .insert(route_id.to_string(), Arc::new(RateLimiterPool::new(config)));
    }

    /// Check if a request should be rate limited
    ///
    /// Checks both global and route-specific limits.
    /// Returns detailed rate limit information for response headers.
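    ///
    /// # Example
    ///
    /// A sketch of mapping the result onto conventional `X-RateLimit-*` response headers
    /// (an `ignore` block; `response.set_header` and the `NoHeaders` stand-in are
    /// placeholders for the caller's own response and header types):
    ///
    /// ```ignore
    /// let result = manager.check(route_id, client_ip, path, Option::<&NoHeaders>::None);
    /// response.set_header("X-RateLimit-Limit", result.limit.to_string());
    /// response.set_header("X-RateLimit-Remaining", result.remaining.to_string());
    /// response.set_header("X-RateLimit-Reset", result.reset_at.to_string());
    /// if !result.allowed {
    ///     // Reject with `result.status_code`, or delay, depending on `result.action`.
    /// }
    /// ```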
    pub fn check(
        &self,
        route_id: &str,
        client_ip: &str,
        path: &str,
        headers: Option<&impl HeaderAccessor>,
    ) -> RateLimitResult {
        // Track the most restrictive limit info for headers
        let mut best_limit_info: Option<RateLimitCheckInfo> = None;

        // Check global limit first
        if let Some(ref global) = self.global_limiter {
            let key = global.extract_key(client_ip, path, route_id, headers);
            let check_info = global.check(&key);

            if check_info.outcome == RateLimitOutcome::Limited {
                warn!(
                    route_id = route_id,
                    client_ip = client_ip,
                    key = key,
                    count = check_info.current_count,
                    "Request rate limited by global limiter"
                );
                // Calculate suggested delay based on how far over the limit the client is,
                // guarding against division by zero when the configured limit is 0
                let suggested_delay_ms = if check_info.current_count > check_info.limit as i64 {
                    let excess = check_info.current_count - check_info.limit as i64;
                    Some((excess as u64 * 1000) / check_info.limit.max(1) as u64)
                } else {
                    None
                };
                return RateLimitResult {
                    allowed: false,
                    action: global.action(),
                    status_code: global.status_code(),
                    message: global.message(),
                    limiter: "global".to_string(),
                    limit: check_info.limit,
                    remaining: check_info.remaining,
                    reset_at: check_info.reset_at,
                    suggested_delay_ms,
                    max_delay_ms: global.max_delay_ms(),
                };
            }

            best_limit_info = Some(check_info);
        }

        // Check route-specific limit
        if let Some(pool) = self.route_limiters.get(route_id) {
            let key = pool.extract_key(client_ip, path, route_id, headers);
            let check_info = pool.check(&key);

            if check_info.outcome == RateLimitOutcome::Limited {
                warn!(
                    route_id = route_id,
                    client_ip = client_ip,
                    key = key,
                    count = check_info.current_count,
                    "Request rate limited by route limiter"
                );
                // Calculate suggested delay based on how far over the limit the client is,
                // guarding against division by zero when the configured limit is 0
                let suggested_delay_ms = if check_info.current_count > check_info.limit as i64 {
                    let excess = check_info.current_count - check_info.limit as i64;
                    Some((excess as u64 * 1000) / check_info.limit.max(1) as u64)
                } else {
                    None
                };
                return RateLimitResult {
                    allowed: false,
                    action: pool.action(),
                    status_code: pool.status_code(),
                    message: pool.message(),
                    limiter: route_id.to_string(),
                    limit: check_info.limit,
                    remaining: check_info.remaining,
                    reset_at: check_info.reset_at,
                    suggested_delay_ms,
                    max_delay_ms: pool.max_delay_ms(),
                };
            }

            trace!(
                route_id = route_id,
                key = key,
                count = check_info.current_count,
                remaining = check_info.remaining,
                "Request allowed by rate limiter"
            );

            // Use the more restrictive limit info (lower remaining)
            if let Some(ref existing) = best_limit_info {
                if check_info.remaining < existing.remaining {
                    best_limit_info = Some(check_info);
                }
            } else {
                best_limit_info = Some(check_info);
            }
        }

        // Return allowed with rate limit info for headers
        let (limit, remaining, reset_at) = best_limit_info
            .map(|info| (info.limit, info.remaining, info.reset_at))
            .unwrap_or((0, 0, 0));

        RateLimitResult {
            allowed: true,
            action: RateLimitAction::Reject,
            status_code: 429,
            message: None,
            limiter: String::new(),
            limit,
            remaining,
            reset_at,
            suggested_delay_ms: None,
            max_delay_ms: 5000, // Default max delay for allowed requests (unused)
        }
    }

    /// Perform periodic cleanup
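    ///
    /// A possible way to drive this on a timer (an `ignore` block; assumes the caller runs
    /// inside a tokio runtime and shares the manager through an `Arc`):
    ///
    /// ```ignore
    /// let manager = Arc::new(RateLimitManager::new());
    /// let for_cleanup = Arc::clone(&manager);
    /// tokio::spawn(async move {
    ///     let mut interval = tokio::time::interval(Duration::from_secs(60));
    ///     loop {
    ///         interval.tick().await;
    ///         for_cleanup.cleanup();
    ///     }
    /// });
    /// ```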
    pub fn cleanup(&self) {
        if let Some(ref global) = self.global_limiter {
            global.cleanup();
        }
        for entry in self.route_limiters.iter() {
            entry.value().cleanup();
        }
    }

    /// Get the number of registered route limiters
    pub fn route_count(&self) -> usize {
        self.route_limiters.len()
    }

    /// Check if any rate limiting is configured (fast path)
    ///
    /// Returns true if there's a global limiter or any route-specific limiters.
    /// Use this to skip rate limit checks entirely when no limiting is configured.
    #[inline]
    pub fn is_enabled(&self) -> bool {
        self.global_limiter.is_some() || !self.route_limiters.is_empty()
    }

    /// Check whether rate limiting applies to a specific route (fast path)
    ///
    /// Returns true if the route has its own limiter or a global limiter is configured.
    #[inline]
    pub fn has_route_limiter(&self, route_id: &str) -> bool {
        self.global_limiter.is_some() || self.route_limiters.contains_key(route_id)
    }
}

impl Default for RateLimitManager {
    fn default() -> Self {
        Self::new()
    }
}

/// Result of a rate limit check
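///
/// For the `Delay` action, the caller would typically clamp the suggested delay to the
/// configured cap before applying it; a sketch (an `ignore` block; the tokio sleep is just
/// one way to apply the delay):
///
/// ```ignore
/// let delay_ms = result
///     .suggested_delay_ms
///     .unwrap_or(0)
///     .min(result.max_delay_ms);
/// tokio::time::sleep(std::time::Duration::from_millis(delay_ms)).await;
/// ```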
#[derive(Debug, Clone)]
pub struct RateLimitResult {
    /// Whether the request is allowed
    pub allowed: bool,
    /// Action to take if limited
    pub action: RateLimitAction,
    /// HTTP status code for rejection
    pub status_code: u16,
    /// Custom message
    pub message: Option<String>,
    /// Which limiter triggered (for logging)
    pub limiter: String,
    /// Maximum requests allowed per window
    pub limit: u32,
    /// Remaining requests in current window
    pub remaining: u32,
    /// Unix timestamp (seconds) when the window resets
    pub reset_at: u64,
    /// Suggested delay in milliseconds (for Delay action)
    pub suggested_delay_ms: Option<u64>,
    /// Maximum delay in milliseconds (configured cap for Delay action)
    pub max_delay_ms: u64,
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_rate_limiter_allows_under_limit() {
        let config = RateLimitConfig {
            max_rps: 10,
            burst: 5,
            key: RateLimitKey::ClientIp,
            ..Default::default()
        };
        let pool = RateLimiterPool::new(config);

        // Should allow first 10 requests
        for i in 0..10 {
            let info = pool.check("127.0.0.1");
            assert_eq!(info.outcome, RateLimitOutcome::Allowed);
            assert_eq!(info.limit, 10);
            assert_eq!(info.remaining, 10 - i - 1);
        }
    }

    #[test]
    fn test_rate_limiter_blocks_over_limit() {
        let config = RateLimitConfig {
            max_rps: 5,
            burst: 2,
            key: RateLimitKey::ClientIp,
            ..Default::default()
        };
        let pool = RateLimiterPool::new(config);

        // Should allow first 5 requests
        for _ in 0..5 {
            let info = pool.check("127.0.0.1");
            assert_eq!(info.outcome, RateLimitOutcome::Allowed);
        }

        // 6th request should be limited
        let info = pool.check("127.0.0.1");
        assert_eq!(info.outcome, RateLimitOutcome::Limited);
        assert_eq!(info.remaining, 0);
    }

    #[test]
    fn test_rate_limiter_separate_keys() {
        let config = RateLimitConfig {
            max_rps: 2,
            burst: 1,
            key: RateLimitKey::ClientIp,
            ..Default::default()
        };
        let pool = RateLimiterPool::new(config);

        // Each IP gets its own bucket
        let info1 = pool.check("192.168.1.1");
        let info2 = pool.check("192.168.1.2");
        let info3 = pool.check("192.168.1.1");
        let info4 = pool.check("192.168.1.2");

        assert_eq!(info1.outcome, RateLimitOutcome::Allowed);
        assert_eq!(info2.outcome, RateLimitOutcome::Allowed);
        assert_eq!(info3.outcome, RateLimitOutcome::Allowed);
        assert_eq!(info4.outcome, RateLimitOutcome::Allowed);

        // Both should hit limit now
        let info5 = pool.check("192.168.1.1");
        let info6 = pool.check("192.168.1.2");

        assert_eq!(info5.outcome, RateLimitOutcome::Limited);
        assert_eq!(info6.outcome, RateLimitOutcome::Limited);
    }

    #[test]
    fn test_rate_limit_info_fields() {
        let config = RateLimitConfig {
            max_rps: 5,
            burst: 2,
            key: RateLimitKey::ClientIp,
            ..Default::default()
        };
        let pool = RateLimiterPool::new(config);

        let info = pool.check("10.0.0.1");
        assert_eq!(info.limit, 5);
        assert_eq!(info.remaining, 4); // 5 - 1 = 4
        assert!(info.reset_at > 0);
        assert_eq!(info.outcome, RateLimitOutcome::Allowed);
    }

    #[test]
    fn test_rate_limit_manager() {
        let manager = RateLimitManager::new();

        manager.register_route(
            "api",
            RateLimitConfig {
                max_rps: 5,
                burst: 2,
                key: RateLimitKey::ClientIp,
                ..Default::default()
            },
        );

        // Route without limiter should always pass (no rate limit info)
        let result = manager.check("web", "127.0.0.1", "/", Option::<&NoHeaders>::None);
        assert!(result.allowed);
        assert_eq!(result.limit, 0); // No limiter configured

        // Route with limiter should enforce limits and return rate limit info
        for i in 0..5 {
            let result = manager.check("api", "127.0.0.1", "/api/test", Option::<&NoHeaders>::None);
            assert!(result.allowed);
            assert_eq!(result.limit, 5);
            assert_eq!(result.remaining, 5 - i as u32 - 1);
        }

        let result = manager.check("api", "127.0.0.1", "/api/test", Option::<&NoHeaders>::None);
        assert!(!result.allowed);
        assert_eq!(result.status_code, 429);
        assert_eq!(result.limit, 5);
        assert_eq!(result.remaining, 0);
        assert!(result.reset_at > 0);
    }

    #[test]
    fn test_rate_limit_result_with_delay() {
        let manager = RateLimitManager::new();

        manager.register_route(
            "api",
            RateLimitConfig {
                max_rps: 2,
                burst: 1,
                key: RateLimitKey::ClientIp,
                ..Default::default()
            },
        );

        // Use up the limit
        manager.check("api", "127.0.0.1", "/", Option::<&NoHeaders>::None);
        manager.check("api", "127.0.0.1", "/", Option::<&NoHeaders>::None);

        // Third request should be limited with suggested delay
        let result = manager.check("api", "127.0.0.1", "/", Option::<&NoHeaders>::None);
        assert!(!result.allowed);
        assert!(result.suggested_delay_ms.is_some());
    }

    // Helper type for tests that don't need header access
    struct NoHeaders;
    impl HeaderAccessor for NoHeaders {
        fn get_header(&self, _name: &str) -> Option<String> {
            None
        }
    }

    #[test]
    fn test_global_rate_limiter() {
        let manager = RateLimitManager::with_global_limit(3, 1);

        // Global limiter should apply to all routes
        for i in 0..3 {
            let result = manager.check("any-route", "127.0.0.1", "/", Option::<&NoHeaders>::None);
            assert!(result.allowed, "Request {} should be allowed", i);
            assert_eq!(result.limit, 3);
            assert_eq!(result.remaining, 3 - i as u32 - 1);
        }

        // 4th request should be blocked by global limiter
        let result = manager.check(
            "different-route",
            "127.0.0.1",
            "/",
            Option::<&NoHeaders>::None,
        );
        assert!(!result.allowed);
        assert_eq!(result.limiter, "global");
    }

    #[test]
    fn test_global_and_route_limiters() {
        let manager = RateLimitManager::with_global_limit(10, 5);

        // Register a more restrictive route limiter
        manager.register_route(
            "strict-api",
            RateLimitConfig {
                max_rps: 2,
                burst: 1,
                key: RateLimitKey::ClientIp,
                ..Default::default()
            },
        );

        // Route limiter should trigger first (more restrictive)
        let result1 = manager.check("strict-api", "127.0.0.1", "/", Option::<&NoHeaders>::None);
        let result2 = manager.check("strict-api", "127.0.0.1", "/", Option::<&NoHeaders>::None);
        assert!(result1.allowed);
        assert!(result2.allowed);

        // 3rd request should be blocked by route limiter
        let result3 = manager.check("strict-api", "127.0.0.1", "/", Option::<&NoHeaders>::None);
        assert!(!result3.allowed);
        assert_eq!(result3.limiter, "strict-api");

        // Different route should still work (global not exhausted)
        let result4 = manager.check("other-route", "127.0.0.1", "/", Option::<&NoHeaders>::None);
        assert!(result4.allowed);
    }

    #[test]
    fn test_suggested_delay_calculation() {
        let manager = RateLimitManager::new();

        manager.register_route(
            "api",
            RateLimitConfig {
                max_rps: 10,
                burst: 5,
                key: RateLimitKey::ClientIp,
                ..Default::default()
            },
        );

        // Exhaust the limit
        for _ in 0..10 {
            manager.check("api", "127.0.0.1", "/", Option::<&NoHeaders>::None);
        }

        // Requests over limit should have suggested delay
        let result = manager.check("api", "127.0.0.1", "/", Option::<&NoHeaders>::None);
        assert!(!result.allowed);
        assert!(result.suggested_delay_ms.is_some());

        // Delay should be proportional to how far over limit
        // Formula: (excess * 1000) / limit
        // With 1 excess request and limit of 10: (1 * 1000) / 10 = 100ms
        let delay = result.suggested_delay_ms.unwrap();
        assert!(delay > 0, "Delay should be positive");
        assert!(delay <= 1000, "Delay should be reasonable");
    }

    #[test]
    fn test_reset_timestamp_is_future() {
        let config = RateLimitConfig {
            max_rps: 5,
            burst: 2,
            key: RateLimitKey::ClientIp,
            ..Default::default()
        };
        let pool = RateLimiterPool::new(config);

        let info = pool.check("10.0.0.1");
        let now = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap()
            .as_secs();

        // Reset timestamp should be in the future (within the next second)
        assert!(info.reset_at >= now, "Reset time should be >= now");
        assert!(
            info.reset_at <= now + 2,
            "Reset time should be within 2 seconds"
        );
    }

    #[test]
    fn test_rate_limit_check_info_remaining_clamps_to_zero() {
        let config = RateLimitConfig {
            max_rps: 2,
            burst: 1,
            key: RateLimitKey::ClientIp,
            ..Default::default()
        };
        let pool = RateLimiterPool::new(config);

        // Exhaust the limit
        pool.check("10.0.0.1");
        pool.check("10.0.0.1");

        // Over-limit requests should show remaining as 0, not negative
        let info = pool.check("10.0.0.1");
        assert_eq!(info.remaining, 0);
        assert_eq!(info.outcome, RateLimitOutcome::Limited);
    }

    #[test]
    fn test_rate_limit_result_fields() {
        // Create a result by checking a rate limited request
        let manager = RateLimitManager::new();
        manager.register_route(
            "test",
            RateLimitConfig {
                max_rps: 1,
                burst: 1,
                key: RateLimitKey::ClientIp,
                ..Default::default()
            },
        );

        // First request allowed
        let allowed_result = manager.check("test", "127.0.0.1", "/", Option::<&NoHeaders>::None);
        assert!(allowed_result.allowed);
        assert_eq!(allowed_result.limit, 1);
        assert!(allowed_result.reset_at > 0);

        // Second request should be blocked
        let blocked_result = manager.check("test", "127.0.0.1", "/", Option::<&NoHeaders>::None);
        assert!(!blocked_result.allowed);
        assert_eq!(blocked_result.status_code, 429);
        assert_eq!(blocked_result.remaining, 0);
    }

    #[test]
    fn test_has_route_limiter() {
        let manager = RateLimitManager::new();
        assert!(!manager.has_route_limiter("test-route"));

        manager.register_route(
            "test-route",
            RateLimitConfig {
                max_rps: 10,
                burst: 5,
                key: RateLimitKey::ClientIp,
                ..Default::default()
            },
        );
        assert!(manager.has_route_limiter("test-route"));
        assert!(!manager.has_route_limiter("other-route"));
    }

    #[test]
    fn test_global_limiter_is_enabled() {
        let manager = RateLimitManager::with_global_limit(100, 50);
        // Global limiter should be enabled
        assert!(manager.is_enabled());
    }

    #[test]
    fn test_is_enabled() {
        let empty_manager = RateLimitManager::new();
        assert!(!empty_manager.is_enabled());

        let global_manager = RateLimitManager::with_global_limit(100, 50);
        assert!(global_manager.is_enabled());

        let route_manager = RateLimitManager::new();
        route_manager.register_route(
            "test",
            RateLimitConfig {
                max_rps: 10,
                burst: 5,
                key: RateLimitKey::ClientIp,
                ..Default::default()
            },
        );
        assert!(route_manager.is_enabled());
    }
}