// grapsus_proxy/rate_limit.rs
1//! Rate limiting using pingora-limits
2//!
3//! This module provides efficient per-route, per-client rate limiting using
4//! Pingora's optimized rate limiting primitives. Supports both local (single-instance)
5//! and distributed (Redis-backed) rate limiting.
6//!
7//! # Local Rate Limiting
8//!
9//! Uses `pingora-limits::Rate` for efficient in-memory rate limiting.
10//! Suitable for single-instance deployments.
11//!
12//! # Distributed Rate Limiting
13//!
14//! Uses Redis sorted sets for sliding window rate limiting across multiple instances.
15//! Requires the `distributed-rate-limit` feature.
16
17use dashmap::DashMap;
18use parking_lot::RwLock;
19use pingora_limits::rate::Rate;
20use std::sync::Arc;
21use std::time::{Duration, SystemTime, UNIX_EPOCH};
22use tracing::{debug, trace, warn};
23
24use grapsus_config::{RateLimitAction, RateLimitBackend, RateLimitKey};
25
26#[cfg(feature = "distributed-rate-limit")]
27use crate::distributed_rate_limit::{create_redis_rate_limiter, RedisRateLimiter};
28
/// Rate limiter outcome
///
/// Returned by per-key checks to say whether a request may proceed.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum RateLimitOutcome {
    /// Request is allowed
    Allowed,
    /// Request is rate limited
    Limited,
}
37
/// Detailed rate limit check result from a pool
///
/// Counts refer to the current 1-second window used by `KeyRateLimiter`.
#[derive(Debug, Clone)]
pub struct RateLimitCheckInfo {
    /// Whether the request is allowed or limited
    pub outcome: RateLimitOutcome,
    /// Current request count in the window
    /// (i64 so distributed backends can report Redis counts directly)
    pub current_count: i64,
    /// Maximum requests allowed per window
    pub limit: u32,
    /// Remaining requests in current window (0 if over limit)
    pub remaining: u32,
    /// Unix timestamp (seconds) when the window resets
    pub reset_at: u64,
}
52
/// Rate limiter configuration
///
/// NOTE(review): `burst` is stored and logged but never consulted by the local
/// limiter in this file (`KeyRateLimiter::new` only receives `max_rps`) —
/// confirm whether another backend uses it or whether it is dead config.
#[derive(Debug, Clone)]
pub struct RateLimitConfig {
    /// Maximum requests per second
    pub max_rps: u32,
    /// Burst size
    pub burst: u32,
    /// Key type for bucketing (client IP, path, route, header value, ...)
    pub key: RateLimitKey,
    /// Action when limited
    pub action: RateLimitAction,
    /// HTTP status code to return when limited
    pub status_code: u16,
    /// Custom message
    pub message: Option<String>,
    /// Backend for rate limiting (local or distributed)
    pub backend: RateLimitBackend,
    /// Maximum delay in milliseconds for Delay action
    pub max_delay_ms: u64,
}
73
impl Default for RateLimitConfig {
    /// Conservative defaults: 100 req/s per client IP, rejected with HTTP 429,
    /// local (in-memory) backend.
    fn default() -> Self {
        Self {
            max_rps: 100,
            burst: 10,
            key: RateLimitKey::ClientIp,
            action: RateLimitAction::Reject,
            status_code: 429, // Too Many Requests
            message: None,
            backend: RateLimitBackend::Local,
            max_delay_ms: 5000,
        }
    }
}
88
/// Per-key rate limiter using pingora-limits Rate
///
/// Uses a sliding window algorithm with 1-second granularity.
struct KeyRateLimiter {
    /// The rate limiter instance (tracks requests in the current 1-second window)
    rate: Rate,
    /// Maximum requests per window (isize to match the count type returned by
    /// `Rate::observe`)
    max_requests: isize,
}
98
99impl KeyRateLimiter {
100    fn new(max_rps: u32) -> Self {
101        Self {
102            rate: Rate::new(Duration::from_secs(1)),
103            max_requests: max_rps as isize,
104        }
105    }
106
107    /// Check if a request should be allowed
108    fn check(&self) -> RateLimitOutcome {
109        // Rate::observe() returns the current count and whether it was a new window
110        let curr_count = self.rate.observe(&(), 1);
111
112        if curr_count > self.max_requests {
113            RateLimitOutcome::Limited
114        } else {
115            RateLimitOutcome::Allowed
116        }
117    }
118}
119
/// Backend type for rate limiting
///
/// NOTE(review): this enum is `pub` but its variant fields expose the private
/// `KeyRateLimiter` type, which trips the `private_interfaces` lint — consider
/// `pub(crate)` visibility here.
pub enum RateLimitBackendType {
    /// Local in-memory backend
    Local {
        /// Rate limiters by key (e.g., client IP -> limiter)
        limiters: DashMap<String, Arc<KeyRateLimiter>>,
    },
    /// Distributed Redis backend
    #[cfg(feature = "distributed-rate-limit")]
    Distributed {
        /// Redis rate limiter
        redis: Arc<RedisRateLimiter>,
        /// Local fallback used when Redis is unreachable
        local_fallback: DashMap<String, Arc<KeyRateLimiter>>,
    },
}
136
/// Thread-safe rate limiter pool managing multiple rate limiters by key
///
/// One pool represents one logical limit (e.g. a route's limit); keys bucket
/// requests within that limit.
pub struct RateLimiterPool {
    /// Backend for rate limiting
    backend: RateLimitBackendType,
    /// Configuration (behind RwLock so it can be hot-swapped via `update_config`)
    config: RwLock<RateLimitConfig>,
}
144
/// Get current unix timestamp in seconds
///
/// Clamps to 0 if the system clock reports a time before the unix epoch.
fn current_unix_timestamp() -> u64 {
    match SystemTime::now().duration_since(UNIX_EPOCH) {
        Ok(elapsed) => elapsed.as_secs(),
        Err(_) => 0,
    }
}
152
153/// Calculate window reset timestamp (next second boundary for 1-second windows)
154fn calculate_reset_timestamp() -> u64 {
155    current_unix_timestamp() + 1
156}
157
158impl RateLimiterPool {
159    /// Create a new rate limiter pool with the given configuration (local backend)
160    pub fn new(config: RateLimitConfig) -> Self {
161        Self {
162            backend: RateLimitBackendType::Local {
163                limiters: DashMap::new(),
164            },
165            config: RwLock::new(config),
166        }
167    }
168
169    /// Create a new rate limiter pool with a distributed Redis backend
170    #[cfg(feature = "distributed-rate-limit")]
171    pub fn with_redis(config: RateLimitConfig, redis: Arc<RedisRateLimiter>) -> Self {
172        Self {
173            backend: RateLimitBackendType::Distributed {
174                redis,
175                local_fallback: DashMap::new(),
176            },
177            config: RwLock::new(config),
178        }
179    }
180
181    /// Check if a request should be rate limited (synchronous, local only)
182    ///
183    /// Returns detailed rate limit information including remaining quota.
184    /// For distributed backends, this falls back to local limiting.
185    pub fn check(&self, key: &str) -> RateLimitCheckInfo {
186        let config = self.config.read();
187        let max_rps = config.max_rps;
188        drop(config);
189
190        let limiters = match &self.backend {
191            RateLimitBackendType::Local { limiters } => limiters,
192            #[cfg(feature = "distributed-rate-limit")]
193            RateLimitBackendType::Distributed { local_fallback, .. } => local_fallback,
194        };
195
196        // Get or create limiter for this key
197        let limiter = limiters
198            .entry(key.to_string())
199            .or_insert_with(|| Arc::new(KeyRateLimiter::new(max_rps)))
200            .clone();
201
202        let outcome = limiter.check();
203        let count = limiter.rate.observe(&(), 0); // Get current count without incrementing
204        let remaining = if count >= max_rps as isize {
205            0
206        } else {
207            (max_rps as isize - count) as u32
208        };
209
210        RateLimitCheckInfo {
211            outcome,
212            current_count: count as i64,
213            limit: max_rps,
214            remaining,
215            reset_at: calculate_reset_timestamp(),
216        }
217    }
218
219    /// Check if a request should be rate limited (async, supports distributed backends)
220    ///
221    /// Returns detailed rate limit information including remaining quota.
222    #[cfg(feature = "distributed-rate-limit")]
223    pub async fn check_async(&self, key: &str) -> RateLimitCheckInfo {
224        let max_rps = self.config.read().max_rps;
225
226        match &self.backend {
227            RateLimitBackendType::Local { .. } => self.check(key),
228            RateLimitBackendType::Distributed {
229                redis,
230                local_fallback,
231            } => {
232                // Try Redis first
233                match redis.check(key).await {
234                    Ok((outcome, count)) => {
235                        let remaining = if count >= max_rps as i64 {
236                            0
237                        } else {
238                            (max_rps as i64 - count) as u32
239                        };
240                        RateLimitCheckInfo {
241                            outcome,
242                            current_count: count,
243                            limit: max_rps,
244                            remaining,
245                            reset_at: calculate_reset_timestamp(),
246                        }
247                    }
248                    Err(e) => {
249                        warn!(
250                            error = %e,
251                            key = key,
252                            "Redis rate limit check failed, falling back to local"
253                        );
254                        redis.mark_unhealthy();
255
256                        // Fallback to local
257                        if redis.fallback_enabled() {
258                            let limiter = local_fallback
259                                .entry(key.to_string())
260                                .or_insert_with(|| Arc::new(KeyRateLimiter::new(max_rps)))
261                                .clone();
262
263                            let outcome = limiter.check();
264                            let count = limiter.rate.observe(&(), 0);
265                            let remaining = if count >= max_rps as isize {
266                                0
267                            } else {
268                                (max_rps as isize - count) as u32
269                            };
270                            RateLimitCheckInfo {
271                                outcome,
272                                current_count: count as i64,
273                                limit: max_rps,
274                                remaining,
275                                reset_at: calculate_reset_timestamp(),
276                            }
277                        } else {
278                            // Fail open if no fallback
279                            RateLimitCheckInfo {
280                                outcome: RateLimitOutcome::Allowed,
281                                current_count: 0,
282                                limit: max_rps,
283                                remaining: max_rps,
284                                reset_at: calculate_reset_timestamp(),
285                            }
286                        }
287                    }
288                }
289            }
290        }
291    }
292
293    /// Check if this pool uses a distributed backend
294    pub fn is_distributed(&self) -> bool {
295        match &self.backend {
296            RateLimitBackendType::Local { .. } => false,
297            #[cfg(feature = "distributed-rate-limit")]
298            RateLimitBackendType::Distributed { .. } => true,
299        }
300    }
301
302    /// Get the rate limit key from request context
303    pub fn extract_key(
304        &self,
305        client_ip: &str,
306        path: &str,
307        route_id: &str,
308        headers: Option<&impl HeaderAccessor>,
309    ) -> String {
310        let config = self.config.read();
311        match &config.key {
312            RateLimitKey::ClientIp => client_ip.to_string(),
313            RateLimitKey::Path => path.to_string(),
314            RateLimitKey::Route => route_id.to_string(),
315            RateLimitKey::ClientIpAndPath => format!("{}:{}", client_ip, path),
316            RateLimitKey::Header(header_name) => headers
317                .and_then(|h| h.get_header(header_name))
318                .unwrap_or_else(|| "unknown".to_string()),
319        }
320    }
321
322    /// Get the action to take when rate limited
323    pub fn action(&self) -> RateLimitAction {
324        self.config.read().action.clone()
325    }
326
327    /// Get the HTTP status code for rate limit responses
328    pub fn status_code(&self) -> u16 {
329        self.config.read().status_code
330    }
331
332    /// Get the custom message for rate limit responses
333    pub fn message(&self) -> Option<String> {
334        self.config.read().message.clone()
335    }
336
337    /// Get the maximum delay in milliseconds for Delay action
338    pub fn max_delay_ms(&self) -> u64 {
339        self.config.read().max_delay_ms
340    }
341
342    /// Update the configuration
343    pub fn update_config(&self, config: RateLimitConfig) {
344        *self.config.write() = config;
345        // Clear existing limiters so they get recreated with new config
346        self.clear_local_limiters();
347    }
348
349    /// Clear local limiters (for config updates)
350    fn clear_local_limiters(&self) {
351        match &self.backend {
352            RateLimitBackendType::Local { limiters } => limiters.clear(),
353            #[cfg(feature = "distributed-rate-limit")]
354            RateLimitBackendType::Distributed { local_fallback, .. } => local_fallback.clear(),
355        }
356    }
357
358    /// Get the number of local limiter entries
359    fn local_limiter_count(&self) -> usize {
360        match &self.backend {
361            RateLimitBackendType::Local { limiters } => limiters.len(),
362            #[cfg(feature = "distributed-rate-limit")]
363            RateLimitBackendType::Distributed { local_fallback, .. } => local_fallback.len(),
364        }
365    }
366
367    /// Clean up expired entries (call periodically)
368    pub fn cleanup(&self) {
369        // Remove entries that haven't been accessed recently
370        // In practice, Rate handles its own window cleanup, so this is mainly
371        // for memory management when many unique keys are seen
372        let max_entries = 100_000; // Prevent unbounded growth
373
374        let limiters = match &self.backend {
375            RateLimitBackendType::Local { limiters } => limiters,
376            #[cfg(feature = "distributed-rate-limit")]
377            RateLimitBackendType::Distributed { local_fallback, .. } => local_fallback,
378        };
379
380        if limiters.len() > max_entries {
381            // Simple eviction: clear half
382            let to_remove: Vec<_> = limiters
383                .iter()
384                .take(max_entries / 2)
385                .map(|e| e.key().clone())
386                .collect();
387
388            for key in to_remove {
389                limiters.remove(&key);
390            }
391
392            debug!(
393                entries_before = max_entries,
394                entries_after = limiters.len(),
395                "Rate limiter pool cleanup completed"
396            );
397        }
398    }
399}
400
/// Trait for accessing headers (allows abstracting over different header types)
pub trait HeaderAccessor {
    /// Return the value of header `name`, or `None` if absent.
    fn get_header(&self, name: &str) -> Option<String>;
}
405
/// Route-level rate limiter manager
///
/// Holds one pool per registered route plus an optional global pool that
/// applies to every request before route-specific limits.
pub struct RateLimitManager {
    /// Per-route rate limiter pools
    route_limiters: DashMap<String, Arc<RateLimiterPool>>,
    /// Global rate limiter (optional)
    global_limiter: Option<Arc<RateLimiterPool>>,
}
413
414impl RateLimitManager {
415    /// Create a new rate limit manager
416    pub fn new() -> Self {
417        Self {
418            route_limiters: DashMap::new(),
419            global_limiter: None,
420        }
421    }
422
423    /// Create a new rate limit manager with a global rate limit
424    pub fn with_global_limit(max_rps: u32, burst: u32) -> Self {
425        let config = RateLimitConfig {
426            max_rps,
427            burst,
428            key: RateLimitKey::ClientIp,
429            action: RateLimitAction::Reject,
430            status_code: 429,
431            message: None,
432            backend: RateLimitBackend::Local,
433            max_delay_ms: 5000,
434        };
435        Self {
436            route_limiters: DashMap::new(),
437            global_limiter: Some(Arc::new(RateLimiterPool::new(config))),
438        }
439    }
440
441    /// Register a rate limiter for a route
442    pub fn register_route(&self, route_id: &str, config: RateLimitConfig) {
443        trace!(
444            route_id = route_id,
445            max_rps = config.max_rps,
446            burst = config.burst,
447            key = ?config.key,
448            "Registering rate limiter for route"
449        );
450
451        self.route_limiters
452            .insert(route_id.to_string(), Arc::new(RateLimiterPool::new(config)));
453    }
454
455    /// Check if a request should be rate limited
456    ///
457    /// Checks both global and route-specific limits.
458    /// Returns detailed rate limit information for response headers.
459    pub fn check(
460        &self,
461        route_id: &str,
462        client_ip: &str,
463        path: &str,
464        headers: Option<&impl HeaderAccessor>,
465    ) -> RateLimitResult {
466        // Track the most restrictive limit info for headers
467        let mut best_limit_info: Option<RateLimitCheckInfo> = None;
468
469        // Check global limit first
470        if let Some(ref global) = self.global_limiter {
471            let key = global.extract_key(client_ip, path, route_id, headers);
472            let check_info = global.check(&key);
473
474            if check_info.outcome == RateLimitOutcome::Limited {
475                warn!(
476                    route_id = route_id,
477                    client_ip = client_ip,
478                    key = key,
479                    count = check_info.current_count,
480                    "Request rate limited by global limiter"
481                );
482                // Calculate suggested delay based on how far over limit
483                let suggested_delay_ms = if check_info.current_count > check_info.limit as i64 {
484                    let excess = check_info.current_count - check_info.limit as i64;
485                    Some((excess as u64 * 1000) / check_info.limit as u64)
486                } else {
487                    None
488                };
489                return RateLimitResult {
490                    allowed: false,
491                    action: global.action(),
492                    status_code: global.status_code(),
493                    message: global.message(),
494                    limiter: "global".to_string(),
495                    limit: check_info.limit,
496                    remaining: check_info.remaining,
497                    reset_at: check_info.reset_at,
498                    suggested_delay_ms,
499                    max_delay_ms: global.max_delay_ms(),
500                };
501            }
502
503            best_limit_info = Some(check_info);
504        }
505
506        // Check route-specific limit
507        if let Some(pool) = self.route_limiters.get(route_id) {
508            let key = pool.extract_key(client_ip, path, route_id, headers);
509            let check_info = pool.check(&key);
510
511            if check_info.outcome == RateLimitOutcome::Limited {
512                warn!(
513                    route_id = route_id,
514                    client_ip = client_ip,
515                    key = key,
516                    count = check_info.current_count,
517                    "Request rate limited by route limiter"
518                );
519                // Calculate suggested delay based on how far over limit
520                let suggested_delay_ms = if check_info.current_count > check_info.limit as i64 {
521                    let excess = check_info.current_count - check_info.limit as i64;
522                    Some((excess as u64 * 1000) / check_info.limit as u64)
523                } else {
524                    None
525                };
526                return RateLimitResult {
527                    allowed: false,
528                    action: pool.action(),
529                    status_code: pool.status_code(),
530                    message: pool.message(),
531                    limiter: route_id.to_string(),
532                    limit: check_info.limit,
533                    remaining: check_info.remaining,
534                    reset_at: check_info.reset_at,
535                    suggested_delay_ms,
536                    max_delay_ms: pool.max_delay_ms(),
537                };
538            }
539
540            trace!(
541                route_id = route_id,
542                key = key,
543                count = check_info.current_count,
544                remaining = check_info.remaining,
545                "Request allowed by rate limiter"
546            );
547
548            // Use the more restrictive limit info (lower remaining)
549            if let Some(ref existing) = best_limit_info {
550                if check_info.remaining < existing.remaining {
551                    best_limit_info = Some(check_info);
552                }
553            } else {
554                best_limit_info = Some(check_info);
555            }
556        }
557
558        // Return allowed with rate limit info for headers
559        let (limit, remaining, reset_at) = best_limit_info
560            .map(|info| (info.limit, info.remaining, info.reset_at))
561            .unwrap_or((0, 0, 0));
562
563        RateLimitResult {
564            allowed: true,
565            action: RateLimitAction::Reject,
566            status_code: 429,
567            message: None,
568            limiter: String::new(),
569            limit,
570            remaining,
571            reset_at,
572            suggested_delay_ms: None,
573            max_delay_ms: 5000, // Default max delay for allowed requests (unused)
574        }
575    }
576
577    /// Perform periodic cleanup
578    pub fn cleanup(&self) {
579        if let Some(ref global) = self.global_limiter {
580            global.cleanup();
581        }
582        for entry in self.route_limiters.iter() {
583            entry.value().cleanup();
584        }
585    }
586
587    /// Get the number of registered route limiters
588    pub fn route_count(&self) -> usize {
589        self.route_limiters.len()
590    }
591
592    /// Check if any rate limiting is configured (fast path)
593    ///
594    /// Returns true if there's a global limiter or any route-specific limiters.
595    /// Use this to skip rate limit checks entirely when no limiting is configured.
596    #[inline]
597    pub fn is_enabled(&self) -> bool {
598        self.global_limiter.is_some() || !self.route_limiters.is_empty()
599    }
600
601    /// Check if a specific route has rate limiting configured (fast path)
602    #[inline]
603    pub fn has_route_limiter(&self, route_id: &str) -> bool {
604        self.global_limiter.is_some() || self.route_limiters.contains_key(route_id)
605    }
606}
607
impl Default for RateLimitManager {
    /// Equivalent to [`RateLimitManager::new`]: no global and no route limiters.
    fn default() -> Self {
        Self::new()
    }
}
613
/// Result of a rate limit check
///
/// Carries everything the proxy needs to build a response: the decision, the
/// configured action/status/message, and window data for rate-limit headers.
#[derive(Debug, Clone)]
pub struct RateLimitResult {
    /// Whether the request is allowed
    pub allowed: bool,
    /// Action to take if limited
    pub action: RateLimitAction,
    /// HTTP status code for rejection
    pub status_code: u16,
    /// Custom message
    pub message: Option<String>,
    /// Which limiter triggered (for logging); empty when allowed
    pub limiter: String,
    /// Maximum requests allowed per window (0 when no limiter applied)
    pub limit: u32,
    /// Remaining requests in current window
    pub remaining: u32,
    /// Unix timestamp (seconds) when the window resets
    pub reset_at: u64,
    /// Suggested delay in milliseconds (for Delay action)
    pub suggested_delay_ms: Option<u64>,
    /// Maximum delay in milliseconds (configured cap for Delay action)
    pub max_delay_ms: u64,
}
638
639#[cfg(test)]
640mod tests {
641    use super::*;
642
    // Requests under max_rps are allowed and `remaining` counts down by one.
    #[test]
    fn test_rate_limiter_allows_under_limit() {
        let config = RateLimitConfig {
            max_rps: 10,
            burst: 5,
            key: RateLimitKey::ClientIp,
            ..Default::default()
        };
        let pool = RateLimiterPool::new(config);

        // Should allow first 10 requests
        for i in 0..10 {
            let info = pool.check("127.0.0.1");
            assert_eq!(info.outcome, RateLimitOutcome::Allowed);
            assert_eq!(info.limit, 10);
            assert_eq!(info.remaining, 10 - i - 1);
        }
    }
661
    // The first request past max_rps in the same window is rejected.
    #[test]
    fn test_rate_limiter_blocks_over_limit() {
        let config = RateLimitConfig {
            max_rps: 5,
            burst: 2,
            key: RateLimitKey::ClientIp,
            ..Default::default()
        };
        let pool = RateLimiterPool::new(config);

        // Should allow first 5 requests
        for _ in 0..5 {
            let info = pool.check("127.0.0.1");
            assert_eq!(info.outcome, RateLimitOutcome::Allowed);
        }

        // 6th request should be limited
        let info = pool.check("127.0.0.1");
        assert_eq!(info.outcome, RateLimitOutcome::Limited);
        assert_eq!(info.remaining, 0);
    }
683
    // Different keys (client IPs) get independent budgets in the same pool.
    #[test]
    fn test_rate_limiter_separate_keys() {
        let config = RateLimitConfig {
            max_rps: 2,
            burst: 1,
            key: RateLimitKey::ClientIp,
            ..Default::default()
        };
        let pool = RateLimiterPool::new(config);

        // Each IP gets its own bucket
        let info1 = pool.check("192.168.1.1");
        let info2 = pool.check("192.168.1.2");
        let info3 = pool.check("192.168.1.1");
        let info4 = pool.check("192.168.1.2");

        assert_eq!(info1.outcome, RateLimitOutcome::Allowed);
        assert_eq!(info2.outcome, RateLimitOutcome::Allowed);
        assert_eq!(info3.outcome, RateLimitOutcome::Allowed);
        assert_eq!(info4.outcome, RateLimitOutcome::Allowed);

        // Both should hit limit now
        let info5 = pool.check("192.168.1.1");
        let info6 = pool.check("192.168.1.2");

        assert_eq!(info5.outcome, RateLimitOutcome::Limited);
        assert_eq!(info6.outcome, RateLimitOutcome::Limited);
    }
712
    // A single check populates all RateLimitCheckInfo fields sensibly.
    #[test]
    fn test_rate_limit_info_fields() {
        let config = RateLimitConfig {
            max_rps: 5,
            burst: 2,
            key: RateLimitKey::ClientIp,
            ..Default::default()
        };
        let pool = RateLimiterPool::new(config);

        let info = pool.check("10.0.0.1");
        assert_eq!(info.limit, 5);
        assert_eq!(info.remaining, 4); // 5 - 1 = 4
        assert!(info.reset_at > 0);
        assert_eq!(info.outcome, RateLimitOutcome::Allowed);
    }
729
    // Routes without a limiter pass with limit=0; registered routes enforce
    // their configured limit and report header info.
    #[test]
    fn test_rate_limit_manager() {
        let manager = RateLimitManager::new();

        manager.register_route(
            "api",
            RateLimitConfig {
                max_rps: 5,
                burst: 2,
                key: RateLimitKey::ClientIp,
                ..Default::default()
            },
        );

        // Route without limiter should always pass (no rate limit info)
        let result = manager.check("web", "127.0.0.1", "/", Option::<&NoHeaders>::None);
        assert!(result.allowed);
        assert_eq!(result.limit, 0); // No limiter configured

        // Route with limiter should enforce limits and return rate limit info
        for i in 0..5 {
            let result = manager.check("api", "127.0.0.1", "/api/test", Option::<&NoHeaders>::None);
            assert!(result.allowed);
            assert_eq!(result.limit, 5);
            assert_eq!(result.remaining, 5 - i as u32 - 1);
        }

        let result = manager.check("api", "127.0.0.1", "/api/test", Option::<&NoHeaders>::None);
        assert!(!result.allowed);
        assert_eq!(result.status_code, 429);
        assert_eq!(result.limit, 5);
        assert_eq!(result.remaining, 0);
        assert!(result.reset_at > 0);
    }
764
    // A limited request carries a suggested delay for the Delay action.
    #[test]
    fn test_rate_limit_result_with_delay() {
        let manager = RateLimitManager::new();

        manager.register_route(
            "api",
            RateLimitConfig {
                max_rps: 2,
                burst: 1,
                key: RateLimitKey::ClientIp,
                ..Default::default()
            },
        );

        // Use up the limit
        manager.check("api", "127.0.0.1", "/", Option::<&NoHeaders>::None);
        manager.check("api", "127.0.0.1", "/", Option::<&NoHeaders>::None);

        // Third request should be limited with suggested delay
        let result = manager.check("api", "127.0.0.1", "/", Option::<&NoHeaders>::None);
        assert!(!result.allowed);
        assert!(result.suggested_delay_ms.is_some());
    }
788
    // Helper type for tests that don't need header access: every lookup
    // reports the header as absent.
    struct NoHeaders;
    impl HeaderAccessor for NoHeaders {
        fn get_header(&self, _name: &str) -> Option<String> {
            None
        }
    }
796
    // The global limiter applies across all routes and is reported as "global".
    #[test]
    fn test_global_rate_limiter() {
        let manager = RateLimitManager::with_global_limit(3, 1);

        // Global limiter should apply to all routes
        for i in 0..3 {
            let result = manager.check("any-route", "127.0.0.1", "/", Option::<&NoHeaders>::None);
            assert!(result.allowed, "Request {} should be allowed", i);
            assert_eq!(result.limit, 3);
            assert_eq!(result.remaining, 3 - i as u32 - 1);
        }

        // 4th request should be blocked by global limiter
        let result = manager.check(
            "different-route",
            "127.0.0.1",
            "/",
            Option::<&NoHeaders>::None,
        );
        assert!(!result.allowed);
        assert_eq!(result.limiter, "global");
    }
819
    // A stricter route limiter triggers before a looser global limiter, and
    // other routes still have global budget left.
    #[test]
    fn test_global_and_route_limiters() {
        let manager = RateLimitManager::with_global_limit(10, 5);

        // Register a more restrictive route limiter
        manager.register_route(
            "strict-api",
            RateLimitConfig {
                max_rps: 2,
                burst: 1,
                key: RateLimitKey::ClientIp,
                ..Default::default()
            },
        );

        // Route limiter should trigger first (more restrictive)
        let result1 = manager.check("strict-api", "127.0.0.1", "/", Option::<&NoHeaders>::None);
        let result2 = manager.check("strict-api", "127.0.0.1", "/", Option::<&NoHeaders>::None);
        assert!(result1.allowed);
        assert!(result2.allowed);

        // 3rd request should be blocked by route limiter
        let result3 = manager.check("strict-api", "127.0.0.1", "/", Option::<&NoHeaders>::None);
        assert!(!result3.allowed);
        assert_eq!(result3.limiter, "strict-api");

        // Different route should still work (global not exhausted)
        let result4 = manager.check("other-route", "127.0.0.1", "/", Option::<&NoHeaders>::None);
        assert!(result4.allowed);
    }
850
    // Suggested delay follows (excess * 1000) / limit and stays in a sane range.
    #[test]
    fn test_suggested_delay_calculation() {
        let manager = RateLimitManager::new();

        manager.register_route(
            "api",
            RateLimitConfig {
                max_rps: 10,
                burst: 5,
                key: RateLimitKey::ClientIp,
                ..Default::default()
            },
        );

        // Exhaust the limit
        for _ in 0..10 {
            manager.check("api", "127.0.0.1", "/", Option::<&NoHeaders>::None);
        }

        // Requests over limit should have suggested delay
        let result = manager.check("api", "127.0.0.1", "/", Option::<&NoHeaders>::None);
        assert!(!result.allowed);
        assert!(result.suggested_delay_ms.is_some());

        // Delay should be proportional to how far over limit
        // Formula: (excess * 1000) / limit
        // With 1 excess request and limit of 10: (1 * 1000) / 10 = 100ms
        let delay = result.suggested_delay_ms.unwrap();
        assert!(delay > 0, "Delay should be positive");
        assert!(delay <= 1000, "Delay should be reasonable");
    }
882
883    #[test]
884    fn test_reset_timestamp_is_future() {
885        let config = RateLimitConfig {
886            max_rps: 5,
887            burst: 2,
888            key: RateLimitKey::ClientIp,
889            ..Default::default()
890        };
891        let pool = RateLimiterPool::new(config);
892
893        let info = pool.check("10.0.0.1");
894        let now = std::time::SystemTime::now()
895            .duration_since(std::time::UNIX_EPOCH)
896            .unwrap()
897            .as_secs();
898
899        // Reset timestamp should be in the future (within the next second)
900        assert!(info.reset_at >= now, "Reset time should be >= now");
901        assert!(
902            info.reset_at <= now + 2,
903            "Reset time should be within 2 seconds"
904        );
905    }
906
907    #[test]
908    fn test_rate_limit_check_info_remaining_clamps_to_zero() {
909        let config = RateLimitConfig {
910            max_rps: 2,
911            burst: 1,
912            key: RateLimitKey::ClientIp,
913            ..Default::default()
914        };
915        let pool = RateLimiterPool::new(config);
916
917        // Exhaust the limit
918        pool.check("10.0.0.1");
919        pool.check("10.0.0.1");
920
921        // Over-limit requests should show remaining as 0, not negative
922        let info = pool.check("10.0.0.1");
923        assert_eq!(info.remaining, 0);
924        assert_eq!(info.outcome, RateLimitOutcome::Limited);
925    }
926
927    #[test]
928    fn test_rate_limit_result_fields() {
929        // Create a result by checking a rate limited request
930        let manager = RateLimitManager::new();
931        manager.register_route(
932            "test",
933            RateLimitConfig {
934                max_rps: 1,
935                burst: 1,
936                key: RateLimitKey::ClientIp,
937                ..Default::default()
938            },
939        );
940
941        // First request allowed
942        let allowed_result = manager.check("test", "127.0.0.1", "/", Option::<&NoHeaders>::None);
943        assert!(allowed_result.allowed);
944        assert_eq!(allowed_result.limit, 1);
945        assert!(allowed_result.reset_at > 0);
946
947        // Second request should be blocked
948        let blocked_result = manager.check("test", "127.0.0.1", "/", Option::<&NoHeaders>::None);
949        assert!(!blocked_result.allowed);
950        assert_eq!(blocked_result.status_code, 429);
951        assert_eq!(blocked_result.remaining, 0);
952    }
953
954    #[test]
955    fn test_has_route_limiter() {
956        let manager = RateLimitManager::new();
957        assert!(!manager.has_route_limiter("test-route"));
958
959        manager.register_route(
960            "test-route",
961            RateLimitConfig {
962                max_rps: 10,
963                burst: 5,
964                key: RateLimitKey::ClientIp,
965                ..Default::default()
966            },
967        );
968        assert!(manager.has_route_limiter("test-route"));
969        assert!(!manager.has_route_limiter("other-route"));
970    }
971
972    #[test]
973    fn test_global_limiter_is_enabled() {
974        let manager = RateLimitManager::with_global_limit(100, 50);
975        // Global limiter should be enabled
976        assert!(manager.is_enabled());
977    }
978
979    #[test]
980    fn test_is_enabled() {
981        let empty_manager = RateLimitManager::new();
982        assert!(!empty_manager.is_enabled());
983
984        let global_manager = RateLimitManager::with_global_limit(100, 50);
985        assert!(global_manager.is_enabled());
986
987        let route_manager = RateLimitManager::new();
988        route_manager.register_route(
989            "test",
990            RateLimitConfig {
991                max_rps: 10,
992                burst: 5,
993                key: RateLimitKey::ClientIp,
994                ..Default::default()
995            },
996        );
997        assert!(route_manager.is_enabled());
998    }
999}