ig_client/utils/rate_limiter.rs

// Rate limiter for API requests
// This module provides utilities to prevent hitting IG Markets API rate limits

use crate::constants::{BASE_DELAY_MS, SAFETY_BUFFER_MS};
use serde::{Deserialize, Serialize};
use std::collections::VecDeque;
use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio::sync::Mutex;
use tokio::time::sleep;
use tracing::{debug, info, warn};

/// Rate limiter type for different API endpoints with their respective limits
///
/// These limits are based on the official IG Markets API documentation:
/// - Per-app non-trading requests per minute: 60
/// - Per-account trading requests per minute: 100 (applies to create/amend position or working order requests)
/// - Per-account non-trading requests per minute: 30
/// - Historical price data points per week: 10,000 (applies to price history endpoints)
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum RateLimitType {
    /// Non-trading requests (per-account): 30 per minute
    NonTradingAccount,
    /// Trading requests (per-account): 100 per minute (create/amend position or working order requests)
    TradingAccount,
    /// Non-trading requests (per-app): 60 per minute
    NonTradingApp,
    /// Historical price data: 10,000 points per week (price history endpoints)
    HistoricalPrice,
    /// One request per second (special rate limit to prevent bursts)
    OnePerSecond,
}

impl RateLimitType {
    /// Gets the request limit per time window
    pub fn request_limit(&self) -> usize {
        match self {
            Self::NonTradingAccount => 30,  // 30 requests per minute
            Self::TradingAccount => 100,    // 100 requests per minute
            Self::NonTradingApp => 60,      // 60 requests per minute
            Self::HistoricalPrice => 10000, // 10,000 points per week
            Self::OnePerSecond => 1,        // 1 request per second
        }
    }

    /// Gets the time window in milliseconds
    pub fn time_window_ms(&self) -> u64 {
        match self {
            Self::NonTradingAccount => 60_000,    // 1 minute
            Self::TradingAccount => 60_000,       // 1 minute
            Self::NonTradingApp => 60_000,        // 1 minute
            Self::HistoricalPrice => 604_800_000, // 1 week
            Self::OnePerSecond => 1_000,          // 1 second
        }
    }

    /// Gets a description of the rate limit
    pub fn description(&self) -> String {
        match self {
            Self::NonTradingAccount => "30 requests per minute (per account)".to_string(),
            Self::TradingAccount => "100 requests per minute (per account)".to_string(),
            Self::NonTradingApp => "60 requests per minute (per app)".to_string(),
            Self::HistoricalPrice => "10,000 points per week".to_string(),
            Self::OnePerSecond => "1 request per second".to_string(),
        }
    }
}
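
// A small illustrative helper (assumption: not part of the original API, kept only as a
// worked example of the limits above): the average spacing implied by each limit, e.g.
// NonTradingAccount allows 30 requests per 60_000 ms, i.e. one request every 2_000 ms.
#[allow(dead_code)]
fn min_average_spacing_ms(limit_type: RateLimitType) -> u64 {
    limit_type.time_window_ms() / limit_type.request_limit() as u64
}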

/// Advanced rate limiter for API calls that maintains a request history
#[derive(Debug)]
pub struct RateLimiter {
    /// History of request timestamps
    request_history: Mutex<VecDeque<Instant>>,
    /// Type of rate limit to enforce
    limit_type: RateLimitType,
    /// Safety margin applied to the rate limit (fraction of the documented limit to use)
    safety_margin: f64,
}

impl RateLimiter {
    /// Creates a new rate limiter with the specified limit type
    pub fn new(limit_type: RateLimitType) -> Self {
        RateLimiter {
            request_history: Mutex::new(VecDeque::new()),
            limit_type,
            safety_margin: 1.0,
        }
    }

    /// Creates a new rate limiter of the same type with a custom safety margin
    /// (the request history is not carried over)
    ///
    /// # Arguments
    ///
    /// * `safety_margin` - The fraction of the actual limit to use, clamped to the range 0.1..=1.0
    ///   (e.g., 0.8 means use 80% of the actual limit)
    pub fn with_safety_margin(&mut self, safety_margin: f64) -> Self {
        let safety_margin = safety_margin.clamp(0.1, 1.0);
        Self {
            request_history: Mutex::new(VecDeque::new()),
            limit_type: self.limit_type,
            safety_margin,
        }
    }

    /// Returns the rate limit type for this limiter
    pub fn limit_type(&self) -> RateLimitType {
        self.limit_type
    }

    /// Returns the effective request limit (after applying safety margin)
    pub fn effective_limit(&self) -> usize {
        let raw_limit = self.limit_type.request_limit();
        (raw_limit as f64 * self.safety_margin).floor() as usize
    }

    /// Removes expired requests from the history
    async fn cleanup_history(&self, now: Instant) {
        let mut history = self.request_history.lock().await;
        let window_duration = Duration::from_millis(self.limit_type.time_window_ms());

        // Remove requests that are older than the time window
        while let Some(oldest) = history.front() {
            if now.duration_since(*oldest) >= window_duration {
                history.pop_front();
            } else {
                break;
            }
        }
    }

    /// Gets the current number of requests in the time window
    pub async fn current_request_count(&self) -> usize {
        let history = self.request_history.lock().await;
        history.len()
    }

    /// Gets the time until the next request can be made (in milliseconds).
    /// Returns 0 if a request can be made immediately.
    pub async fn time_until_next_request_ms(&self) -> u64 {
        let now = Instant::now();
        self.cleanup_history(now).await;

        // Use the async lock to avoid blocking the thread
        let history = self.request_history.lock().await;
        let effective_limit = self.effective_limit();

        // Be more conservative: leave headroom for concurrent requests.
        // This is especially important in recursive or concurrent contexts.
        let usage_threshold = effective_limit.saturating_sub(2);

        if history.len() < usage_threshold {
            // We're well below the limit, no need to wait
            return 0;
        }

        // If we're close to the limit but haven't reached it, add a small delay
        // to prevent multiple concurrent requests from exceeding the limit
        if history.len() < effective_limit {
            // Add a small delay proportional to how close we are to the limit
            let proximity_factor = (history.len() as f64) / (effective_limit as f64);
            return (BASE_DELAY_MS as f64 * proximity_factor * proximity_factor).round() as u64;
        }

        // We're at the limit, need to wait for the oldest request to expire
        if let Some(oldest) = history.front() {
            let window_duration = Duration::from_millis(self.limit_type.time_window_ms());
            let time_since_oldest = now.duration_since(*oldest);

            if time_since_oldest < window_duration {
                // Calculate how long until the oldest request expires
                let wait_time = window_duration.saturating_sub(time_since_oldest);
                // Add a buffer for extra safety
                return wait_time.as_millis() as u64 + SAFETY_BUFFER_MS;
            }
        }

        0 // Should never reach here after cleanup, but just in case
    }
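
    // Worked example of the three branches above (NonTradingAccount with the default
    // safety margin of 1.0, i.e. an effective limit of 30 requests per 60 s window;
    // BASE_DELAY_MS and SAFETY_BUFFER_MS come from `crate::constants`):
    //   - 0..=27 requests already in the window -> returns 0, no wait
    //   - 28 or 29 requests                     -> returns BASE_DELAY_MS * (n / 30)^2,
    //                                              a short ramp-up delay
    //   - 30 requests                           -> returns the time until the oldest
    //                                              entry leaves the 60 s window,
    //                                              plus SAFETY_BUFFER_MS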

    /// Records a new request in the history
    async fn record_request(&self) {
        let now = Instant::now();
        let mut history = self.request_history.lock().await;
        history.push_back(now);
    }

    /// Notifies the rate limiter that a rate limit error has been encountered.
    /// This will cause the rate limiter to enforce a mandatory cooldown period.
    pub async fn notify_rate_limit_exceeded(&self) {
        // Add "fake" requests to the history to force a cooldown
        let now = Instant::now();
        let mut history = self.request_history.lock().await;

        // Clear the history, then fill it up to the effective limit so that a full
        // cooldown period is enforced
        history.clear();

        let limit = self.effective_limit();
        for _ in 0..limit {
            history.push_back(now);
        }

        warn!(
            "Rate limit exceeded! Enforcing mandatory cooldown period for {:?}",
            self.limit_type
        );
    }

    /// Waits if necessary to respect the rate limit
    /// This method is thread-safe and can be called from multiple threads concurrently
    pub async fn wait(&self) {
        // Register the request BEFORE waiting
        // This is crucial to prevent multiple concurrent requests from exceeding the rate limit
        self.record_request().await;

        // Now calculate the wait time based on the updated history
        let wait_time = self.time_until_next_request_ms().await;

        if wait_time > 0 {
            info!(
                "Rate limiter ({:?}): waiting for {}ms ({}/{} requests used in window)",
                self.limit_type,
                wait_time,
                self.current_request_count().await,
                self.effective_limit()
            );
            sleep(Duration::from_millis(wait_time)).await;
        } else {
            debug!(
                "Rate limiter ({:?}): no wait needed ({}/{} requests used)",
                self.limit_type,
                self.current_request_count().await,
                self.effective_limit()
            );
        }
    }

    /// Gets statistics about the current rate limit usage
    pub async fn get_stats(&self) -> RateLimiterStats {
        let now = Instant::now();
        self.cleanup_history(now).await;

        let history = self.request_history.lock().await;
        let count = history.len();
        let limit = self.effective_limit();
        let usage_percent = if limit > 0 {
            (count as f64 / limit as f64) * 100.0
        } else {
            0.0
        };

        RateLimiterStats {
            limit_type: self.limit_type,
            request_count: count,
            effective_limit: limit,
            usage_percent,
        }
    }
}
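
// A minimal usage sketch (assumption: this wrapper is not part of the original API, it
// only illustrates the intended call pattern): await `wait()` before every request so
// the limiter records it, then perform the actual call. `do_request` stands in for
// whatever async operation hits the IG endpoint.
#[allow(dead_code)]
async fn rate_limited_call<F, Fut, T>(limiter: &RateLimiter, do_request: F) -> T
where
    F: FnOnce() -> Fut,
    Fut: std::future::Future<Output = T>,
{
    // Records the request and sleeps if the current window is too full
    limiter.wait().await;
    do_request().await
}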

/// Statistics about the rate limiter usage
#[derive(Debug)]
pub struct RateLimiterStats {
    /// Type of rate limit
    pub limit_type: RateLimitType,
    /// Current number of requests in the time window
    pub request_count: usize,
    /// Effective limit (raw limit * safety margin)
    pub effective_limit: usize,
    /// Usage percentage (current / effective limit)
    pub usage_percent: f64,
}

impl std::fmt::Display for RateLimiterStats {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "RateLimiter({:?}): {}/{} requests ({:.1}%), window: {}ms",
            self.limit_type,
            self.request_count,
            self.effective_limit,
            self.usage_percent,
            self.limit_type.time_window_ms()
        )
    }
}

/// Global rate limiter for non-trading account requests (30 per minute, with an 80% safety margin)
pub fn account_non_trading_limiter() -> Arc<RateLimiter> {
    static INSTANCE: once_cell::sync::Lazy<Arc<RateLimiter>> = once_cell::sync::Lazy::new(|| {
        let mut limiter = RateLimiter::new(RateLimitType::NonTradingAccount);
        Arc::new(limiter.with_safety_margin(0.8))
    });

    INSTANCE.clone()
}

/// Global rate limiter for trading account requests (100 per minute, with an 80% safety margin)
pub fn account_trading_limiter() -> Arc<RateLimiter> {
    static INSTANCE: once_cell::sync::Lazy<Arc<RateLimiter>> = once_cell::sync::Lazy::new(|| {
        let mut limiter = RateLimiter::new(RateLimitType::TradingAccount);
        Arc::new(limiter.with_safety_margin(0.8))
    });

    INSTANCE.clone()
}

/// Global rate limiter for non-trading app requests (60 per minute, with an 80% safety margin)
pub fn app_non_trading_limiter() -> Arc<RateLimiter> {
    static INSTANCE: once_cell::sync::Lazy<Arc<RateLimiter>> = once_cell::sync::Lazy::new(|| {
        let mut limiter = RateLimiter::new(RateLimitType::NonTradingApp);
        Arc::new(limiter.with_safety_margin(0.8))
    });

    INSTANCE.clone()
}

/// Global rate limiter for historical price data requests (10,000 points per week, with an 80% safety margin)
pub fn historical_price_limiter() -> Arc<RateLimiter> {
    static INSTANCE: once_cell::sync::Lazy<Arc<RateLimiter>> = once_cell::sync::Lazy::new(|| {
        let mut limiter = RateLimiter::new(RateLimitType::HistoricalPrice);
        Arc::new(limiter.with_safety_margin(0.8))
    });

    INSTANCE.clone()
}

/// Creates a rate limiter with the specified type and an optional safety margin
pub fn create_rate_limiter(
    limit_type: RateLimitType,
    safety_margin: Option<f64>,
) -> Arc<RateLimiter> {
    let mut limiter = RateLimiter::new(limit_type);
    match safety_margin {
        Some(margin) => Arc::new(limiter.with_safety_margin(margin)),
        None => Arc::new(limiter),
    }
}

/// Global rate limiter for one request per second
pub fn one_per_second_limiter() -> Arc<RateLimiter> {
    static INSTANCE: once_cell::sync::Lazy<Arc<RateLimiter>> = once_cell::sync::Lazy::new(|| {
        let limiter = RateLimiter::new(RateLimitType::OnePerSecond);
        Arc::new(limiter)
    });

    INSTANCE.clone()
}

/// Default global rate limiter (uses the most conservative limit: non-trading account)
pub fn global_rate_limiter() -> Arc<RateLimiter> {
    account_non_trading_limiter()
}
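
// A minimal usage sketch (assumption: this free function is illustrative only and not part
// of the original API): pick the shared limiter that matches the endpoint class and await
// it before issuing the request. The HTTP call itself is elided.
#[allow(dead_code)]
async fn example_throttled_account_call() {
    // Per-account, non-trading endpoints share this 30-per-minute limiter
    // (80% safety margin applied, so effectively 24 requests per minute).
    let limiter = account_non_trading_limiter();
    limiter.wait().await;
    // ... perform the actual request here ...
}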

/// Macro to mark rate-limit-sensitive tests as `#[ignore]` so they are only run
/// when explicitly requested (e.g. individually), to avoid hitting API rate limits
#[macro_export]
macro_rules! rate_limited_test {
    (fn $name:ident() $body:block) => {
        #[test]
        #[ignore]
        fn $name() $body
    };
}
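
// A usage sketch for `rate_limited_test!` (the test name and body below are hypothetical):
//
//     rate_limited_test! {
//         fn hits_live_api_without_tripping_limits() {
//             // ... test body that talks to the real API ...
//         }
//     }
//
// Because the expansion carries `#[ignore]`, such tests only run when requested
// explicitly, e.g. `cargo test -- --ignored`.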

#[cfg(test)]
mod tests {
    use super::*;
    use tokio::runtime::Runtime;

    #[test]
    fn test_rate_limiter_effective_limit() {
        let limiter = RateLimiter::new(RateLimitType::NonTradingAccount);
        assert_eq!(limiter.effective_limit(), 30); // Default safety margin is 1.0

        let mut limiter = RateLimiter::new(RateLimitType::NonTradingAccount);
        let limiter = limiter.with_safety_margin(0.5);
        assert_eq!(limiter.effective_limit(), 15); // 30 * 0.5 = 15
    }

    #[test]
    fn test_rate_limiter_history_tracking() {
        let rt = Runtime::new().unwrap();
        rt.block_on(async {
            let mut limiter = RateLimiter::new(RateLimitType::NonTradingAccount);
            let limiter = limiter.with_safety_margin(1.0);
            assert_eq!(limiter.current_request_count().await, 0);

            // Record some requests manually
            for _ in 0..5 {
                limiter.record_request().await;
            }

            assert_eq!(limiter.current_request_count().await, 5);
        });
    }
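
    // A supplementary sketch: each accessor hands out clones of one `Lazy` instance, so the
    // request history is shared process-wide rather than per call site.
    #[test]
    fn test_global_limiter_is_shared() {
        let a = app_non_trading_limiter();
        let b = app_non_trading_limiter();
        assert!(std::sync::Arc::ptr_eq(&a, &b));
    }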

    #[test]
    fn test_rate_limiter_stats() {
        let rt = Runtime::new().unwrap();
        rt.block_on(async {
            let mut limiter = RateLimiter::new(RateLimitType::NonTradingAccount);
            let limiter = limiter.with_safety_margin(0.8);

            // Record some requests
            for _ in 0..10 {
                limiter.record_request().await;
            }

            let stats = limiter.get_stats().await;
            assert_eq!(stats.request_count, 10);
            assert_eq!(stats.effective_limit, 24); // 30 * 0.8 = 24
            assert!(stats.usage_percent > 0.0);
        });
    }
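
    // A supplementary sketch of the cooldown path: after `notify_rate_limit_exceeded`,
    // the history is saturated up to the effective limit, so the next request must wait.
    #[test]
    fn test_rate_limiter_cooldown_after_limit_exceeded() {
        let rt = Runtime::new().unwrap();
        rt.block_on(async {
            let limiter = RateLimiter::new(RateLimitType::NonTradingAccount);

            // Empty history: no wait required
            assert_eq!(limiter.time_until_next_request_ms().await, 0);

            // Simulate the client observing a rate-limit error from the API
            limiter.notify_rate_limit_exceeded().await;

            assert_eq!(
                limiter.current_request_count().await,
                limiter.effective_limit()
            );
            assert!(limiter.time_until_next_request_ms().await > 0);
        });
    }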
}