// nautilus_hyperliquid/http/rate_limits.rs

1// -------------------------------------------------------------------------------------------------
2//  Copyright (C) 2015-2026 Nautech Systems Pty Ltd. All rights reserved.
3//  https://nautechsystems.io
4//
5//  Licensed under the GNU Lesser General Public License Version 3.0 (the "License");
6//  You may not use this file except in compliance with the License.
7//  You may obtain a copy of the License at https://www.gnu.org/licenses/lgpl-3.0.en.html
8//
9//  Unless required by applicable law or agreed to in writing, software
10//  distributed under the License is distributed on an "AS IS" BASIS,
11//  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12//  See the License for the specific language governing permissions and
13//  limitations under the License.
14// -------------------------------------------------------------------------------------------------
15
16use std::{
17    collections::hash_map::DefaultHasher,
18    hash::{Hash, Hasher},
19    time::{Duration, Instant, SystemTime, UNIX_EPOCH},
20};
21
22use serde_json::Value;
23
24use crate::{common::enums::HyperliquidInfoRequestType, http::query::ExchangeActionParams};
25
/// Token-bucket rate limiter with fractional (weighted) costs.
///
/// Tokens refill continuously at `capacity` per minute; callers acquire a
/// per-request weight and sleep until enough tokens are available.
#[derive(Debug)]
pub struct WeightedLimiter {
    capacity: f64,       // tokens per minute (e.g., 1200)
    refill_per_sec: f64, // capacity / 60
    // Async mutex so waiters can hold the guard across lock acquisition in
    // async context; the guard is dropped before any sleep.
    state: tokio::sync::Mutex<State>,
}
32
/// Mutable bucket state, guarded by the limiter's mutex.
#[derive(Debug)]
struct State {
    // Current token balance; fractional because refill is continuous.
    tokens: f64,
    // Instant of the last refill computation (monotonic clock).
    last_refill: Instant,
}
38
39impl WeightedLimiter {
40    pub fn per_minute(capacity: u32) -> Self {
41        let cap = capacity as f64;
42        Self {
43            capacity: cap,
44            refill_per_sec: cap / 60.0,
45            state: tokio::sync::Mutex::new(State {
46                tokens: cap,
47                last_refill: Instant::now(),
48            }),
49        }
50    }
51
52    /// Acquire `weight` tokens, sleeping until available.
53    pub async fn acquire(&self, weight: u32) {
54        let need = weight as f64;
55        loop {
56            let mut st = self.state.lock().await;
57            Self::refill_locked(&mut st, self.refill_per_sec, self.capacity);
58
59            if st.tokens >= need {
60                st.tokens -= need;
61                return;
62            }
63            let deficit = need - st.tokens;
64            let secs = deficit / self.refill_per_sec;
65            drop(st);
66            tokio::time::sleep(Duration::from_secs_f64(secs.max(0.01))).await;
67        }
68    }
69
70    /// Post-response debit for per-items adders (can temporarily clamp to 0).
71    pub async fn debit_extra(&self, extra: u32) {
72        if extra == 0 {
73            return;
74        }
75        let mut st = self.state.lock().await;
76        Self::refill_locked(&mut st, self.refill_per_sec, self.capacity);
77        st.tokens = (st.tokens - extra as f64).max(0.0);
78    }
79
80    pub async fn snapshot(&self) -> RateLimitSnapshot {
81        let mut st = self.state.lock().await;
82        Self::refill_locked(&mut st, self.refill_per_sec, self.capacity);
83        RateLimitSnapshot {
84            capacity: self.capacity as u32,
85            tokens: st.tokens.max(0.0) as u32,
86        }
87    }
88
89    fn refill_locked(st: &mut State, per_sec: f64, cap: f64) {
90        let dt = Instant::now().duration_since(st.last_refill).as_secs_f64();
91        if dt > 0.0 {
92            st.tokens = (st.tokens + dt * per_sec).min(cap);
93            st.last_refill = Instant::now();
94        }
95    }
96}
97
/// Point-in-time view of a [`WeightedLimiter`]'s budget, in whole tokens.
#[derive(Debug, Clone, Copy)]
pub struct RateLimitSnapshot {
    // Maximum token balance (tokens per minute).
    pub capacity: u32,
    // Tokens currently available (truncated from the fractional balance).
    pub tokens: u32,
}
103
/// Computes a "full jitter" exponential backoff delay for `attempt`.
///
/// The upper bound doubles per attempt starting from `base` (shift capped at
/// 16 to avoid overflow) and is clamped to `cap`. The delay is drawn
/// pseudo-randomly from `[1ms, bound)` by hashing the attempt number and the
/// current wall clock — cheap and dependency-free, but NOT cryptographically
/// random.
pub fn backoff_full_jitter(attempt: u32, base: Duration, cap: Duration) -> Duration {
    // Derive a pseudo-random value from the attempt and current time.
    let mut hasher = DefaultHasher::new();
    attempt.hash(&mut hasher);
    let nanos = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap_or_default()
        .as_nanos();
    nanos.hash(&mut hasher);
    let hash = hasher.finish();

    // Bound in ms, clamped to [base, cap]. The final `.max(1)` guards the
    // modulus below: if `base` is zero (or sub-millisecond) the bound would
    // be 0 and `hash % 0` would panic.
    let max = (base.as_millis() as u64)
        .saturating_mul(1u64 << attempt.min(16))
        .min(cap.as_millis() as u64)
        .max(base.as_millis() as u64)
        .max(1);

    // Floor at 1ms to prevent zero-duration backoff
    Duration::from_millis((hash % max).max(1))
}
122
123/// Classify Info requests into weight classes based on request type.
124pub fn info_base_weight(req: &crate::http::query::InfoRequest) -> u32 {
125    match req.request_type {
126        HyperliquidInfoRequestType::L2Book
127        | HyperliquidInfoRequestType::AllMids
128        | HyperliquidInfoRequestType::ClearinghouseState
129        | HyperliquidInfoRequestType::OrderStatus
130        | HyperliquidInfoRequestType::SpotClearinghouseState
131        | HyperliquidInfoRequestType::ExchangeStatus => 2,
132        HyperliquidInfoRequestType::UserRole => 60,
133        _ => 20,
134    }
135}
136
137/// Extra weight for heavy Info endpoints: +1 per 20 (most), +1 per 60 for candleSnapshot.
138/// We count the largest array in the response (robust to schema variants).
139pub fn info_extra_weight(req: &crate::http::query::InfoRequest, json: &Value) -> u32 {
140    let items = match json {
141        Value::Array(a) => a.len(),
142        Value::Object(m) => m
143            .values()
144            .filter_map(|v| v.as_array().map(|a| a.len()))
145            .max()
146            .unwrap_or(0),
147        _ => 0,
148    };
149
150    let unit = match req.request_type {
151        HyperliquidInfoRequestType::CandleSnapshot => 60usize,
152        HyperliquidInfoRequestType::RecentTrades
153        | HyperliquidInfoRequestType::HistoricalOrders
154        | HyperliquidInfoRequestType::UserFills
155        | HyperliquidInfoRequestType::UserFillsByTime
156        | HyperliquidInfoRequestType::FundingHistory
157        | HyperliquidInfoRequestType::UserFunding
158        | HyperliquidInfoRequestType::NonUserFundingUpdates
159        | HyperliquidInfoRequestType::TwapHistory
160        | HyperliquidInfoRequestType::UserTwapSliceFills
161        | HyperliquidInfoRequestType::UserTwapSliceFillsByTime
162        | HyperliquidInfoRequestType::DelegatorHistory
163        | HyperliquidInfoRequestType::DelegatorRewards
164        | HyperliquidInfoRequestType::ValidatorStats => 20usize,
165        _ => return 0,
166    };
167    (items / unit) as u32
168}
169
170/// Exchange: 1 + floor(batch_len / 40)
171pub fn exchange_weight(action: &crate::http::query::ExchangeAction) -> u32 {
172    // Extract batch size from typed params
173    let batch_size = match &action.params {
174        ExchangeActionParams::Order(params) => params.orders.len(),
175        ExchangeActionParams::Cancel(params) => params.cancels.len(),
176        ExchangeActionParams::Modify(_) => {
177            // Modify is for a single order
178            1
179        }
180        ExchangeActionParams::UpdateLeverage(_) | ExchangeActionParams::UpdateIsolatedMargin(_) => {
181            0
182        }
183    };
184    1 + (batch_size as u32 / 40)
185}
186
#[cfg(test)]
mod tests {
    use rstest::rstest;
    use rust_decimal::Decimal;

    use super::{
        super::models::{
            Cloid, HyperliquidExecCancelByCloidRequest, HyperliquidExecGrouping,
            HyperliquidExecLimitParams, HyperliquidExecOrderKind, HyperliquidExecPlaceOrderRequest,
            HyperliquidExecTif,
        },
        *,
    };
    use crate::http::query::{
        CancelParams, ExchangeAction, ExchangeActionParams, ExchangeActionType, OrderParams,
        UpdateLeverageParams,
    };

    // Order batches step up one weight unit at every 40th item: 1 + len/40.
    #[rstest]
    #[case(1, 1)]
    #[case(39, 1)]
    #[case(40, 2)]
    #[case(79, 2)]
    #[case(80, 3)]
    fn test_exchange_weight_order_steps_every_40(
        #[case] array_len: usize,
        #[case] expected_weight: u32,
    ) {
        let orders: Vec<HyperliquidExecPlaceOrderRequest> = (0..array_len)
            .map(|_| HyperliquidExecPlaceOrderRequest {
                asset: 0,
                is_buy: true,
                price: Decimal::new(50000, 0),
                size: Decimal::new(1, 0),
                reduce_only: false,
                kind: HyperliquidExecOrderKind::Limit {
                    limit: HyperliquidExecLimitParams {
                        tif: HyperliquidExecTif::Gtc,
                    },
                },
                cloid: Some(Cloid::from_hex("0x00000000000000000000000000000000").unwrap()),
            })
            .collect();

        let action = ExchangeAction {
            action_type: ExchangeActionType::Order,
            params: ExchangeActionParams::Order(OrderParams {
                orders,
                grouping: HyperliquidExecGrouping::Na,
                builder: None,
            }),
        };
        assert_eq!(exchange_weight(&action), expected_weight);
    }

    // Cancel batches use the same 1 + len/40 formula as orders.
    #[rstest]
    fn test_exchange_weight_cancel() {
        let cancels: Vec<HyperliquidExecCancelByCloidRequest> = (0..40)
            .map(|_| HyperliquidExecCancelByCloidRequest {
                asset: 0,
                cloid: Cloid::from_hex("0x00000000000000000000000000000000").unwrap(),
            })
            .collect();

        let action = ExchangeAction {
            action_type: ExchangeActionType::Cancel,
            params: ExchangeActionParams::Cancel(CancelParams { cancels }),
        };
        assert_eq!(exchange_weight(&action), 2);
    }

    // Non-batch actions have batch size 0, so the weight is the base 1.
    #[rstest]
    fn test_exchange_weight_non_batch_action() {
        let update_leverage = ExchangeAction {
            action_type: ExchangeActionType::UpdateLeverage,
            params: ExchangeActionParams::UpdateLeverage(UpdateLeverageParams {
                asset: 1,
                is_cross: true,
                leverage: 10,
            }),
        };
        assert_eq!(exchange_weight(&update_leverage), 1);
    }

    // Timing-based: draining the full bucket must force the next acquire to
    // wait for refill. Uses a generous 500ms lower bound to tolerate jitter.
    #[tokio::test]
    async fn test_limiter_roughly_caps_to_capacity() {
        let limiter = WeightedLimiter::per_minute(1200);

        // Consume ~1200 in quick succession
        for _ in 0..60 {
            limiter.acquire(20).await; // 60 * 20 = 1200
        }

        // The next acquire should take time for tokens to refill
        let t0 = std::time::Instant::now();
        limiter.acquire(20).await;
        let elapsed = t0.elapsed();

        // Should take at least some time to refill (allow some jitter/timing variance)
        assert!(
            elapsed.as_millis() >= 500,
            "Expected significant delay, was {}ms",
            elapsed.as_millis()
        );
    }

    // debit_extra reduces the balance without sleeping and clamps at zero.
    // Snapshot values are truncated whole tokens, so exact equality holds
    // as long as less than one token refills between calls.
    #[tokio::test]
    async fn test_limiter_debit_extra_works() {
        let limiter = WeightedLimiter::per_minute(100);

        // Start with full bucket
        let snapshot = limiter.snapshot().await;
        assert_eq!(snapshot.capacity, 100);
        assert_eq!(snapshot.tokens, 100);

        // Acquire some tokens
        limiter.acquire(30).await;
        let snapshot = limiter.snapshot().await;
        assert_eq!(snapshot.tokens, 70);

        // Debit extra
        limiter.debit_extra(20).await;
        let snapshot = limiter.snapshot().await;
        assert_eq!(snapshot.tokens, 50);

        // Debit more than available (should clamp to 0)
        limiter.debit_extra(100).await;
        let snapshot = limiter.snapshot().await;
        assert_eq!(snapshot.tokens, 0);
    }

    // The jitter bound doubles per attempt: delay is always in [1ms, base*2^n].
    #[rstest]
    #[case(0, 100)]
    #[case(1, 200)]
    #[case(2, 400)]
    fn test_backoff_full_jitter_increases(#[case] attempt: u32, #[case] max_expected_ms: u64) {
        let base = Duration::from_millis(100);
        let cap = Duration::from_secs(5);

        let delay = backoff_full_jitter(attempt, base, cap);

        assert!(delay.as_millis() >= 1);
        assert!(delay.as_millis() <= max_expected_ms as u128);
    }

    // High attempt counts must never exceed the configured cap.
    #[rstest]
    fn test_backoff_full_jitter_respects_cap() {
        let base = Duration::from_millis(100);
        let cap = Duration::from_secs(5);

        let delay_high = backoff_full_jitter(10, base, cap);
        assert!(delay_high.as_millis() <= cap.as_millis());
    }
}