Skip to main content

nntp_proxy/router/
mod.rs

1//! Backend server selection and load balancing
2//!
3//! This module handles selecting backend servers using round-robin
4//! with simple load tracking for monitoring.
5//!
6//! # Overview
7//!
8//! The `BackendSelector` provides thread-safe backend selection for routing
9//! NNTP commands across multiple backend servers. It uses a lock-free
10//! round-robin algorithm with atomic operations for concurrent access.
11//!
12//! # Usage
13//!
14//! ```no_run
15//! use nntp_proxy::router::BackendSelector;
16//! use nntp_proxy::types::{BackendId, ClientId, ServerName};
17//! # use nntp_proxy::pool::DeadpoolConnectionProvider;
18//!
19//! let mut selector = BackendSelector::new();
20//! # let provider = DeadpoolConnectionProvider::new(
21//! #     "localhost".to_string(), 119, "test".to_string(), 10, None, None
22//! # );
23//! selector.add_backend(
24//!     BackendId::from_index(0),
25//!     ServerName::try_new("server1".to_string()).unwrap(),
26//!     provider,
27//!     0, // tier (lower = higher priority)
28//! );
29//!
30//! // Route a command
31//! let client_id = ClientId::new();
32//! let backend_id = selector.route_command(client_id, "LIST").unwrap();
33//!
34//! // After command completes
35//! selector.complete_command(backend_id);
36//! ```
37
38mod strategies;
39
40use anyhow::Result;
41use derive_more::{AsRef, Deref, Display, From};
42use nutype::nutype;
43use std::cmp::Ordering as CmpOrdering;
44use std::sync::Arc;
45use std::sync::atomic::{AtomicUsize, Ordering};
46use tracing::{debug, info};
47
48use crate::config::BackendSelectionStrategy;
49use crate::pool::DeadpoolConnectionProvider;
50use crate::types::{BackendId, ClientId, ServerName};
51use strategies::{LeastLoaded, WeightedRoundRobin};
52
/// Selection strategy enum that holds either strategy type
///
/// Wraps the concrete strategy implementations from the `strategies` submodule
/// so `BackendSelector` can dispatch on them with a plain `match` (no trait
/// objects needed).
#[derive(Debug)]
enum SelectionStrategy {
    /// Distributes proportionally to each backend's max_connections
    WeightedRoundRobin(WeightedRoundRobin),
    /// Routes to the backend with the lowest pending/capacity ratio
    LeastLoaded(LeastLoaded),
}
59
60/// Load ratio (pending requests / max connections)
61///
62/// Lower ratios indicate less loaded backends. Range: 0.0 (empty) to f64::MAX (no capacity).
63#[derive(Debug, Clone, Copy, PartialEq, Display, From, AsRef, Deref)]
64pub struct LoadRatio(f64);
65
66impl LoadRatio {
67    /// Maximum load ratio when no capacity available
68    pub const MAX: Self = Self(f64::MAX);
69
70    /// Minimum load ratio for empty backend
71    pub const MIN: Self = Self(0.0);
72
73    /// Create a new load ratio
74    #[inline]
75    #[must_use]
76    pub const fn new(ratio: f64) -> Self {
77        Self(ratio)
78    }
79
80    /// Get the inner f64 value
81    #[inline]
82    #[must_use]
83    pub const fn get(&self) -> f64 {
84        self.0
85    }
86}
87
88impl PartialOrd for LoadRatio {
89    fn partial_cmp(&self, other: &Self) -> Option<CmpOrdering> {
90        self.0.partial_cmp(&other.0)
91    }
92}
93
94/// Atomic counter for pending requests on a backend
95#[derive(Debug, Clone, Display, From, AsRef, Deref)]
96#[display("PendingCount({})", "_0.load(Ordering::Relaxed)")]
97pub struct PendingCount(Arc<AtomicUsize>);
98
99// Manual PartialEq because Arc<AtomicUsize> doesn't auto-derive
100impl PartialEq for PendingCount {
101    fn eq(&self, other: &Self) -> bool {
102        self.get() == other.get()
103    }
104}
105
106impl PartialEq<usize> for PendingCount {
107    fn eq(&self, other: &usize) -> bool {
108        self.get() == *other
109    }
110}
111
112impl Eq for PendingCount {}
113
114impl PendingCount {
115    /// Create a new pending count initialized to zero
116    #[inline]
117    #[must_use]
118    pub fn new() -> Self {
119        Self(Arc::new(AtomicUsize::new(0)))
120    }
121
122    /// Increment the pending count
123    #[inline]
124    pub fn increment(&self) {
125        self.0.fetch_add(1, Ordering::Relaxed);
126    }
127
128    /// Decrement the pending count
129    #[inline]
130    pub fn decrement(&self) {
131        self.0.fetch_sub(1, Ordering::Relaxed);
132    }
133
134    /// Get the current pending count
135    #[inline]
136    #[must_use]
137    pub fn get(&self) -> usize {
138        self.0.load(Ordering::Relaxed)
139    }
140}
141
142impl Default for PendingCount {
143    fn default() -> Self {
144        Self::new()
145    }
146}
147
148/// Atomic counter for stateful connections on a backend
149#[derive(Debug, Clone, Display, From, AsRef, Deref)]
150#[display("StatefulCount({})", "_0.load(Ordering::Relaxed)")]
151pub struct StatefulCount(Arc<AtomicUsize>);
152
153// Manual PartialEq because Arc<AtomicUsize> doesn't auto-derive
154impl PartialEq for StatefulCount {
155    fn eq(&self, other: &Self) -> bool {
156        self.get() == other.get()
157    }
158}
159
160impl PartialEq<usize> for StatefulCount {
161    fn eq(&self, other: &usize) -> bool {
162        self.get() == *other
163    }
164}
165
166impl Eq for StatefulCount {}
167
168impl StatefulCount {
169    /// Create a new stateful count initialized to zero
170    #[inline]
171    #[must_use]
172    pub fn new() -> Self {
173        Self(Arc::new(AtomicUsize::new(0)))
174    }
175
176    /// Get the current stateful count
177    #[inline]
178    #[must_use]
179    pub fn get(&self) -> usize {
180        self.0.load(Ordering::Relaxed)
181    }
182
183    /// Try to acquire a stateful slot (compare-exchange loop)
184    ///
185    /// Returns true if successfully incremented below max_stateful limit
186    pub fn try_acquire(&self, max_stateful: usize) -> bool {
187        let mut current = self.0.load(Ordering::Acquire);
188        loop {
189            if current >= max_stateful {
190                return false;
191            }
192
193            match self.0.compare_exchange_weak(
194                current,
195                current + 1,
196                Ordering::AcqRel,
197                Ordering::Acquire,
198            ) {
199                Ok(_) => return true,
200                Err(actual) => current = actual,
201            }
202        }
203    }
204
205    /// Release a stateful slot (decrement if > 0)
206    ///
207    /// Returns Ok(previous_value) if successfully decremented, Err(0) if already zero
208    pub fn release(&self) -> Result<usize, usize> {
209        self.0
210            .fetch_update(Ordering::AcqRel, Ordering::Acquire, |current| {
211                if current == 0 {
212                    None
213                } else {
214                    Some(current - 1)
215                }
216            })
217    }
218}
219
220impl Default for StatefulCount {
221    fn default() -> Self {
222        Self::new()
223    }
224}
225
/// Number of backend servers in the router
#[nutype(derive(
    Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Display, From, AsRef
))]
pub struct BackendCount(usize);

// Allow direct comparison against plain usize literals (e.g. `count == 0`)
// without unwrapping the newtype at every call site.
impl PartialEq<usize> for BackendCount {
    fn eq(&self, other: &usize) -> bool {
        self.into_inner() == *other
    }
}

impl PartialOrd<usize> for BackendCount {
    fn partial_cmp(&self, other: &usize) -> Option<CmpOrdering> {
        self.into_inner().partial_cmp(other)
    }
}

impl BackendCount {
    /// Zero backends
    pub fn zero() -> Self {
        Self::new(0)
    }

    /// Get the inner usize value
    #[inline]
    #[must_use]
    pub fn get(&self) -> usize {
        self.into_inner()
    }
}
257
/// Total weight across all backends (sum of max_connections)
#[nutype(derive(
    Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Display, From, AsRef
))]
pub struct TotalWeight(usize);

// Allow direct comparison against plain usize literals (e.g. `weight == 0`)
// without unwrapping the newtype at every call site.
impl PartialEq<usize> for TotalWeight {
    fn eq(&self, other: &usize) -> bool {
        self.into_inner() == *other
    }
}

impl PartialOrd<usize> for TotalWeight {
    fn partial_cmp(&self, other: &usize) -> Option<CmpOrdering> {
        self.into_inner().partial_cmp(other)
    }
}

impl TotalWeight {
    /// Zero weight
    pub fn zero() -> Self {
        Self::new(0)
    }

    /// Get the inner usize value
    #[inline]
    #[must_use]
    pub fn get(&self) -> usize {
        self.into_inner()
    }
}
289
290/// Traffic share percentage for a backend
291#[nutype(derive(Debug, Clone, Copy, PartialEq, Display, From, AsRef))]
292pub struct TrafficShare(f64);
293
294impl TrafficShare {
295    /// Get the inner f64 value
296    #[inline]
297    #[must_use]
298    pub fn get(&self) -> f64 {
299        self.into_inner()
300    }
301
302    /// Calculate traffic share from max_connections and total_weight
303    #[inline]
304    #[must_use]
305    pub fn from_weight(max_connections: usize, total_weight: TotalWeight) -> Self {
306        if total_weight.get() > 0 {
307            Self::new((max_connections as f64 / total_weight.get() as f64) * 100.0)
308        } else {
309            Self::new(0.0)
310        }
311    }
312}
313
/// Backend connection information
///
/// NOTE: `Clone` shares the atomic counters — `PendingCount` and
/// `StatefulCount` are `Arc`-backed — so clones observe the same live
/// load statistics rather than independent copies.
#[derive(Debug, Clone)]
struct BackendInfo {
    /// Backend identifier
    id: BackendId,
    /// Server name for logging
    name: ServerName,
    /// Connection provider for this backend
    provider: DeadpoolConnectionProvider,
    /// Number of pending requests on this backend (for load balancing)
    pending_count: PendingCount,
    /// Number of connections in stateful mode (for hybrid routing reservation)
    stateful_count: StatefulCount,
    /// Server tier for prioritization (lower = higher priority)
    tier: u8,
}
330
331impl BackendInfo {
332    /// Calculate load ratio (pending requests / max connections)
333    ///
334    /// Lower ratios indicate less loaded backends.
335    #[must_use]
336    fn load_ratio(&self) -> LoadRatio {
337        let max_conns = self.provider.max_size() as f64;
338        if max_conns > 0.0 {
339            let pending = self.pending_count.get() as f64;
340            LoadRatio::new(pending / max_conns)
341        } else {
342            LoadRatio::MAX
343        }
344    }
345}
346
/// Selects backend servers using weighted round-robin with load tracking
///
/// # Thread Safety
///
/// This struct is designed for concurrent access across multiple threads.
/// The round-robin counter and pending counts use atomic operations for
/// lock-free performance.
///
/// # Load Balancing
///
/// - **Strategy**: Weighted round-robin based on max_connections
/// - **Tracking**: Atomic counters track pending commands per backend
/// - **Monitoring**: Load statistics available via `backend_load()`
/// - **Fairness**: Backends with larger pools receive proportionally more requests
///
/// # Examples
///
/// ```no_run
/// # use nntp_proxy::router::BackendSelector;
/// # use nntp_proxy::types::{BackendId, ClientId, ServerName};
/// # use nntp_proxy::pool::DeadpoolConnectionProvider;
/// let mut selector = BackendSelector::new();
///
/// # let provider = DeadpoolConnectionProvider::new(
/// #     "localhost".to_string(), 119, "test".to_string(), 10, None, None
/// # );
/// selector.add_backend(
///     BackendId::from_index(0),
///     ServerName::try_new("backend-1".to_string()).unwrap(),
///     provider,
///     0, // tier (lower = higher priority)
/// );
///
/// // Route commands
/// let backend = selector.route_command(ClientId::new(), "LIST")?;
/// # Ok::<(), anyhow::Error>(())
/// ```
#[derive(Debug)]
pub struct BackendSelector {
    /// Backend connection providers, in insertion order (ID lookups are linear scans)
    backends: Vec<BackendInfo>,
    /// Selection strategy (weighted round-robin or least-loaded)
    strategy: SelectionStrategy,
}
391
392impl Default for BackendSelector {
393    fn default() -> Self {
394        Self::new()
395    }
396}
397
398impl BackendSelector {
399    /// Find backend by ID
400    ///
401    /// Common helper to avoid repeating find logic across methods.
402    #[inline]
403    fn find_backend(&self, backend_id: BackendId) -> Option<&BackendInfo> {
404        self.backends.iter().find(|b| b.id == backend_id)
405    }
406
407    /// Get the tier for a backend
408    ///
409    /// Returns the tier value for the specified backend, or None if the backend doesn't exist.
410    /// Used by cache to implement tier-aware TTL (higher tier = longer TTL).
411    #[inline]
412    #[must_use]
413    pub fn get_tier(&self, backend_id: BackendId) -> Option<u8> {
414        self.find_backend(backend_id).map(|b| b.tier)
415    }
416
417    /// Create a new backend selector with weighted round-robin strategy (default)
418    #[must_use]
419    pub fn new() -> Self {
420        Self::with_strategy(BackendSelectionStrategy::WeightedRoundRobin)
421    }
422
423    /// Create a new backend selector with specified strategy
424    #[must_use]
425    pub fn with_strategy(strategy: BackendSelectionStrategy) -> Self {
426        let selection_strategy = match strategy {
427            BackendSelectionStrategy::WeightedRoundRobin => {
428                SelectionStrategy::WeightedRoundRobin(WeightedRoundRobin::new(0))
429            }
430            BackendSelectionStrategy::LeastLoaded => {
431                SelectionStrategy::LeastLoaded(LeastLoaded::new())
432            }
433        };
434
435        Self {
436            // Pre-allocate for typical number of backend servers (most setups have 2-8)
437            backends: Vec::with_capacity(4),
438            strategy: selection_strategy,
439        }
440    }
441
442    /// Add a backend server to the router
443    ///
444    /// # Arguments
445    /// * `backend_id` - Unique identifier for this backend
446    /// * `name` - Human-readable name for logging
447    /// * `provider` - Connection pool provider
448    /// * `tier` - Server tier (lower = higher priority, 0 is highest)
449    pub fn add_backend(
450        &mut self,
451        backend_id: BackendId,
452        name: ServerName,
453        provider: DeadpoolConnectionProvider,
454        tier: u8,
455    ) {
456        let max_connections = provider.max_size();
457
458        // Update strategy-specific state
459        match &mut self.strategy {
460            SelectionStrategy::WeightedRoundRobin(wrr) => {
461                let old_weight = TotalWeight::new(wrr.total_weight());
462                let new_weight = TotalWeight::new(old_weight.get() + max_connections);
463                wrr.set_total_weight(new_weight.get());
464
465                // Calculate this backend's share of traffic
466                let traffic_share = TrafficShare::from_weight(max_connections, new_weight);
467
468                info!(
469                    "Added backend {:?} ({}) tier {} with {} connections - will receive {:.1}% of traffic (total weight: {} -> {}) [weighted round-robin]",
470                    backend_id,
471                    name,
472                    tier,
473                    max_connections,
474                    traffic_share.get(),
475                    old_weight,
476                    new_weight
477                );
478            }
479            SelectionStrategy::LeastLoaded(_) => {
480                info!(
481                    "Added backend {:?} ({}) tier {} with {} connections [least-loaded strategy]",
482                    backend_id, name, tier, max_connections
483                );
484            }
485        }
486
487        self.backends.push(BackendInfo {
488            id: backend_id,
489            name,
490            provider,
491            pending_count: PendingCount::new(),
492            stateful_count: StatefulCount::new(),
493            tier,
494        });
495    }
496
497    /// Select the next backend using the configured strategy with tier-aware prioritization
498    ///
499    /// Selection is tier-aware: backends with lower tier numbers are tried first.
500    /// Within each tier, the configured strategy applies:
501    /// - **Weighted round-robin**: Distributes proportionally to max_connections
502    /// - **Least-loaded**: Routes to backend with fewest pending requests
503    ///
504    /// # Arguments
505    /// * `availability` - Optional filter to restrict selection to available backends
506    fn select_backend(
507        &self,
508        availability: Option<&crate::cache::ArticleAvailability>,
509    ) -> Option<&BackendInfo> {
510        if self.backends.is_empty() {
511            return None;
512        }
513
514        // Filter backends by availability if provided
515        let is_available =
516            |backend: &&BackendInfo| availability.is_none_or(|avail| avail.should_try(backend.id));
517
518        // Tier filtering only applies to article requests (when availability is provided).
519        // For non-article commands (LIST, CAPABILITIES, etc), use all backends regardless of tier.
520        let should_apply_tier_filtering = availability.is_some();
521
522        // If tiering applies, find the lowest available tier; otherwise this value is unused
523        let lowest_available_tier = if should_apply_tier_filtering {
524            self.backends
525                .iter()
526                .filter(|b| is_available(b))
527                .map(|b| b.tier)
528                .min()?
529        } else {
530            0 // Unused when should_apply_tier_filtering is false; see tier_filter closure
531        };
532
533        // Filter: if tiering applies, only backends in lowest tier; otherwise all available backends
534        let tier_filter = |backend: &&BackendInfo| {
535            if should_apply_tier_filtering {
536                backend.tier == lowest_available_tier && is_available(backend)
537            } else {
538                is_available(backend)
539            }
540        };
541
542        match &self.strategy {
543            SelectionStrategy::WeightedRoundRobin(wrr) => {
544                // Calculate total weight for this tier only
545                let tier_total_weight: usize = self
546                    .backends
547                    .iter()
548                    .filter(tier_filter)
549                    .map(|b| b.provider.max_size())
550                    .sum();
551
552                if tier_total_weight == 0 {
553                    // No backends with positive weight - cannot route
554                    return None;
555                }
556
557                // Use tier-specific weight for selection to avoid modulo bias
558                // (directly select within tier's weight range instead of global % tier)
559                let tier_position = wrr.select_with_weight(tier_total_weight)?;
560
561                // Find backend owning this weighted position using cumulative weights
562                self.backends
563                    .iter()
564                    .filter(tier_filter)
565                    .scan(0, |cumulative, backend| {
566                        *cumulative += backend.provider.max_size();
567                        Some((*cumulative, backend))
568                    })
569                    .find(|(cumulative_weight, _)| tier_position < *cumulative_weight)
570                    .map(|(_, backend)| backend)
571                    .or_else(|| {
572                        // Fallback to first available backend in tier
573                        self.backends.iter().find(tier_filter)
574                    })
575            }
576            SelectionStrategy::LeastLoaded(_) => {
577                // Find backend with lowest load ratio in the lowest available tier
578                self.backends.iter().filter(tier_filter).min_by(|a, b| {
579                    a.load_ratio()
580                        .partial_cmp(&b.load_ratio())
581                        .unwrap_or(std::cmp::Ordering::Equal)
582                })
583            }
584        }
585    }
586
587    /// Select a backend for the given command using round-robin
588    /// Returns the backend ID to use for this command
589    pub fn route_command(&self, _client_id: ClientId, _command: &str) -> Result<BackendId> {
590        self.route_command_with_availability(_client_id, _command, None)
591    }
592
593    /// Select a backend for the given command, optionally filtering by availability
594    pub fn route_command_with_availability(
595        &self,
596        _client_id: ClientId,
597        _command: &str,
598        availability: Option<&crate::cache::ArticleAvailability>,
599    ) -> Result<BackendId> {
600        let backend = self.select_backend(availability).ok_or_else(|| {
601            anyhow::anyhow!(
602                "No backends available for routing (total backends: {})",
603                self.backends.len()
604            )
605        })?;
606
607        // Increment pending count for load tracking
608        backend.pending_count.increment();
609
610        debug!(
611            "Selected backend {:?} ({}) for command",
612            backend.id, backend.name
613        );
614
615        Ok(backend.id)
616    }
617
618    /// Mark a command as complete, decrementing the pending count
619    pub fn complete_command(&self, backend_id: BackendId) {
620        if let Some(backend) = self.find_backend(backend_id) {
621            backend.pending_count.decrement();
622        }
623    }
624
625    /// Manually increment pending count for a specific backend
626    /// Used when directly selecting a backend instead of using route_command
627    pub fn mark_backend_pending(&self, backend_id: BackendId) {
628        if let Some(backend) = self.find_backend(backend_id) {
629            backend.pending_count.increment();
630        }
631    }
632
633    /// Get the connection provider for a backend
634    #[must_use]
635    pub fn backend_provider(&self, backend_id: BackendId) -> Option<&DeadpoolConnectionProvider> {
636        self.find_backend(backend_id).map(|b| &b.provider)
637    }
638
639    /// Get the number of backends
640    #[must_use]
641    #[inline]
642    pub fn backend_count(&self) -> BackendCount {
643        BackendCount::new(self.backends.len())
644    }
645
646    /// Get total weight (sum of all max_connections)
647    /// Only applicable for weighted round-robin strategy
648    #[must_use]
649    #[inline]
650    pub fn total_weight(&self) -> TotalWeight {
651        match &self.strategy {
652            SelectionStrategy::WeightedRoundRobin(wrr) => TotalWeight::new(wrr.total_weight()),
653            SelectionStrategy::LeastLoaded(_) => {
654                // For least-loaded, return sum of all max_connections for compatibility
655                TotalWeight::new(self.backends.iter().map(|b| b.provider.max_size()).sum())
656            }
657        }
658    }
659
660    /// Get backend load (pending requests) for monitoring
661    ///
662    /// Returns a clone of the PendingCount for the backend, allowing the caller
663    /// to query the current value or track it over time.
664    #[must_use]
665    pub fn backend_load(&self, backend_id: BackendId) -> Option<PendingCount> {
666        self.find_backend(backend_id)
667            .map(|b| b.pending_count.clone())
668    }
669
670    /// Try to acquire a stateful connection slot for hybrid mode
671    /// Returns true if acquisition succeeded (within max_connections-1 limit)
672    /// Returns false if all stateful slots are taken (need to keep 1 for PCR)
673    pub fn try_acquire_stateful(&self, backend_id: BackendId) -> bool {
674        if let Some(backend) = self.find_backend(backend_id) {
675            // Get max connections from the provider's pool
676            let max_connections = backend.provider.max_size();
677
678            // Reserve 1 connection for per-command routing
679            let max_stateful = max_connections.saturating_sub(1);
680
681            // Try to acquire slot using StatefulCount's atomic logic
682            let acquired = backend.stateful_count.try_acquire(max_stateful);
683
684            if acquired {
685                debug!(
686                    "Backend {:?} ({}) acquired stateful slot: {}/{}",
687                    backend_id,
688                    backend.name,
689                    backend.stateful_count.get(),
690                    max_stateful
691                );
692            } else {
693                debug!(
694                    "Backend {:?} ({}) stateful limit reached: {}/{}",
695                    backend_id,
696                    backend.name,
697                    backend.stateful_count.get(),
698                    max_stateful
699                );
700            }
701
702            acquired
703        } else {
704            false
705        }
706    }
707
708    /// Release a stateful connection slot
709    pub fn release_stateful(&self, backend_id: BackendId) {
710        if let Some(backend) = self.find_backend(backend_id) {
711            // Atomically decrement using StatefulCount's release method
712            match backend.stateful_count.release() {
713                Ok(prev) => {
714                    debug!(
715                        "Backend {:?} ({}) released stateful slot: {}/{}",
716                        backend_id,
717                        backend.name,
718                        prev - 1,
719                        backend.provider.max_size().saturating_sub(1)
720                    );
721                }
722                Err(0) => {
723                    debug!(
724                        "Backend {:?} ({}) release_stateful called when count already 0",
725                        backend_id, backend.name
726                    );
727                }
728                Err(other) => unreachable!(
729                    "Unexpected error in release: got Err({other}), expected only Err(0)"
730                ),
731            }
732        }
733    }
734
735    /// Get the number of stateful connections for a backend
736    ///
737    /// Returns a clone of the StatefulCount for the backend, allowing the caller
738    /// to query the current value or track it over time.
739    #[must_use]
740    pub fn stateful_count(&self, backend_id: BackendId) -> Option<StatefulCount> {
741        self.find_backend(backend_id)
742            .map(|b| b.stateful_count.clone())
743    }
744
745    /// Get the load ratio for a backend (pending / max_connections)
746    ///
747    /// Lower ratios indicate less loaded backends. Range: 0.0 (empty) to f64::MAX (no capacity).
748    #[must_use]
749    pub fn backend_load_ratio(&self, backend_id: BackendId) -> Option<LoadRatio> {
750        self.find_backend(backend_id).map(|b| b.load_ratio())
751    }
752
753    /// Get the traffic share percentage for a backend
754    ///
755    /// Only applicable for weighted round-robin strategy. Returns the percentage
756    /// of traffic this backend should receive based on its max_connections.
757    #[must_use]
758    pub fn backend_traffic_share(&self, backend_id: BackendId) -> Option<TrafficShare> {
759        self.find_backend(backend_id).map(|b| {
760            let total = self.total_weight();
761            TrafficShare::from_weight(b.provider.max_size(), total)
762        })
763    }
764}