// netspeed_cli/task_runner.rs
//! Test runner orchestration for download and upload bandwidth tests.
//!
//! This module provides a reusable template for running bandwidth tests,
//! eliminating the duplication between download and upload test orchestration
//! in `main.rs`. Both tests follow the same pattern:
//! 1. Set up progress tracking
//! 2. Spawn latency-under-load monitoring in background
//! 3. Run the actual bandwidth test
//! 4. Stop latency monitoring
//! 5. Aggregate results
use crate::error::Error;
use crate::progress::Tracker;
use crate::types::Server;
use std::sync::Arc;
use std::sync::Mutex;
use std::sync::atomic::AtomicBool;
/// Trait for bandwidth test results - enables dependency injection and mocking.
pub trait TestMetrics: Send + Sync {
    /// Average throughput over the whole test, in bits per second.
    fn avg_bps(&self) -> f64;
    /// Highest observed throughput, in bits per second.
    fn peak_bps(&self) -> f64;
    /// Total number of bytes transferred during the test.
    fn total_bytes(&self) -> u64;
    /// Wall-clock duration of the test, in seconds.
    fn duration_secs(&self) -> f64;
    /// Throughput samples collected over time (bits per second).
    fn speed_samples(&self) -> &[f64];
    /// Average latency under load in milliseconds, if it was measured.
    fn latency_under_load(&self) -> Option<f64>;
}
28
/// Result from a bandwidth test (download or upload).
///
/// Produced by [`run_bandwidth_test`]; implements [`TestMetrics`] so callers
/// can consume download and upload results through one interface.
#[derive(PartialEq, Debug, Clone)]
pub struct TestRunResult {
    /// Average speed in bits per second
    pub avg_bps: f64,
    /// Peak speed in bits per second
    pub peak_bps: f64,
    /// Total bytes transferred
    pub total_bytes: u64,
    /// Duration of the test in seconds
    pub duration_secs: f64,
    /// Speed samples over time
    pub speed_samples: Vec<f64>,
    /// Average latency under load (ms), if measured
    pub latency_under_load: Option<f64>,
}
45
46impl Default for TestRunResult {
47    fn default() -> Self {
48        Self {
49            avg_bps: 0.0,
50            peak_bps: 0.0,
51            total_bytes: 0,
52            duration_secs: 0.0,
53            speed_samples: Vec::new(),
54            latency_under_load: None,
55        }
56    }
57}
58
59impl TestMetrics for TestRunResult {
60    fn avg_bps(&self) -> f64 {
61        self.avg_bps
62    }
63
64    fn peak_bps(&self) -> f64 {
65        self.peak_bps
66    }
67
68    fn total_bytes(&self) -> u64 {
69        self.total_bytes
70    }
71
72    fn duration_secs(&self) -> f64 {
73        self.duration_secs
74    }
75
76    fn speed_samples(&self) -> &[f64] {
77        &self.speed_samples
78    }
79
80    fn latency_under_load(&self) -> Option<f64> {
81        self.latency_under_load
82    }
83}
84
85/// Run a bandwidth test with latency-under-load monitoring.
86///
87/// This is a template method that handles:
88/// - Progress bar setup
89/// - Background latency monitoring
90/// - Test execution via the provided closure
91/// - Result aggregation
92///
93/// # Arguments
94///
95/// * `client` - HTTP client to reuse (avoids creating a second connection pool)
96/// * `server` - Server to test against
97/// * `test_label` - Label for progress display (e.g., "Download", "Upload")
98/// * `is_verbose` - Whether to show visible progress
99/// * `test_fn` - Async closure that runs the actual bandwidth test
100///
101/// # Errors
102///
103/// Returns [`Error`] if the test fails.
104pub async fn run_bandwidth_test<F, Fut>(
105    client: reqwest::Client,
106    server: &Server,
107    test_label: &str,
108    is_verbose: bool,
109    test_fn: F,
110) -> Result<TestRunResult, Error>
111where
112    F: FnOnce(Arc<Tracker>) -> Fut,
113    Fut: std::future::Future<Output = Result<(f64, f64, u64, Vec<f64>), Error>>,
114{
115    let progress = Arc::new(if is_verbose {
116        Tracker::new(test_label)
117    } else {
118        Tracker::with_target(test_label, indicatif::ProgressDrawTarget::hidden())
119    });
120
121    // Set up latency-under-load monitoring
122    let latency_samples = Arc::new(Mutex::new(Vec::new()));
123    let stop_signal = Arc::new(AtomicBool::new(false));
124
125    let ping_url = server.url.clone();
126    let samples_clone = Arc::clone(&latency_samples);
127    let stop_clone = Arc::clone(&stop_signal);
128    let ping_handle = tokio::spawn(async move {
129        crate::servers::measure_latency_under_load(
130            client.clone(),
131            ping_url,
132            samples_clone,
133            stop_clone,
134        )
135        .await;
136    });
137
138    // Run the actual test
139    let test_start = std::time::Instant::now();
140    let (avg, peak, total_bytes, speed_samples) = test_fn(progress).await?;
141    let duration = test_start.elapsed().as_secs_f64();
142
143    // Stop latency monitoring
144    stop_signal.store(true, std::sync::atomic::Ordering::Release);
145    let _ = ping_handle.await;
146
147    // Calculate average latency under load
148    let latency_under_load = {
149        let lock = latency_samples
150            .lock()
151            .map_err(|e| Error::context(format!("latency samples lock poisoned: {e}")))?;
152        if lock.is_empty() {
153            None
154        } else {
155            // Safe: latency sample count is small (≤100), well under 2^53.
156            Some(lock.iter().sum::<f64>() / lock.len() as f64)
157        }
158    };
159
160    Ok(TestRunResult {
161        avg_bps: avg,
162        peak_bps: peak,
163        total_bytes,
164        duration_secs: duration,
165        speed_samples,
166        latency_under_load,
167    })
168}
169
#[cfg(test)]
mod tests {
    use super::*;

    /// Builds the three-sample result shared by several tests.
    fn three_sample_result() -> TestRunResult {
        TestRunResult {
            avg_bps: 75_000_000.0,
            peak_bps: 100_000_000.0,
            total_bytes: 5_000_000,
            duration_secs: 0.5,
            speed_samples: vec![50_000_000.0, 75_000_000.0, 100_000_000.0],
            latency_under_load: Some(12.0),
        }
    }

    #[test]
    fn test_test_run_result_structure() {
        let r = TestRunResult {
            avg_bps: 100_000_000.0,
            peak_bps: 120_000_000.0,
            total_bytes: 10_000_000,
            duration_secs: 1.0,
            speed_samples: vec![100_000_000.0],
            latency_under_load: Some(15.0),
        };
        assert!((r.avg_bps - 100_000_000.0).abs() < f64::EPSILON);
        assert!((r.peak_bps - 120_000_000.0).abs() < f64::EPSILON);
    }

    #[test]
    fn test_test_run_result_default_values() {
        let d = TestRunResult::default();
        assert!(d.avg_bps.abs() < f64::EPSILON);
        assert!(d.peak_bps.abs() < f64::EPSILON);
        assert_eq!(d.total_bytes, 0);
        assert!(d.duration_secs.abs() < f64::EPSILON);
        assert!(d.speed_samples.is_empty());
        assert!(d.latency_under_load.is_none());
    }

    #[test]
    fn test_test_run_result_default_explicit() {
        // A field-by-field zero value must equal Default::default().
        let explicit = TestRunResult {
            avg_bps: 0.0,
            peak_bps: 0.0,
            total_bytes: 0,
            duration_secs: 0.0,
            speed_samples: Vec::new(),
            latency_under_load: None,
        };
        assert_eq!(explicit, TestRunResult::default());
    }

    #[test]
    fn test_test_run_result_with_samples() {
        let r = three_sample_result();
        assert_eq!(
            r.speed_samples,
            vec![50_000_000.0, 75_000_000.0, 100_000_000.0]
        );
        assert_eq!(r.speed_samples.len(), 3);
    }

    #[test]
    fn test_test_run_result_peak_greater_than_average() {
        let r = TestRunResult {
            avg_bps: 100_000_000.0,
            peak_bps: 150_000_000.0,
            total_bytes: 8_000_000,
            duration_secs: 0.8,
            speed_samples: vec![100_000_000.0],
            latency_under_load: None,
        };
        assert!(r.peak_bps > r.avg_bps);
    }

    #[test]
    fn test_test_metrics_impl_returns_correct_values() {
        // Exercise the TestMetrics trait methods rather than the raw fields.
        let r = three_sample_result();
        assert!((r.avg_bps() - 75_000_000.0).abs() < f64::EPSILON);
        assert!((r.peak_bps() - 100_000_000.0).abs() < f64::EPSILON);
        assert_eq!(r.total_bytes(), 5_000_000);
        assert!((r.duration_secs() - 0.5).abs() < f64::EPSILON);
        assert_eq!(r.speed_samples().len(), 3);
        assert_eq!(r.latency_under_load(), Some(12.0));
    }

    #[test]
    fn test_test_metrics_impl_default() {
        let d = TestRunResult::default();
        assert!(d.avg_bps().abs() < f64::EPSILON);
        assert!(d.peak_bps().abs() < f64::EPSILON);
        assert_eq!(d.total_bytes(), 0);
        assert!(d.duration_secs().abs() < f64::EPSILON);
        assert!(d.speed_samples().is_empty());
        assert!(d.latency_under_load().is_none());
    }
}