// netspeed_cli/domain/measurement.rs
use crate::error::Error;
use crate::progress::Tracker;
use crate::servers::measure_latency_under_load;
use crate::task_runner::TestRunResult;
use crate::types::Server;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex};

14pub async fn run_bandwidth_test<F, Fut>(
34 client: reqwest::Client,
35 server: &Server,
36 test_label: &str,
37 is_verbose: bool,
38 test_fn: F,
39) -> Result<TestRunResult, Error>
40where
41 F: FnOnce(Arc<Tracker>) -> Fut,
42 Fut: std::future::Future<Output = Result<(f64, f64, u64, Vec<f64>), Error>>,
43{
44 let progress = Arc::new(if is_verbose {
45 Tracker::new(test_label)
46 } else {
47 Tracker::with_target(test_label, indicatif::ProgressDrawTarget::hidden())
48 });
49
50 let latency_samples = Arc::new(Mutex::new(Vec::new()));
51 let stop_signal = Arc::new(AtomicBool::new(false));
52
53 let ping_url = server.url.clone();
54 let samples_clone = Arc::clone(&latency_samples);
55 let stop_clone = Arc::clone(&stop_signal);
56 let ping_handle = tokio::spawn(async move {
57 measure_latency_under_load(client.clone(), ping_url, samples_clone, stop_clone).await;
58 });
59
60 let test_start = std::time::Instant::now();
61 let (avg, peak, total_bytes, speed_samples) = test_fn(progress).await?;
62 let duration = test_start.elapsed().as_secs_f64();
63
64 stop_signal.store(true, Ordering::Release);
65 let _ = ping_handle.await;
66
67 let latency_under_load = {
68 let lock = latency_samples
69 .lock()
70 .map_err(|e| Error::context(format!("latency samples lock poisoned: {e}")))?;
71 if lock.is_empty() {
72 None
73 } else {
74 Some(lock.iter().sum::<f64>() / lock.len() as f64)
75 }
76 };
77
78 Ok(TestRunResult {
79 avg_bps: avg,
80 peak_bps: peak,
81 total_bytes,
82 duration_secs: duration,
83 speed_samples,
84 latency_under_load,
85 })
86}
87
#[cfg(test)]
mod tests {
    use super::*;

    /// A fully-populated result retains the values it was constructed with.
    #[test]
    fn test_test_run_result_structure() {
        let populated = TestRunResult {
            avg_bps: 100_000_000.0,
            peak_bps: 120_000_000.0,
            total_bytes: 10_000_000,
            duration_secs: 1.0,
            speed_samples: vec![100_000_000.0],
            latency_under_load: Some(15.0),
        };
        let avg_delta = (populated.avg_bps - 100_000_000.0).abs();
        let peak_delta = (populated.peak_bps - 120_000_000.0).abs();
        assert!(avg_delta < f64::EPSILON);
        assert!(peak_delta < f64::EPSILON);
    }

    /// `Default` yields all-zero / empty / `None` fields.
    #[test]
    fn test_test_run_result_default_values() {
        let defaulted = TestRunResult::default();
        assert!(defaulted.avg_bps.abs() < f64::EPSILON);
        assert!(defaulted.peak_bps.abs() < f64::EPSILON);
        assert!(defaulted.duration_secs.abs() < f64::EPSILON);
        assert_eq!(defaulted.total_bytes, 0);
        assert!(defaulted.speed_samples.is_empty());
        assert!(defaulted.latency_under_load.is_none());
    }

    /// An explicitly zeroed struct compares equal to the `Default` impl.
    #[test]
    fn test_test_run_result_default_explicit() {
        let zeroed = TestRunResult {
            avg_bps: 0.0,
            peak_bps: 0.0,
            total_bytes: 0,
            duration_secs: 0.0,
            speed_samples: Vec::new(),
            latency_under_load: None,
        };
        assert_eq!(TestRunResult::default(), zeroed);
    }

    /// Speed samples are stored verbatim, preserving length and contents.
    #[test]
    fn test_test_run_result_with_samples() {
        let expected = vec![50_000_000.0, 75_000_000.0, 100_000_000.0];
        let with_samples = TestRunResult {
            avg_bps: 75_000_000.0,
            peak_bps: 100_000_000.0,
            total_bytes: 5_000_000,
            duration_secs: 0.5,
            speed_samples: expected.clone(),
            latency_under_load: Some(12.0),
        };
        assert_eq!(with_samples.speed_samples.len(), 3);
        assert_eq!(with_samples.speed_samples, expected);
    }

    /// Sanity check: a peak above the average keeps that ordering.
    #[test]
    fn test_test_run_result_peak_greater_than_average() {
        let skewed = TestRunResult {
            avg_bps: 100_000_000.0,
            peak_bps: 150_000_000.0,
            total_bytes: 8_000_000,
            duration_secs: 0.8,
            speed_samples: vec![100_000_000.0],
            latency_under_load: None,
        };
        assert!(skewed.avg_bps < skewed.peak_bps);
    }
}