#![allow(
clippy::cast_precision_loss,
clippy::cast_possible_truncation,
clippy::cast_sign_loss
)]
use crate::common;
use crate::error::SpeedtestError;
use crate::progress::SpeedProgress;
use crate::types::Server;
use reqwest::Client;
use std::sync::Arc;
use std::sync::Mutex;
use std::sync::atomic::{AtomicU64, Ordering};
use std::time::Instant;
const ESTIMATED_DOWNLOAD_BYTES: u64 = 15_000_000;
const SAMPLE_INTERVAL_MS: u64 = 50;
/// Strips a trailing `/upload.php` from a speedtest server URL, yielding the
/// directory the random test images live under. URLs without that suffix are
/// returned unchanged.
#[must_use]
pub fn extract_base_url(url: &str) -> &str {
    match url.strip_suffix("/upload.php") {
        Some(base) => base,
        None => url,
    }
}
/// Builds the URL of one of the four random test images served by a
/// speedtest host. `file_index` wraps modulo the size table, so any index
/// is valid and indices 0 and 4 name the same file.
#[must_use]
pub fn build_test_url(server_url: &str, file_index: usize) -> String {
    // Inlined base-URL extraction: drop a trailing "/upload.php" if present.
    let base = server_url.strip_suffix("/upload.php").unwrap_or(server_url);
    const SIZES: [&str; 4] = ["2000x2000", "3000x3000", "3500x3500", "4000x4000"];
    let size = SIZES[file_index % SIZES.len()];
    format!("{base}/random{size}.jpg")
}
/// Per-stream outcome reported by each spawned download task.
struct StreamResult {
    // Total bytes this stream downloaded across all test files.
    bytes: u64,
    // Wall-clock seconds from test start until this stream finished.
    elapsed_secs: f64,
}
use futures_util::StreamExt;
/// Runs the download phase of the speed test against `server`.
///
/// Spawns one download task per stream (1 when `single` is true, otherwise
/// the count chosen by `common::determine_stream_count`). Each task cycles
/// through the four `random<N>x<N>.jpg` test files and streams their bytes,
/// folding chunk sizes into shared counters and pushing throttled speed
/// samples to `progress` as data arrives.
///
/// Returns `(avg_bandwidth, peak_bandwidth, total_bytes, samples)`, where
/// bandwidths are in bits/s (per `common::calculate_bandwidth`, see the
/// tests below) and `samples` is the throttled speed-reading history.
///
/// # Errors
///
/// Currently never returns `Err`; the `Result` is kept for interface
/// stability. Failed requests and failed tasks are skipped silently —
/// this is a best-effort measurement.
pub async fn download_test(
    client: &Client,
    server: &Server,
    single: bool,
    progress: Arc<SpeedProgress>,
) -> Result<(f64, f64, u64, Vec<f64>), SpeedtestError> {
    let concurrent_streams = common::determine_stream_count(single);
    // Shared across tasks: running byte total, observed peak speed
    // (bits/s, stored truncated as u64), and the sampled speed history.
    let total_bytes = Arc::new(AtomicU64::new(0));
    let peak_bps = Arc::new(AtomicU64::new(0));
    let speed_samples = Arc::new(Mutex::new(Vec::new()));
    let start = Instant::now();
    let estimated_total: u64 = ESTIMATED_DOWNLOAD_BYTES;
    // Milliseconds-since-start of the last emitted sample, shared so all
    // streams jointly respect the SAMPLE_INTERVAL_MS throttle.
    let last_sample_ms = Arc::new(AtomicU64::new(0));
    let mut handles = Vec::new();
    for _ in 0..concurrent_streams {
        let client = client.clone();
        let server_url = server.url.clone();
        let total_ref = Arc::clone(&total_bytes);
        let peak_ref = Arc::clone(&peak_bps);
        let samples_ref = Arc::clone(&speed_samples);
        let start_ref = start;
        let prog = Arc::clone(&progress);
        let throttle_ref = Arc::clone(&last_sample_ms);
        let handle = tokio::spawn(async move {
            let mut stream_bytes = 0u64;
            // Fetch each of the four test-file sizes once per stream.
            for j in 0..4 {
                let test_url = build_test_url(&server_url, j);
                // Best-effort: a failed request just skips this file.
                if let Ok(response) = client.get(&test_url).send().await {
                    let mut stream = response.bytes_stream();
                    while let Some(item) = stream.next().await {
                        if let Ok(chunk) = item {
                            let len = chunk.len() as u64;
                            stream_bytes += len;
                            total_ref.fetch_add(len, Ordering::Relaxed);
                            let elapsed_ms = start_ref.elapsed().as_millis() as u64;
                            let last_ms = throttle_ref.load(Ordering::Relaxed);
                            // Sample at most once per SAMPLE_INTERVAL_MS;
                            // last_ms == 0 means no sample has been taken yet.
                            let should_sample = last_ms == 0
                                || elapsed_ms.saturating_sub(last_ms) >= SAMPLE_INTERVAL_MS;
                            if should_sample {
                                throttle_ref.store(elapsed_ms, Ordering::Relaxed);
                                let total_so_far = total_ref.load(Ordering::Relaxed);
                                let elapsed = start_ref.elapsed().as_secs_f64();
                                let speed = common::calculate_bandwidth(total_so_far, elapsed);
                                let current_peak = peak_ref.load(Ordering::Relaxed);
                                if speed > current_peak as f64 {
                                    peak_ref.store(speed as u64, Ordering::Relaxed);
                                }
                                // Tolerate a poisoned mutex: losing samples is
                                // preferable to aborting the measurement.
                                if let Ok(mut samples) = samples_ref.lock() {
                                    samples.push(speed);
                                }
                                let pct = (total_so_far as f64 / estimated_total as f64).min(1.0);
                                prog.update(speed / 1_000_000.0, pct, total_so_far);
                            }
                        }
                    }
                }
            }
            StreamResult {
                bytes: stream_bytes,
                elapsed_secs: start_ref.elapsed().as_secs_f64(),
            }
        });
        handles.push(handle);
    }
    let mut results = Vec::new();
    for handle in handles {
        // A panicked or cancelled task is simply dropped from the results.
        if let Ok(result) = handle.await {
            results.push(result);
        }
    }
    if results.is_empty() {
        return Ok((0.0, 0.0, 0, Vec::new()));
    }
    let total_bandwidth: f64 = results
        .iter()
        .map(|r| common::calculate_bandwidth(r.bytes, r.elapsed_secs))
        .sum();
    let final_total_bytes = total_bytes.load(Ordering::Relaxed);
    let final_peak_speed = peak_bps.load(Ordering::Relaxed) as f64;
    // NOTE(review): this is the mean per-stream bandwidth, not the summed
    // multi-stream throughput — confirm callers expect the average.
    let avg_bandwidth = total_bandwidth / results.len() as f64;
    // Fix: the previous `.unwrap()` panicked on a poisoned mutex. Handle it
    // the same tolerant way as the sampling loop and fall back to an empty
    // sample list instead of crashing the whole test run.
    let samples = speed_samples
        .lock()
        .map(|guard| guard.to_vec())
        .unwrap_or_default();
    Ok((avg_bandwidth, final_peak_speed, final_total_bytes, samples))
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::common;

    #[test]
    fn test_download_bandwidth_calculation() {
        // 10 MB over 2 s -> 40 Mbit/s.
        assert_eq!(common::calculate_bandwidth(10_000_000, 2.0), 40_000_000.0);
    }

    #[test]
    fn test_download_bandwidth_zero_elapsed() {
        // Division guard: zero elapsed time must yield zero, not inf/NaN.
        assert_eq!(common::calculate_bandwidth(10_000_000, 0.0), 0.0);
    }

    #[test]
    fn test_download_concurrent_streams_single() {
        assert_eq!(common::determine_stream_count(true), 1);
    }

    #[test]
    fn test_download_concurrent_streams_multiple() {
        assert_eq!(common::determine_stream_count(false), 4);
    }

    #[test]
    fn test_download_url_generation() {
        let url = build_test_url("http://server.example.com/speedtest/upload.php", 0);
        assert_eq!(url, "http://server.example.com/speedtest/random2000x2000.jpg");
    }

    #[test]
    fn test_download_url_generation_cycles() {
        // The size table has four entries, so indices wrap modulo 4.
        let base = "http://server.example.com/speedtest/upload.php";
        assert_eq!(build_test_url(base, 0), build_test_url(base, 4));
    }

    #[test]
    fn test_download_url_generation_all_sizes() {
        let base = "http://server.example.com/speedtest/upload.php";
        let expected = [
            "http://server.example.com/speedtest/random2000x2000.jpg",
            "http://server.example.com/speedtest/random3000x3000.jpg",
            "http://server.example.com/speedtest/random3500x3500.jpg",
            "http://server.example.com/speedtest/random4000x4000.jpg",
        ];
        for (i, want) in expected.iter().enumerate() {
            assert_eq!(build_test_url(base, i), *want);
        }
    }

    #[test]
    fn test_extract_base_url() {
        assert_eq!(
            extract_base_url("http://server.example.com:8080/speedtest/upload.php"),
            "http://server.example.com:8080/speedtest"
        );
    }

    #[test]
    fn test_extract_base_url_no_suffix() {
        // URLs without the upload suffix pass through unchanged.
        let url = "http://server.example.com/speedtest";
        assert_eq!(extract_base_url(url), url);
    }

    #[test]
    fn test_extract_base_url_different_path() {
        assert_eq!(
            extract_base_url("https://cdn.speedtest.net/upload.php"),
            "https://cdn.speedtest.net"
        );
    }

    #[test]
    fn test_estimated_download_bytes_constant() {
        // Compile-time bounds check on the progress-estimate constant.
        const _: () = assert!(ESTIMATED_DOWNLOAD_BYTES > 10_000_000);
        const _: () = assert!(ESTIMATED_DOWNLOAD_BYTES < 20_000_000);
    }

    #[test]
    fn test_sample_interval_constant() {
        const _: () = assert!(SAMPLE_INTERVAL_MS == 50);
    }
}