use serde::{Deserialize, Serialize};
use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};
pub mod cpu;
pub mod gpu;
pub mod io;
pub mod memory;
pub mod network;
pub mod utils;
pub use cpu::{CpuBenchmarkResult, DEFAULT_MAX_PRIME, run_cpu_benchmark};
pub use gpu::{
DEFAULT_AMD_GPU_MEMORY, DEFAULT_INTEL_GPU_MEMORY, DEFAULT_NVIDIA_GPU_MEMORY,
DEFAULT_UNKNOWN_GPU_MEMORY, run_gpu_benchmark,
};
pub use io::{IoBenchmarkResult, IoTestMode, run_io_benchmark};
pub use memory::run_memory_benchmark;
pub use network::run_network_benchmark;
pub use utils::{get_io_stats, get_network_stats, run_and_monitor_command};
/// Aggregated results of one full benchmark run for a single job.
///
/// Each optional `*_details` field is populated only when the corresponding
/// benchmark was requested and produced a result (the GPU slot is also filled
/// with a "not available" placeholder when the GPU benchmark fails).
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct BenchmarkProfile {
    /// Identifier of the job this profile belongs to.
    pub job_id: String,
    /// Execution mode label (e.g. "native").
    pub execution_mode: String,
    /// Wall-clock duration of the whole suite, in seconds.
    pub duration_secs: u64,
    /// Unix timestamp (seconds since epoch) taken when the profile was created.
    pub timestamp: u64,
    /// Overall success flag for the run.
    pub success: bool,
    pub cpu_details: Option<CpuBenchmarkResult>,
    pub io_details: Option<IoBenchmarkResult>,
    pub memory_details: Option<MemoryBenchmarkResult>,
    pub network_details: Option<NetworkBenchmarkResult>,
    pub gpu_details: Option<GpuBenchmarkResult>,
    pub storage_details: Option<StorageBenchmarkResult>,
}
/// Result of the memory benchmark.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct MemoryBenchmarkResult {
    /// Average resident memory observed, in megabytes.
    pub avg_memory_mb: f32,
    /// Peak resident memory observed, in megabytes.
    pub peak_memory_mb: f32,
    /// Size of each accessed block, in kilobytes.
    pub block_size_kb: u64,
    /// Total amount of memory exercised, in megabytes.
    pub total_size_mb: u64,
    /// Completed memory operations per second.
    pub operations_per_second: f32,
    /// Sustained transfer rate, in MB/s.
    pub transfer_rate_mb_s: f32,
    /// Access pattern used during the test.
    pub access_mode: MemoryAccessMode,
    /// Operation type exercised during the test.
    pub operation_type: MemoryOperationType,
    /// Measured access latency, in nanoseconds.
    pub latency_ns: f32,
    /// Duration of the benchmark, in milliseconds.
    pub duration_ms: u64,
}
/// Access pattern used by the memory benchmark.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum MemoryAccessMode {
    /// Linear pass over the buffer.
    Sequential,
    /// Random-order access.
    Random,
}
/// Kind of memory operation exercised by the memory benchmark.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum MemoryOperationType {
    /// Read operations.
    Read,
    /// Write operations.
    Write,
    /// No specific operation type.
    None,
}
/// Result of the network benchmark.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct NetworkBenchmarkResult {
    /// Bytes received during the test, in megabytes.
    pub network_rx_mb: f32,
    /// Bytes transmitted during the test, in megabytes.
    pub network_tx_mb: f32,
    /// Measured download throughput, in Mbit/s.
    pub download_speed_mbps: f32,
    /// Measured upload throughput, in Mbit/s.
    pub upload_speed_mbps: f32,
    /// Round-trip latency, in milliseconds.
    pub latency_ms: f32,
    /// Duration of the benchmark, in milliseconds.
    pub duration_ms: u64,
    /// Observed packet loss, as a percentage.
    pub packet_loss_percent: f32,
    /// Latency jitter, in milliseconds.
    pub jitter_ms: f32,
}
/// Result of the GPU benchmark / GPU detection.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct GpuBenchmarkResult {
    /// Whether a usable GPU was detected.
    pub gpu_available: bool,
    /// GPU memory, in megabytes (0.0 when no GPU was found).
    pub gpu_memory_mb: f32,
    /// Reported GPU model name (empty when unknown).
    pub gpu_model: String,
    /// GPU clock frequency, in megahertz.
    pub gpu_frequency_mhz: f32,
}
/// Available storage detected on the root filesystem.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct StorageBenchmarkResult {
    /// Free space in gigabytes, as parsed from `df` output.
    pub storage_available_gb: f32,
}
/// Configuration controlling what the benchmark suite runs and how it samples.
#[derive(Debug, Clone)]
pub struct BenchmarkRunConfig {
    /// Command to execute while monitoring (empty for pure benchmark runs).
    pub command: String,
    /// Arguments passed to `command`.
    pub args: Vec<String>,
    /// Identifier of the job being benchmarked.
    pub job_id: String,
    /// Execution mode label (e.g. "native").
    pub mode: String,
    /// Upper bound on the run's duration.
    pub max_duration: Duration,
    /// Interval between monitoring samples.
    pub sample_interval: Duration,
    /// Whether to run the CPU benchmark.
    pub run_cpu_test: bool,
    /// Whether to run the memory benchmark.
    pub run_memory_test: bool,
    /// Whether to run the I/O benchmark.
    pub run_io_test: bool,
    /// Whether to run the network benchmark.
    pub run_network_test: bool,
    /// Whether to run the GPU benchmark.
    pub run_gpu_test: bool,
}
impl Default for BenchmarkRunConfig {
fn default() -> Self {
Self {
command: "echo".to_string(),
args: vec!["benchmark".to_string()],
job_id: "benchmark".to_string(),
mode: "native".to_string(),
max_duration: Duration::from_secs(30),
sample_interval: Duration::from_millis(500),
run_cpu_test: true,
run_memory_test: true,
run_io_test: true,
run_network_test: true,
run_gpu_test: true,
}
}
}
#[allow(clippy::too_many_arguments)]
pub fn run_benchmark_suite(
job_id: String,
mode: String,
max_duration: Duration,
sample_interval: Duration,
run_cpu_test: bool,
run_memory_test: bool,
run_io_test: bool,
run_network_test: bool,
run_gpu_test: bool,
) -> crate::error::Result<BenchmarkProfile> {
use blueprint_core::{info, warn};
info!(
"Starting benchmark suite for job '{}' with max_duration={}s",
job_id,
max_duration.as_secs()
);
let config = BenchmarkRunConfig {
command: "".to_string(),
args: vec![],
job_id: job_id.clone(),
mode: mode.clone(),
max_duration,
sample_interval,
run_cpu_test,
run_memory_test,
run_io_test,
run_network_test,
run_gpu_test,
};
let mut profile = BenchmarkProfile {
job_id: job_id.clone(),
execution_mode: mode.clone(),
duration_secs: 0,
timestamp: SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap_or_default()
.as_secs(),
success: true,
cpu_details: None,
io_details: None,
memory_details: None,
network_details: None,
gpu_details: None,
storage_details: None,
};
if let Ok(output) = std::process::Command::new("df").args(["-h", "/"]).output() {
if let Ok(output_str) = String::from_utf8(output.stdout) {
if let Some(line) = output_str.lines().nth(1) {
let parts: Vec<&str> = line.split_whitespace().collect();
if parts.len() >= 4 {
let avail = parts[3];
if avail.ends_with('G') {
if let Ok(gb) = avail.trim_end_matches('G').parse::<f32>() {
profile.storage_details = Some(StorageBenchmarkResult {
storage_available_gb: gb,
});
}
} else if avail.ends_with('T') {
if let Ok(tb) = avail.trim_end_matches('T').parse::<f32>() {
profile.storage_details = Some(StorageBenchmarkResult {
storage_available_gb: tb * 1024.0,
});
}
} else if avail.ends_with('M') {
if let Ok(mb) = avail.trim_end_matches('M').parse::<f32>() {
profile.storage_details = Some(StorageBenchmarkResult {
storage_available_gb: mb / 1024.0,
});
}
}
}
}
}
}
let start_time = Instant::now();
if run_cpu_test {
info!("Running CPU benchmark test");
match run_cpu_benchmark(&config) {
Ok(cpu_details) => {
profile.cpu_details = Some(cpu_details);
info!("CPU benchmark completed successfully");
}
Err(e) => {
warn!("CPU benchmark failed: {}", e);
}
}
}
if run_memory_test {
match run_memory_benchmark(&config) {
Ok(memory_details) => {
profile.memory_details = Some(memory_details);
}
Err(e) => {
warn!("Memory benchmark failed: {}", e);
}
}
}
if run_io_test {
match run_io_benchmark(&config) {
Ok(io_result) => {
profile.io_details = Some(io_result);
}
Err(e) => {
warn!("I/O benchmark failed: {}", e);
}
}
}
if run_network_test {
match run_network_benchmark(&config) {
Ok(network_details) => {
profile.network_details = Some(network_details);
}
Err(e) => {
warn!("Network benchmark failed: {}", e);
}
}
}
if run_gpu_test {
match run_gpu_benchmark(&config) {
Ok(gpu_details) => {
profile.gpu_details = Some(gpu_details);
}
Err(e) => {
warn!("GPU benchmark failed: {}", e);
profile.gpu_details = Some(GpuBenchmarkResult {
gpu_available: false,
gpu_memory_mb: 0.0,
gpu_model: "".to_string(),
gpu_frequency_mhz: 0.0,
});
}
}
}
profile.duration_secs = start_time.elapsed().as_secs();
info!("Benchmark suite completed: {:?}", profile);
Ok(profile)
}
/// Runs a benchmark using the given configuration by delegating to
/// [`utils::run_and_monitor_command`], returning the captured profile.
pub fn run_benchmark(config: BenchmarkRunConfig) -> crate::error::Result<BenchmarkProfile> {
utils::run_and_monitor_command(&config)
}