pub mod hardware;
use std::{boxed::Box, path::Path};
use clap::Parser;
use comfy_table::{Row, Table};
use log::{error, info, warn};
use sc_cli::Result;
use sc_sysinfo::{
benchmark_cpu, benchmark_disk_random_writes, benchmark_disk_sequential_writes,
benchmark_memory, benchmark_sr25519_verify, ExecutionLimit, HwBench, Metric, Requirement,
Requirements, Throughput,
};
pub use hardware::SUBSTRATE_REFERENCE_HARDWARE;
// Command to benchmark the hardware of this machine.
//
// NOTE(review): plain `//` comments are used deliberately instead of `///`
// doc comments — clap turns doc comments on derive fields into `--help`
// text, so adding them would change the program's observable output.
#[derive(Debug, Parser)]
pub struct MachineCmd {
// Base directory; the disk benchmarks write their temporary files here.
#[arg(long, short = 'd')]
pub base_path: Option<String>,
// Run the complete suite and print a summary table (see `print_full_table`).
#[arg(long, short = 'f')]
pub full: bool,
// Only warn instead of returning an error when a benchmark fails its
// requirement or produces implausible results.
#[arg(long)]
pub allow_fail: bool,
// Percentage by which a score may fall below its minimum and still pass.
#[arg(long, default_value_t = 10.0, value_name = "PERCENT")]
pub tolerance: f64,
// Time budget for the SR25519 signature-verification benchmark.
#[arg(long, default_value_t = 5.0, value_name = "SECONDS")]
pub verify_duration: f32,
// Time budget for the Blake2-256 hashing (CPU) benchmark.
#[arg(long, default_value_t = 5.0, value_name = "SECONDS")]
pub hash_duration: f32,
// Time budget for the memory-copy benchmark.
#[arg(long, default_value_t = 5.0, value_name = "SECONDS")]
pub memory_duration: f32,
// Time budget for each of the two disk benchmarks (sequential and random writes).
#[arg(long, default_value_t = 5.0, value_name = "SECONDS")]
pub disk_duration: f32,
}
/// Outcome of a single hardware benchmark compared against its requirement.
#[derive(Debug)]
pub struct BenchResult {
// Whether the score was within tolerance of the required minimum.
passed: bool,
// The measured absolute throughput.
score: Throughput,
// Measured score divided by the required minimum (1.0 == exactly on target).
rel_score: f64,
}
/// Errors that the machine benchmark can produce.
///
/// The `#[error]` attributes provide the `Display` impl via `thiserror`.
#[derive(Debug, thiserror::Error)]
#[allow(missing_docs)]
pub enum Error {
/// At least one benchmark score fell below its required minimum
/// (outside the configured tolerance).
#[error("One of the benchmarks had a score that was lower than its requirement")]
UnmetRequirement,
/// A score was implausible (≥100x above or ≤1/100 of the requirement),
/// which hints at a broken benchmark rather than slow hardware.
#[error("Benchmark results are off by at least factor 100")]
BadResults,
}
impl MachineCmd {
    /// Runs the benchmark for a single `requirement` and compares the score
    /// against its minimum.
    ///
    /// # Errors
    /// Fails (unless `--allow-fail` is set) when the relative score is off by
    /// a factor of at least 100, which indicates a broken benchmark setup
    /// rather than slow hardware.
    pub fn run_benchmark(&self, requirement: &Requirement, dir: &Path) -> Result<BenchResult> {
        let score = self.measure(&requirement.metric, dir)?;
        let rel_score = score.as_bytes() / requirement.minimum.as_bytes();

        // Sanity check: a result this far off is implausible for real hardware.
        if rel_score >= 100.0 || rel_score <= 0.01 {
            self.check_failed(Error::BadResults)?;
        }
        // A score passes when it is within `tolerance` percent below the minimum.
        let passed = rel_score >= (1.0 - (self.tolerance / 100.0));
        Ok(BenchResult { passed, score, rel_score })
    }

    /// Measures the throughput of a single metric, honouring the
    /// per-benchmark time limits from the CLI arguments.
    fn measure(&self, metric: &Metric, dir: &Path) -> Result<Throughput> {
        let verify_limit = ExecutionLimit::from_secs_f32(self.verify_duration);
        let disk_limit = ExecutionLimit::from_secs_f32(self.disk_duration);
        let hash_limit = ExecutionLimit::from_secs_f32(self.hash_duration);
        let memory_limit = ExecutionLimit::from_secs_f32(self.memory_duration);

        let score = match metric {
            Metric::Blake2256 => benchmark_cpu(hash_limit),
            Metric::Sr25519Verify => benchmark_sr25519_verify(verify_limit),
            Metric::MemCopy => benchmark_memory(memory_limit),
            // Only the disk benchmarks need the target directory and can fail.
            Metric::DiskSeqWrite => benchmark_disk_sequential_writes(disk_limit, dir)?,
            Metric::DiskRndWrite => benchmark_disk_random_writes(disk_limit, dir)?,
        };
        Ok(score)
    }

    /// Runs all reference benchmarks and prints the results as a table.
    pub fn print_full_table(&self, dir: &Path) -> Result<()> {
        info!("Running full machine benchmarks...");
        // Clone once; the owned value is moved into `print_summary` below.
        // (Previously the reference hardware was cloned twice.)
        let requirements = SUBSTRATE_REFERENCE_HARDWARE.clone();
        let mut results = Vec::with_capacity(requirements.0.len());
        for requirement in &requirements.0 {
            results.push(self.run_benchmark(requirement, dir)?);
        }
        self.print_summary(requirements, results)
    }

    /// Prints a summary table of all results and fails (unless
    /// `--allow-fail` is set) when any benchmark missed its requirement.
    pub fn print_summary(
        &self,
        requirements: Requirements,
        results: Vec<BenchResult>,
    ) -> Result<()> {
        let mut table = Table::new();
        table.set_header(["Category", "Function", "Score", "Minimum", "Result"]);

        let (mut passed, mut failed) = (0, 0);
        for (requirement, result) in requirements.0.iter().zip(results.iter()) {
            if result.passed {
                passed += 1
            } else {
                failed += 1
            }
            table.add_row(result.to_row(requirement));
        }

        info!(
            "\n{}\nFrom {} benchmarks in total, {} passed and {} failed ({:.0}% fault tolerance).",
            table,
            passed + failed,
            passed,
            failed,
            self.tolerance
        );
        if failed != 0 {
            info!("The hardware fails to meet the requirements");
            self.check_failed(Error::UnmetRequirement)?;
        } else {
            // Trailing space removed from the original message.
            info!("The hardware meets the requirements");
        }
        Ok(())
    }

    /// Turns `e` into a hard error, or merely warns when `--allow-fail` is set.
    fn check_failed(&self, e: Error) -> Result<()> {
        if !self.allow_fail {
            error!("Failing since --allow-fail is not set");
            Err(sc_cli::Error::Application(Box::new(e)))
        } else {
            warn!("Ignoring error since --allow-fail is set: {:?}", e);
            Ok(())
        }
    }

    /// Validates the CLI arguments.
    ///
    /// `clap` cannot express range constraints for floats, so the tolerance
    /// is checked here. NaN is rejected as well (it is not `contain`ed in
    /// any range), whereas the previous comparison silently accepted it.
    pub fn validate_args(&self) -> Result<()> {
        if !(0.0..=100.0).contains(&self.tolerance) {
            return Err("The --tolerance argument is out of range".into());
        }
        Ok(())
    }
}
impl BenchResult {
fn to_row(&self, req: &Requirement) -> Row {
let passed = if self.passed { "✅ Pass" } else { "❌ Fail" };
vec![
req.metric.category().into(),
req.metric.name().into(),
format!("{}", self.score),
format!("{}", req.minimum),
format!("{} ({: >5.1?} %)", passed, self.rel_score * 100.0),
]
.into()
}
}
/// Maps a pass/fail flag to the emoji used in the quick-check log output.
fn status_emoji(s: bool) -> String {
    let symbol = if s { "✅" } else { "❌" };
    symbol.to_string()
}
/// Checks an already-measured [`HwBench`] against the reference hardware.
///
/// Logs one line per checked metric and returns `true` iff every available
/// score meets its minimum. Missing disk scores (`None`) are skipped and do
/// not fail the check; `Sr25519Verify` is not part of this quick check.
///
/// Note: the original code built every log line with a `format!` nested
/// inside `info!` (clippy: `format_in_format_args`), allocating an
/// intermediate `String` per line. The flattened format strings below
/// produce byte-identical output without that allocation.
pub fn check_hardware(hwbench: &HwBench) -> bool {
    info!("Performing quick hardware check...");
    let req = &SUBSTRATE_REFERENCE_HARDWARE;

    let mut cpu_ok = true;
    let mut mem_ok = true;
    let mut dsk_seq_write_ok = true;
    let mut dsk_rnd_write_ok = true;

    for requirement in req.0.iter() {
        match requirement.metric {
            Metric::Blake2256 => {
                if requirement.minimum > hwbench.cpu_hashrate_score {
                    cpu_ok = false;
                }
                info!(
                    "🏁 CPU score: {} ({} Blake2256: expected minimum {})",
                    hwbench.cpu_hashrate_score,
                    status_emoji(cpu_ok),
                    requirement.minimum
                );
            }
            Metric::MemCopy => {
                if requirement.minimum > hwbench.memory_memcpy_score {
                    mem_ok = false;
                }
                info!(
                    "🏁 Memory score: {} ({} MemCopy: expected minimum {})",
                    hwbench.memory_memcpy_score,
                    status_emoji(mem_ok),
                    requirement.minimum
                );
            }
            Metric::DiskSeqWrite => {
                // Disk benchmarks are optional; an unmeasured score passes.
                if let Some(score) = hwbench.disk_sequential_write_score {
                    if requirement.minimum > score {
                        dsk_seq_write_ok = false;
                    }
                    info!(
                        "🏁 Disk score (seq. writes): {} ({} DiskSeqWrite: expected minimum {})",
                        score,
                        status_emoji(dsk_seq_write_ok),
                        requirement.minimum
                    );
                }
            }
            Metric::DiskRndWrite => {
                if let Some(score) = hwbench.disk_random_write_score {
                    if requirement.minimum > score {
                        dsk_rnd_write_ok = false;
                    }
                    info!(
                        "🏁 Disk score (rand. writes): {} ({} DiskRndWrite: expected minimum {})",
                        score,
                        status_emoji(dsk_rnd_write_ok),
                        requirement.minimum
                    );
                }
            }
            // Signature verification is not part of the quick check.
            Metric::Sr25519Verify => {}
        }
    }
    cpu_ok && mem_ok && dsk_seq_write_ok && dsk_rnd_write_ok
}