// pezsc_sysinfo/
// sysinfo.rs

1// This file is part of Bizinikiwi.
2
3// Copyright (C) Parity Technologies (UK) Ltd. and Dijital Kurdistan Tech Institute
4// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
5
6// This program is free software: you can redistribute it and/or modify
7// it under the terms of the GNU General Public License as published by
8// the Free Software Foundation, either version 3 of the License, or
9// (at your option) any later version.
10
11// This program is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// You should have received a copy of the GNU General Public License
17// along with this program. If not, see <https://www.gnu.org/licenses/>.
18
19use crate::{ExecutionLimit, HwBench};
20
21use pezsc_telemetry::SysInfo;
22use pezsp_core::{sr25519, Pair};
23use pezsp_io::crypto::sr25519_verify;
24
25use core::f64;
26use derive_more::From;
27use rand::{seq::SliceRandom, Rng, RngCore};
28use serde::{de::Visitor, Deserialize, Deserializer, Serialize, Serializer};
29use std::{
30	borrow::Cow,
31	fmt::{self, Display, Formatter},
32	fs::File,
33	io::{Seek, SeekFrom, Write},
34	ops::{Deref, DerefMut},
35	path::{Path, PathBuf},
36	sync::{Arc, Barrier},
37	time::{Duration, Instant},
38};
39
/// A single hardware metric.
///
/// The [`Metric::name`] of each metric is prefixed by its
/// [`Metric::category`] (CPU, Memory or Disk) when shown to the user.
#[derive(Deserialize, Serialize, Debug, Clone, Copy, PartialEq)]
pub enum Metric {
	/// SR25519 signature verification.
	Sr25519Verify,
	/// Blake2-256 hashing algorithm.
	Blake2256,
	/// Blake2-256 hashing algorithm executed in parallel
	/// on `num_cores` threads.
	Blake2256Parallel { num_cores: usize },
	/// Copying data in RAM.
	MemCopy,
	/// Disk sequential write.
	DiskSeqWrite,
	/// Disk random write.
	DiskRndWrite,
}
56
/// Describes a checking failure for the hardware requirements.
///
/// Produced by [`Requirements::check_hardware`] for every metric whose
/// measured throughput falls below the required minimum.
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct CheckFailure {
	/// The metric that failed the check.
	pub metric: Metric,
	/// The expected minimum value.
	pub expected: Throughput,
	/// The measured value.
	pub found: Throughput,
}
67
/// A list of metrics that failed to meet the minimum hardware requirements.
///
/// Convertible from a plain `Vec<CheckFailure>` via the derived [`From`] impl.
#[derive(Debug, Clone, PartialEq, From)]
pub struct CheckFailures(pub Vec<CheckFailure>);
71
72impl Display for CheckFailures {
73	fn fmt(&self, formatter: &mut Formatter) -> fmt::Result {
74		write!(formatter, "Failed checks: ")?;
75		for failure in &self.0 {
76			write!(
77				formatter,
78				"{}(expected: {}, found: {}), ",
79				failure.metric.name(),
80				failure.expected,
81				failure.found
82			)?
83		}
84		Ok(())
85	}
86}
87
88impl Metric {
89	/// The category of the metric.
90	pub fn category(&self) -> &'static str {
91		match self {
92			Self::Sr25519Verify | Self::Blake2256 | Self::Blake2256Parallel { .. } => "CPU",
93			Self::MemCopy => "Memory",
94			Self::DiskSeqWrite | Self::DiskRndWrite => "Disk",
95		}
96	}
97
98	/// The name of the metric. It is always prefixed by the [`self.category()`].
99	pub fn name(&self) -> Cow<'static, str> {
100		match self {
101			Self::Sr25519Verify => Cow::Borrowed("SR25519-Verify"),
102			Self::Blake2256 => Cow::Borrowed("BLAKE2-256"),
103			Self::Blake2256Parallel { num_cores } => {
104				Cow::Owned(format!("BLAKE2-256-Parallel-{}", num_cores))
105			},
106			Self::MemCopy => Cow::Borrowed("Copy"),
107			Self::DiskSeqWrite => Cow::Borrowed("Seq Write"),
108			Self::DiskRndWrite => Cow::Borrowed("Rnd Write"),
109		}
110	}
111}
112
/// The unit in which the [`Throughput`] (bytes per second) is denoted.
pub enum Unit {
	GiBs,
	MiBs,
	KiBs,
}

impl fmt::Display for Unit {
	/// Writes the unit's label, e.g. `GiBs`.
	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
		let label = match self {
			Unit::GiBs => "GiBs",
			Unit::MiBs => "MiBs",
			Unit::KiBs => "KiBs",
		};
		f.write_str(label)
	}
}

/// Throughput as measured in bytes per second.
#[derive(Debug, Clone, Copy, PartialEq, PartialOrd)]
pub struct Throughput(f64);

/// Number of bytes in a kibibyte.
const KIBIBYTE: f64 = (1 << 10) as f64;
/// Number of bytes in a mebibyte.
const MEBIBYTE: f64 = (1 << 20) as f64;
/// Number of bytes in a gibibyte.
const GIBIBYTE: f64 = (1 << 30) as f64;

impl Throughput {
	/// Construct [`Self`] from kibibyte/s.
	pub fn from_kibs(kibs: f64) -> Throughput {
		Self(kibs * KIBIBYTE)
	}

	/// Construct [`Self`] from mebibyte/s.
	pub fn from_mibs(mibs: f64) -> Throughput {
		Self(mibs * MEBIBYTE)
	}

	/// Construct [`Self`] from gibibyte/s.
	pub fn from_gibs(gibs: f64) -> Throughput {
		Self(gibs * GIBIBYTE)
	}

	/// [`Self`] as number of byte/s.
	pub fn as_bytes(&self) -> f64 {
		self.0
	}

	/// [`Self`] as number of kibibyte/s.
	pub fn as_kibs(&self) -> f64 {
		self.0 / KIBIBYTE
	}

	/// [`Self`] as number of mebibyte/s.
	pub fn as_mibs(&self) -> f64 {
		self.0 / MEBIBYTE
	}

	/// [`Self`] as number of gibibyte/s.
	pub fn as_gibs(&self) -> f64 {
		self.0 / GIBIBYTE
	}

	/// Normalizes [`Self`] to use the largest unit possible.
	pub fn normalize(&self) -> (f64, Unit) {
		match self.0 {
			bs if bs >= GIBIBYTE => (self.as_gibs(), Unit::GiBs),
			bs if bs >= MEBIBYTE => (self.as_mibs(), Unit::MiBs),
			_ => (self.as_kibs(), Unit::KiBs),
		}
	}
}

impl fmt::Display for Throughput {
	/// Formats the throughput with two decimals in its largest fitting unit.
	fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
		let (value, unit) = self.normalize();
		write!(f, "{:.2?} {}", value, unit)
	}
}
194
/// Serializes `Throughput` and uses MiBs as the unit.
///
/// The MiB/s figure is truncated towards zero (not rounded) by the
/// `as u64` conversion.
pub fn serialize_throughput<S>(throughput: &Throughput, serializer: S) -> Result<S::Ok, S::Error>
where
	S: Serializer,
{
	serializer.serialize_u64(throughput.as_mibs() as u64)
}
202
203/// Serializes `Option<Throughput>` and uses MiBs as the unit.
204pub fn serialize_throughput_option<S>(
205	maybe_throughput: &Option<Throughput>,
206	serializer: S,
207) -> Result<S::Ok, S::Error>
208where
209	S: Serializer,
210{
211	if let Some(throughput) = maybe_throughput {
212		return serializer.serialize_some(&(throughput.as_mibs() as u64));
213	}
214	serializer.serialize_none()
215}
216
/// Serializes throughput into MiBs and represents it as `f64`.
///
/// Counterpart of [`deserialize_throughput`]; unlike [`serialize_throughput`]
/// no truncation to `u64` takes place.
fn serialize_throughput_as_f64<S>(throughput: &Throughput, serializer: S) -> Result<S::Ok, S::Error>
where
	S: Serializer,
{
	serializer.serialize_f64(throughput.as_mibs())
}
224
/// Serde visitor that deserializes a raw `f64` (interpreted as MiB/s)
/// into a [`Throughput`].
struct ThroughputVisitor;
impl<'de> Visitor<'de> for ThroughputVisitor {
	type Value = Throughput;

	fn expecting(&self, formatter: &mut Formatter) -> fmt::Result {
		formatter.write_str("A value that is a f64.")
	}

	// The incoming value denotes MiB/s, matching `serialize_throughput_as_f64`.
	fn visit_f64<E>(self, value: f64) -> Result<Self::Value, E>
	where
		E: serde::de::Error,
	{
		Ok(Throughput::from_mibs(value))
	}
}
240
241fn deserialize_throughput<'de, D>(deserializer: D) -> Result<Throughput, D::Error>
242where
243	D: Deserializer<'de>,
244{
245	Ok(deserializer.deserialize_f64(ThroughputVisitor))?
246}
247
/// Multiple requirements for the hardware.
///
/// Checked against benchmark results via [`Requirements::check_hardware`].
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct Requirements(pub Vec<Requirement>);
251
/// A single requirement for the hardware.
#[derive(Deserialize, Serialize, Debug, Clone, Copy, PartialEq)]
pub struct Requirement {
	/// The metric to measure.
	pub metric: Metric,
	/// The minimal throughput that needs to be achieved for this requirement.
	///
	/// (De)serialized as an `f64` denoting MiB/s.
	#[serde(
		serialize_with = "serialize_throughput_as_f64",
		deserialize_with = "deserialize_throughput"
	)]
	pub minimum: Throughput,
	/// Check this requirement only for relay chain validator nodes.
	///
	/// Defaults to `false` and is omitted from the serialized form when `false`.
	#[serde(default)]
	#[serde(skip_serializing_if = "core::ops::Not::not")]
	pub validator_only: bool,
}
268
/// Runs `run` repeatedly and reports the achieved throughput.
///
/// `run` is executed once as a warmup and then up to `max_iterations` times,
/// stopping early once `max_duration` has elapsed. The returned score is
/// `size` bytes per completed iteration divided by the total elapsed time.
///
/// Returns the first error produced by `run`, if any.
#[inline(always)]
pub(crate) fn benchmark<E>(
	name: &str,
	size: usize,
	max_iterations: usize,
	max_duration: Duration,
	mut run: impl FnMut() -> Result<(), E>,
) -> Result<Throughput, E> {
	// Run the benchmark once as a warmup to get the code into the L1 cache.
	run()?;

	// Then run it multiple times and average the result.
	let timestamp = Instant::now();
	let mut elapsed = Duration::default();
	let mut count = 0;
	for _ in 0..max_iterations {
		run()?;

		count += 1;
		elapsed = timestamp.elapsed();

		if elapsed >= max_duration {
			break;
		}
	}

	// NOTE(review): with `max_iterations == 0` both `count` and `elapsed` stay
	// zero and the division below yields NaN — callers are expected to pass a
	// non-zero iteration limit.
	let score = Throughput::from_kibs((size * count) as f64 / (elapsed.as_secs_f64() * 1024.0));
	log::trace!(
		"Calculated {} of {} in {} iterations in {}ms",
		name,
		score,
		count,
		elapsed.as_millis()
	);
	Ok(score)
}
305
/// Gathers information about node's hardware and software.
///
/// All fields start out as `None` and are only populated by the OS-specific
/// collectors compiled in on Linux and FreeBSD.
pub fn gather_sysinfo() -> SysInfo {
	// `mut` is only used by the `cfg`-gated collectors below.
	#[allow(unused_mut)]
	let mut sysinfo = SysInfo {
		cpu: None,
		memory: None,
		core_count: None,
		linux_kernel: None,
		linux_distro: None,
		is_virtual_machine: None,
	};

	#[cfg(target_os = "linux")]
	crate::sysinfo_linux::gather_linux_sysinfo(&mut sysinfo);

	#[cfg(target_os = "freebsd")]
	crate::sysinfo_freebsd::gather_freebsd_sysinfo(&mut sysinfo);

	sysinfo
}
326
/// Forces the compiler to assume that `slice` is both read and written,
/// so computations producing or consuming it cannot be optimized out.
///
/// # Panics
///
/// Panics if `slice` is empty.
#[inline(never)]
fn clobber_slice<T>(slice: &mut [T]) {
	assert!(!slice.is_empty());

	// Discourage the compiler from optimizing out our benchmarks.
	//
	// Volatile reads and writes are guaranteed to not be elided nor reordered,
	// so we can use them to effectively clobber a piece of memory and prevent
	// the compiler from optimizing out our technically unnecessary code.
	//
	// This is not totally bulletproof in theory, but should work in practice.
	//
	// SAFETY: We've checked that the slice is not empty, so reading and writing
	//         its first element is always safe.
	unsafe {
		let value = std::ptr::read_volatile(slice.as_ptr());
		std::ptr::write_volatile(slice.as_mut_ptr(), value);
	}
}
346
/// Forces the compiler to assume that `input` is both read and written,
/// preventing the benchmarked computation from being optimized out.
#[inline(never)]
fn clobber_value<T>(input: &mut T) {
	// Look into `clobber_slice` for a comment.
	unsafe {
		let value = std::ptr::read_volatile(input);
		std::ptr::write_volatile(input, value);
	}
}
355
/// A default [`ExecutionLimit`] that can be used to call [`benchmark_cpu`].
///
/// At most 4096 iterations or 100ms, whichever is reached first.
pub const DEFAULT_CPU_EXECUTION_LIMIT: ExecutionLimit =
	ExecutionLimit::Both { max_iterations: 4 * 1024, max_duration: Duration::from_millis(100) };
359
/// This benchmarks the single core CPU speed as measured by calculating BLAKE2b-256 hashes, in
/// bytes per second.
pub fn benchmark_cpu(limit: ExecutionLimit) -> Throughput {
	benchmark_cpu_parallelism(limit, 1)
}
365
/// This benchmarks the entire CPU speed as measured by calculating BLAKE2b-256 hashes, in bytes per
/// second. It spawns multiple threads to measure the throughput of the entire CPU and averages the
/// score obtained by each thread. If we have at least `refhw_num_cores` available then the
/// average throughput should be relatively close to the single core performance as measured by
/// calling this function with `refhw_num_cores` equal to 1.
pub fn benchmark_cpu_parallelism(limit: ExecutionLimit, refhw_num_cores: usize) -> Throughput {
	// In general the results of this benchmark are somewhat sensitive to how much
	// data we hash at the time. The smaller this is the *less* B/s we can hash,
	// the bigger this is the *more* B/s we can hash, up until a certain point
	// where we can achieve roughly ~100% of what the hasher can do. If we'd plot
	// this on a graph with the number of bytes we want to hash on the X axis
	// and the speed in B/s on the Y axis then we'd essentially see it grow
	// logarithmically.
	//
	// In practice however we might not always have enough data to hit the maximum
	// possible speed that the hasher can achieve, so the size set here should be
	// picked in such a way as to still measure how fast the hasher is at hashing,
	// but without hitting its theoretical maximum speed.
	const SIZE: usize = 32 * 1024;

	// The barrier makes all spawned threads start the measured section together.
	let ready_to_run_benchmark = Arc::new(Barrier::new(refhw_num_cores));
	let mut benchmark_threads = Vec::new();

	// Spawn a thread for each expected core and average the throughput for each of them.
	for _ in 0..refhw_num_cores {
		let ready_to_run_benchmark = ready_to_run_benchmark.clone();

		let handle = std::thread::spawn(move || {
			let mut buffer = Vec::new();
			buffer.resize(SIZE, 0x66);
			let mut hash = Default::default();

			let run = || -> Result<(), ()> {
				// Clobber input and output so the hashing cannot be optimized out.
				clobber_slice(&mut buffer);
				hash = pezsp_crypto_hashing::blake2_256(&buffer);
				clobber_slice(&mut hash);

				Ok(())
			};
			// Wait until every sibling thread is ready before measuring.
			ready_to_run_benchmark.wait();
			benchmark("CPU score", SIZE, limit.max_iterations(), limit.max_duration(), run)
				.expect("benchmark cannot fail; qed")
		});
		benchmark_threads.push(handle);
	}

	// A thread that panicked contributes a score of zero to the average.
	let average_score = benchmark_threads
		.into_iter()
		.map(|thread| thread.join().map(|throughput| throughput.as_kibs()).unwrap_or(0.0))
		.sum::<f64>()
		/ refhw_num_cores as f64;
	Throughput::from_kibs(average_score)
}
419
/// A default [`ExecutionLimit`] that can be used to call [`benchmark_memory`].
///
/// At most 32 iterations or 100ms, whichever is reached first.
pub const DEFAULT_MEMORY_EXECUTION_LIMIT: ExecutionLimit =
	ExecutionLimit::Both { max_iterations: 32, max_duration: Duration::from_millis(100) };
423
/// This benchmarks the effective `memcpy` memory bandwidth available in bytes per second.
///
/// It doesn't technically measure the absolute maximum memory bandwidth available,
/// but that's fine, because real code most of the time isn't optimized to take
/// advantage of the full memory bandwidth either.
pub fn benchmark_memory(limit: ExecutionLimit) -> Throughput {
	// Ideally this should be at least as big as the CPU's L3 cache,
	// and it should be big enough so that the `memcpy` takes enough
	// time to be actually measurable.
	//
	// As long as it's big enough increasing it further won't change
	// the benchmark's results.
	const SIZE: usize = 64 * 1024 * 1024;

	let mut src = Vec::new();
	let mut dst = Vec::new();

	// Prefault the pages; we want to measure the memory bandwidth,
	// not how fast the kernel can supply us with fresh memory pages.
	src.resize(SIZE, 0x66);
	dst.resize(SIZE, 0x77);

	let run = || -> Result<(), ()> {
		clobber_slice(&mut src);
		clobber_slice(&mut dst);

		// SAFETY: Both vectors are of the same type and of the same size,
		//         so copying data between them is safe.
		unsafe {
			// We use `memcpy` directly here since `copy_from_slice` isn't actually
			// guaranteed to be turned into a `memcpy`.
			libc::memcpy(dst.as_mut_ptr().cast(), src.as_ptr().cast(), SIZE);
		}

		clobber_slice(&mut dst);
		clobber_slice(&mut src);

		Ok(())
	};

	benchmark("memory score", SIZE, limit.max_iterations(), limit.max_duration(), run)
		.expect("benchmark cannot fail; qed")
}
467
/// A file handle whose backing file is removed from disk on drop.
struct TemporaryFile {
	/// Always `Some` during the value's lifetime; only taken out
	/// (and thereby closed) at the start of `drop`.
	fp: Option<File>,
	/// Path used to remove the file on drop.
	path: PathBuf,
}
472
impl Drop for TemporaryFile {
	fn drop(&mut self) {
		// Drop (and thereby close) the file handle before removing the file.
		let _ = self.fp.take();

		// Remove the file.
		//
		// This has to be done *after* the benchmark,
		// otherwise it changes the results as the data
		// doesn't actually get properly flushed to the disk,
		// since the file's not there anymore.
		if let Err(error) = std::fs::remove_file(&self.path) {
			log::warn!("Failed to remove the file used for the disk benchmark: {}", error);
		}
	}
}
488
impl Deref for TemporaryFile {
	type Target = File;
	/// Borrows the underlying [`File`]; `fp` is only emptied in `drop`,
	/// so the `expect` cannot fire during normal use.
	fn deref(&self) -> &Self::Target {
		self.fp.as_ref().expect("`fp` is None only during `drop`")
	}
}
495
impl DerefMut for TemporaryFile {
	/// Mutably borrows the underlying [`File`]; see [`Deref`] for why
	/// the `expect` is safe.
	fn deref_mut(&mut self) -> &mut Self::Target {
		self.fp.as_mut().expect("`fp` is None only during `drop`")
	}
}
501
/// Returns a PCG-64 PRNG seeded with fixed constants, so every benchmark
/// run operates on exactly the same pseudo-random data.
fn rng() -> rand_pcg::Pcg64 {
	rand_pcg::Pcg64::new(0xcafef00dd15ea5e5, 0xa02bdbf7bb3c0a7ac28fa16a64abf96)
}
505
506fn random_data(size: usize) -> Vec<u8> {
507	let mut buffer = Vec::new();
508	buffer.resize(size, 0);
509	rng().fill(&mut buffer[..]);
510	buffer
511}
512
/// A default [`ExecutionLimit`] that can be used to call [`benchmark_disk_sequential_writes`]
/// and [`benchmark_disk_random_writes`].
///
/// At most 32 iterations or 300ms, whichever is reached first.
pub const DEFAULT_DISK_EXECUTION_LIMIT: ExecutionLimit =
	ExecutionLimit::Both { max_iterations: 32, max_duration: Duration::from_millis(300) };
517
/// Benchmarks the disk sequential write speed in bytes per second.
///
/// Creates a temporary file inside `directory`, then per iteration writes a
/// 64 MiB buffer to it, fsyncs and rewinds. The file is removed when the
/// benchmark is done.
///
/// Returns an error string if the test file cannot be created, written,
/// synced or seeked.
pub fn benchmark_disk_sequential_writes(
	limit: ExecutionLimit,
	directory: &Path,
) -> Result<Throughput, String> {
	const SIZE: usize = 64 * 1024 * 1024;

	let buffer = random_data(SIZE);
	let path = directory.join(".disk_bench_seq_wr.tmp");

	let fp =
		File::create(&path).map_err(|error| format!("failed to create a test file: {}", error))?;

	let mut fp = TemporaryFile { fp: Some(fp), path };

	fp.sync_all()
		.map_err(|error| format!("failed to fsync the test file: {}", error))?;

	let run = || {
		// Just dump everything to the disk in one go.
		fp.write_all(&buffer)
			.map_err(|error| format!("failed to write to the test file: {}", error))?;

		// And then make sure it was actually written to disk.
		fp.sync_all()
			.map_err(|error| format!("failed to fsync the test file: {}", error))?;

		// Rewind to the beginning for the next iteration of the benchmark.
		fp.seek(SeekFrom::Start(0))
			.map_err(|error| format!("failed to seek to the start of the test file: {}", error))?;

		Ok(())
	};

	benchmark(
		"disk sequential write score",
		SIZE,
		limit.max_iterations(),
		limit.max_duration(),
		run,
	)
}
559
560pub fn benchmark_disk_random_writes(
561	limit: ExecutionLimit,
562	directory: &Path,
563) -> Result<Throughput, String> {
564	const SIZE: usize = 64 * 1024 * 1024;
565
566	let buffer = random_data(SIZE);
567	let path = directory.join(".disk_bench_rand_wr.tmp");
568
569	let fp =
570		File::create(&path).map_err(|error| format!("failed to create a test file: {}", error))?;
571
572	let mut fp = TemporaryFile { fp: Some(fp), path };
573
574	// Since we want to test random writes we need an existing file
575	// through which we can seek, so here we just populate it with some data.
576	fp.write_all(&buffer)
577		.map_err(|error| format!("failed to write to the test file: {}", error))?;
578
579	fp.sync_all()
580		.map_err(|error| format!("failed to fsync the test file: {}", error))?;
581
582	// Generate a list of random positions at which we'll issue writes.
583	let mut positions = Vec::with_capacity(SIZE / 4096);
584	{
585		let mut position = 0;
586		while position < SIZE {
587			positions.push(position);
588			position += 4096;
589		}
590	}
591
592	positions.shuffle(&mut rng());
593
594	let run = || {
595		for &position in &positions {
596			fp.seek(SeekFrom::Start(position as u64))
597				.map_err(|error| format!("failed to seek in the test file: {}", error))?;
598
599			// Here we deliberately only write half of the chunk since we don't
600			// want the OS' disk scheduler to coalesce our writes into one single
601			// sequential write.
602			//
603			// Also the chunk's size is deliberately exactly half of a modern disk's
604			// sector size to trigger an RMW cycle.
605			let chunk = &buffer[position..position + 2048];
606			fp.write_all(&chunk)
607				.map_err(|error| format!("failed to write to the test file: {}", error))?;
608		}
609
610		fp.sync_all()
611			.map_err(|error| format!("failed to fsync the test file: {}", error))?;
612
613		Ok(())
614	};
615
616	// We only wrote half of the bytes hence `SIZE / 2`.
617	benchmark(
618		"disk random write score",
619		SIZE / 2,
620		limit.max_iterations(),
621		limit.max_duration(),
622		run,
623	)
624}
625
/// Benchmarks the verification speed of sr25519 signatures.
///
/// Returns the throughput in B/s by convention.
/// The values are rather small (0.4-0.8) so it is advised to convert them into KB/s.
pub fn benchmark_sr25519_verify(limit: ExecutionLimit) -> Throughput {
	// Size of each signed message in bytes.
	const INPUT_SIZE: usize = 32;
	// Number of signatures verified per benchmark iteration.
	const ITERATION_SIZE: usize = 2048;
	let pair = sr25519::Pair::from_string("//Alice", None).unwrap();

	// Pre-generate the messages and their signatures outside the measured
	// section, so only the verification itself is timed.
	let mut rng = rng();
	let mut msgs = Vec::new();
	let mut sigs = Vec::new();

	for _ in 0..ITERATION_SIZE {
		let mut msg = vec![0u8; INPUT_SIZE];
		rng.fill_bytes(&mut msg[..]);

		sigs.push(pair.sign(&msg));
		msgs.push(msg);
	}

	let run = || -> Result<(), String> {
		for (sig, msg) in sigs.iter().zip(msgs.iter()) {
			// Clobber the result so the verification cannot be optimized out.
			let mut ok = sr25519_verify(&sig, &msg[..], &pair.public());
			clobber_value(&mut ok);
		}
		Ok(())
	};
	benchmark(
		"sr25519 verification score",
		INPUT_SIZE * ITERATION_SIZE,
		limit.max_iterations(),
		limit.max_duration(),
		run,
	)
	.expect("sr25519 verification cannot fail; qed")
}
663
/// Benchmarks the hardware and returns the results of those benchmarks.
///
/// Optionally accepts a path to a `scratch_directory` to use to benchmark the
/// disk. The `requirements` are scanned for a [`Metric::Blake2256Parallel`]
/// entry to determine how many cores the parallel CPU benchmark should use;
/// without such an entry the single-core score is reused.
pub fn gather_hwbench(scratch_directory: Option<&Path>, requirements: &Requirements) -> HwBench {
	let cpu_hashrate_score = benchmark_cpu(DEFAULT_CPU_EXECUTION_LIMIT);
	// Run the parallel benchmark only for the first `Blake2256Parallel`
	// requirement (if any); otherwise fall back to the single-core score.
	let (parallel_cpu_hashrate_score, parallel_cpu_cores) = requirements
		.0
		.iter()
		.filter_map(|req| {
			if let Metric::Blake2256Parallel { num_cores } = req.metric {
				Some((benchmark_cpu_parallelism(DEFAULT_CPU_EXECUTION_LIMIT, num_cores), num_cores))
			} else {
				None
			}
		})
		.next()
		.unwrap_or((cpu_hashrate_score, 1));
	#[allow(unused_mut)]
	let mut hwbench = HwBench {
		cpu_hashrate_score,
		parallel_cpu_hashrate_score,
		parallel_cpu_cores,
		memory_memcpy_score: benchmark_memory(DEFAULT_MEMORY_EXECUTION_LIMIT),
		disk_sequential_write_score: None,
		disk_random_write_score: None,
	};

	// The disk benchmarks are best-effort: failures are logged and the
	// corresponding scores stay `None`.
	if let Some(scratch_directory) = scratch_directory {
		hwbench.disk_sequential_write_score =
			match benchmark_disk_sequential_writes(DEFAULT_DISK_EXECUTION_LIMIT, scratch_directory)
			{
				Ok(score) => Some(score),
				Err(error) => {
					log::warn!("Failed to run the sequential write disk benchmark: {}", error);
					None
				},
			};

		hwbench.disk_random_write_score =
			match benchmark_disk_random_writes(DEFAULT_DISK_EXECUTION_LIMIT, scratch_directory) {
				Ok(score) => Some(score),
				Err(error) => {
					log::warn!("Failed to run the random write disk benchmark: {}", error);
					None
				},
			};
	}

	hwbench
}
716
717impl Requirements {
718	/// Whether the hardware requirements are met by the provided benchmark results.
719	pub fn check_hardware(
720		&self,
721		hwbench: &HwBench,
722		is_rc_authority: bool,
723	) -> Result<(), CheckFailures> {
724		let mut failures = Vec::new();
725		for requirement in self.0.iter() {
726			if requirement.validator_only && !is_rc_authority {
727				continue;
728			}
729
730			match requirement.metric {
731				Metric::Blake2256 => {
732					if requirement.minimum > hwbench.cpu_hashrate_score {
733						failures.push(CheckFailure {
734							metric: requirement.metric,
735							expected: requirement.minimum,
736							found: hwbench.cpu_hashrate_score,
737						});
738					}
739				},
740				Metric::Blake2256Parallel { .. } => {
741					if requirement.minimum > hwbench.parallel_cpu_hashrate_score {
742						failures.push(CheckFailure {
743							metric: requirement.metric,
744							expected: requirement.minimum,
745							found: hwbench.parallel_cpu_hashrate_score,
746						});
747					}
748				},
749				Metric::MemCopy => {
750					if requirement.minimum > hwbench.memory_memcpy_score {
751						failures.push(CheckFailure {
752							metric: requirement.metric,
753							expected: requirement.minimum,
754							found: hwbench.memory_memcpy_score,
755						});
756					}
757				},
758				Metric::DiskSeqWrite => {
759					if let Some(score) = hwbench.disk_sequential_write_score {
760						if requirement.minimum > score {
761							failures.push(CheckFailure {
762								metric: requirement.metric,
763								expected: requirement.minimum,
764								found: score,
765							});
766						}
767					}
768				},
769				Metric::DiskRndWrite => {
770					if let Some(score) = hwbench.disk_random_write_score {
771						if requirement.minimum > score {
772							failures.push(CheckFailure {
773								metric: requirement.metric,
774								expected: requirement.minimum,
775								found: score,
776							});
777						}
778					}
779				},
780				Metric::Sr25519Verify => {},
781			}
782		}
783		if failures.is_empty() {
784			Ok(())
785		} else {
786			Err(failures.into())
787		}
788	}
789}
790
#[cfg(test)]
mod tests {
	use super::*;
	use pezsp_runtime::assert_eq_error_rate_float;

	// Only runs on Linux, where the OS-specific collector populates the fields.
	#[cfg(target_os = "linux")]
	#[test]
	fn test_gather_sysinfo_linux() {
		let sysinfo = gather_sysinfo();
		assert!(sysinfo.cpu.unwrap().len() > 0);
		assert!(sysinfo.core_count.unwrap() > 0);
		assert!(sysinfo.memory.unwrap() > 0);
		assert_ne!(sysinfo.is_virtual_machine, None);
		assert_ne!(sysinfo.linux_kernel, None);
		assert_ne!(sysinfo.linux_distro, None);
	}

	// The benchmark tests below only sanity-check that a positive score
	// is produced; absolute numbers are machine-dependent.
	#[test]
	fn test_benchmark_cpu() {
		assert!(benchmark_cpu(DEFAULT_CPU_EXECUTION_LIMIT) > Throughput::from_mibs(0.0));
	}

	#[test]
	fn test_benchmark_parallel_cpu() {
		assert!(
			benchmark_cpu_parallelism(DEFAULT_CPU_EXECUTION_LIMIT, 8) > Throughput::from_mibs(0.0)
		);
	}

	#[test]
	fn test_benchmark_memory() {
		assert!(benchmark_memory(DEFAULT_MEMORY_EXECUTION_LIMIT) > Throughput::from_mibs(0.0));
	}

	#[test]
	fn test_benchmark_disk_sequential_writes() {
		assert!(
			benchmark_disk_sequential_writes(DEFAULT_DISK_EXECUTION_LIMIT, "./".as_ref()).unwrap()
				> Throughput::from_mibs(0.0)
		);
	}

	#[test]
	fn test_benchmark_disk_random_writes() {
		assert!(
			benchmark_disk_random_writes(DEFAULT_DISK_EXECUTION_LIMIT, "./".as_ref()).unwrap()
				> Throughput::from_mibs(0.0)
		);
	}

	#[test]
	fn test_benchmark_sr25519_verify() {
		assert!(
			benchmark_sr25519_verify(ExecutionLimit::MaxIterations(1)) > Throughput::from_mibs(0.0)
		);
	}

	/// Test the [`Throughput`].
	#[test]
	fn throughput_works() {
		/// Float precision.
		const EPS: f64 = 0.1;
		let gib = Throughput::from_gibs(14.324);

		assert_eq_error_rate_float!(14.324, gib.as_gibs(), EPS);
		assert_eq_error_rate_float!(14667.776, gib.as_mibs(), EPS);
		assert_eq_error_rate_float!(14667.776 * 1024.0, gib.as_kibs(), EPS);
		assert_eq!("14.32 GiBs", gib.to_string());

		let mib = Throughput::from_mibs(1029.0);
		assert_eq!("1.00 GiBs", mib.to_string());
	}

	/// Test the [`HwBench`] serialization.
	#[test]
	fn hwbench_serialize_works() {
		let hwbench = HwBench {
			cpu_hashrate_score: Throughput::from_gibs(1.32),
			parallel_cpu_hashrate_score: Throughput::from_gibs(1.32),
			parallel_cpu_cores: 4,
			memory_memcpy_score: Throughput::from_kibs(9342.432),
			disk_sequential_write_score: Some(Throughput::from_kibs(4332.12)),
			disk_random_write_score: None,
		};

		let serialized = serde_json::to_string(&hwbench).unwrap();
		// Throughput from all of the benchmarks should be converted to MiBs.
		assert_eq!(serialized, "{\"cpu_hashrate_score\":1351,\"parallel_cpu_hashrate_score\":1351,\"parallel_cpu_cores\":4,\"memory_memcpy_score\":9,\"disk_sequential_write_score\":4}");
	}
}