//! clock-hash 1.0.0
//!
//! ClockHash-256: Consensus hash function for ClockinChain
//! Performance regression tests for ClockHash-256
//!
//! Tests to ensure performance doesn't degrade significantly over time.

use clock_hash::{clockhash256, ClockHasher};
use std::time::{Duration, Instant};

/// Test that basic hashing performance is reasonable.
///
/// Averages the one-shot `clockhash256` path over many iterations and
/// asserts a generous per-hash upper bound as a gross-regression guard.
#[test]
fn test_basic_performance_baseline() {
    let data = b"This is a test message for performance baseline";

    let iterations: u32 = 1000;
    let start = Instant::now();

    for _ in 0..iterations {
        // black_box prevents the optimizer from eliding the hash computation
        // in release builds, which would make the measured time meaningless.
        std::hint::black_box(clockhash256(std::hint::black_box(data)));
    }

    let elapsed = start.elapsed();
    let avg_time = elapsed / iterations;

    // Should be reasonably fast (less than 100 microseconds per hash on modern hardware)
    assert!(
        avg_time < Duration::from_micros(100),
        "Average hash time too slow: {:?}",
        avg_time
    );
}

/// Test incremental hasher performance.
///
/// Feeds 1 KB of data in 64-byte chunks through `ClockHasher` and asserts
/// the full update/finalize cycle stays under a generous time bound.
#[test]
fn test_incremental_performance() {
    let data = vec![0x42u8; 1024]; // 1KB of data
    let chunk_size = 64;

    let iterations: u32 = 100;
    let start = Instant::now();

    for _ in 0..iterations {
        let mut hasher = ClockHasher::new();
        for chunk in data.chunks(chunk_size) {
            // black_box keeps the optimizer from proving the chunk contents
            // are constant and skipping the update work in release builds.
            hasher.update(std::hint::black_box(chunk));
        }
        std::hint::black_box(hasher.finalize());
    }

    let elapsed = start.elapsed();
    let avg_time = elapsed / iterations;

    // Should be reasonably fast
    assert!(
        avg_time < Duration::from_millis(1),
        "Incremental hashing too slow: {:?}",
        avg_time
    );
}

/// Test performance scaling with input size.
///
/// Hashing time should grow roughly linearly with input size; this asserts
/// adjacent size steps never show scaling anywhere near quadratic.
#[test]
fn test_performance_scaling() {
    let sizes = [64usize, 256, 1024, 4096, 16384]; // 64 bytes to 16KB

    // Track both the previous size and its average time so the ratio check
    // uses the real step between sizes instead of a hard-coded 4x assumption.
    let mut previous: Option<(usize, Duration)> = None;

    for &size in &sizes {
        let data: Vec<u8> = (0..size).map(|i| (i % 256) as u8).collect();

        let iterations: u32 = 100;
        let start = Instant::now();
        for _ in 0..iterations {
            // black_box prevents the hash work from being optimized away.
            std::hint::black_box(clockhash256(std::hint::black_box(&data)));
        }
        let avg_time = start.elapsed() / iterations;

        // Performance should scale reasonably (not worse than O(n^2))
        if let Some((prev_size, prev_time)) = previous {
            // Skip the ratio check if the previous measurement rounded to zero.
            if prev_time > Duration::new(0, 0) {
                let scaling_factor =
                    avg_time.as_nanos() as f64 / prev_time.as_nanos() as f64;
                let size_ratio = size as f64 / prev_size as f64;

                // Allow some overhead but not excessive scaling
                assert!(
                    scaling_factor < size_ratio * 2.0,
                    "Performance scaling too poor: {}x slower for {}x larger input",
                    scaling_factor,
                    size_ratio
                );
            }
        }

        previous = Some((size, avg_time));
    }
}

/// Test that SIMD-enabled builds stay fast.
///
/// NOTE(review): despite the name, this does not compare against a scalar
/// build — it only asserts an absolute time bound with the `simd` feature
/// enabled. A true advantage test would need both code paths side by side.
#[cfg(feature = "simd")]
#[test]
fn test_simd_performance_advantage() {
    let data = vec![0xAAu8; 1024];
    let iterations = 1000;

    let start = Instant::now();
    for _ in 0..iterations {
        // black_box keeps the hash work alive under optimization so the
        // timing reflects real hashing, not a no-op loop.
        std::hint::black_box(clockhash256(std::hint::black_box(&data)));
    }
    let elapsed = start.elapsed();

    // With SIMD enabled, should be reasonably fast
    // This is a basic regression test - exact timing depends on hardware
    assert!(
        elapsed < Duration::from_millis(50),
        "SIMD performance too slow: {:?}",
        elapsed
    );
}

/// Test memory usage doesn't grow unexpectedly.
///
/// A smoke test: hammering the function with many repeated calls should
/// complete quickly. A leak or runaway allocation pattern would show up as
/// a large slowdown (or an OOM) long before the 5-second bound.
#[test]
fn test_memory_usage_stability() {
    let data = b"memory usage test data";

    // Warm up (lets any one-time initialization happen outside the timing).
    for _ in 0..10 {
        std::hint::black_box(clockhash256(data));
    }

    let iterations = 10000;
    let start_time = Instant::now();

    for _ in 0..iterations {
        // black_box ensures every call actually performs the hash work.
        std::hint::black_box(clockhash256(std::hint::black_box(data)));
    }

    let elapsed = start_time.elapsed();

    // Should complete in reasonable time without memory issues
    assert!(
        elapsed < Duration::from_secs(5),
        "Memory usage test took too long: {:?}",
        elapsed
    );
}

/// Test performance with different input patterns.
///
/// Some hash designs degrade on low-entropy inputs (all zeros, all ones,
/// regular alternation); this asserts every pattern stays within the same
/// generous per-hash bound.
#[test]
fn test_performance_patterns() {
    let patterns = vec![
        ("zeros", vec![0x00; 1024]),
        ("ones", vec![0xFF; 1024]),
        ("random", (0..1024).map(|i| (i * 7 + 13) as u8).collect()),
        ("alternating", (0..1024).map(|i| if i % 2 == 0 { 0xAA } else { 0x55 }).collect()),
    ];

    for (name, data) in patterns {
        let iterations: u32 = 100;
        let start = Instant::now();

        for _ in 0..iterations {
            // black_box prevents the optimizer from eliding the hash work.
            std::hint::black_box(clockhash256(std::hint::black_box(&data)));
        }

        let elapsed = start.elapsed();
        let avg_time = elapsed / iterations;

        // All patterns should be reasonably fast
        assert!(
            avg_time < Duration::from_micros(200),
            "Pattern '{}' too slow: {:?}",
            name,
            avg_time
        );
    }
}

/// Test that performance is consistent across runs.
///
/// Times ten batches of 100 hashes each and asserts the coefficient of
/// variation (std dev / mean) stays below 20%. NOTE(review): this can be
/// flaky on loaded or heavily virtualized CI machines — timing noise there
/// is outside the hash function's control.
#[test]
fn test_performance_consistency() {
    let data = b"consistency test data";

    let mut times = Vec::new();
    let iterations = 10;

    for _ in 0..iterations {
        let start = Instant::now();
        for _ in 0..100 {
            // black_box keeps the measured loop doing real work.
            std::hint::black_box(clockhash256(std::hint::black_box(data)));
        }
        let elapsed = start.elapsed();
        times.push(elapsed);
    }

    // Calculate coefficient of variation
    let avg_time: f64 =
        times.iter().map(|d| d.as_nanos() as f64).sum::<f64>() / times.len() as f64;
    let variance = times
        .iter()
        .map(|d| (d.as_nanos() as f64 - avg_time).powi(2))
        .sum::<f64>()
        / times.len() as f64;
    let std_dev = variance.sqrt();
    let cv = std_dev / avg_time; // Coefficient of variation

    // Performance should be reasonably consistent (CV < 20%)
    assert!(cv < 0.2, "Performance too inconsistent: CV = {:.3}", cv);
}

/// Test domain separation performance.
///
/// Asserts that the domain-separated variant (`clockhash256_domain` with a
/// tag) does not add excessive overhead versus the plain hash bound used in
/// `test_basic_performance_baseline`.
#[test]
fn test_domain_performance() {
    use clock_hash::{clockhash256_domain, tags};

    let data = b"domain performance test";

    let iterations: u32 = 1000;
    let start = Instant::now();

    for _ in 0..iterations {
        // black_box prevents the optimizer from eliding the hash work.
        std::hint::black_box(clockhash256_domain(tags::CLK_BLOCK, std::hint::black_box(data)));
    }

    let elapsed = start.elapsed();
    let avg_time = elapsed / iterations;

    // Domain separation should not add excessive overhead
    assert!(
        avg_time < Duration::from_micros(150),
        "Domain separation too slow: {:?}",
        avg_time
    );
}

/// Test throughput for large data.
///
/// Hashes a single 1 MB buffer, asserting both an absolute time bound and
/// a conservative minimum throughput of 10 MB/s.
#[test]
fn test_large_data_throughput() {
    let size = 1024 * 1024; // 1MB
    let data: Vec<u8> = (0..size).map(|i| (i % 256) as u8).collect();

    let start = Instant::now();
    // black_box forces the hash to actually run; an elided call would also
    // make the throughput division below meaningless (near-zero elapsed).
    std::hint::black_box(clockhash256(std::hint::black_box(&data)));
    let elapsed = start.elapsed();

    // Should hash 1MB in reasonable time (less than 100ms on modern hardware)
    assert!(
        elapsed < Duration::from_millis(100),
        "Large data hashing too slow: {:?}",
        elapsed
    );

    // Calculate throughput
    let bytes_per_sec = size as f64 / elapsed.as_secs_f64();
    let mb_per_sec = bytes_per_sec / (1024.0 * 1024.0);

    // Should be at least 10 MB/s (very conservative baseline)
    assert!(
        mb_per_sec > 10.0,
        "Throughput too low: {:.2} MB/s",
        mb_per_sec
    );
}