//! clock-hash 1.0.0
//!
//! ClockHash-256: Consensus hash function for ClockinChain
//!
//! Comprehensive tests for the streaming/incremental hasher API
//!
//! Tests edge cases, boundary conditions, and stress scenarios for ClockHasher.

use clock_hash::{ClockHasher, clockhash256};

/// Interleaving empty updates with data must not change the final hash.
#[test]
fn test_empty_updates() {
    // Same payload twice: once with empty updates interspersed, once without.
    let with_empties: [&[u8]; 4] = [b"hello", b"", b"world", b""];

    let mut hasher1 = ClockHasher::new();
    for part in with_empties {
        hasher1.update(part);
    }

    let mut hasher2 = ClockHasher::new();
    hasher2.update(b"hello");
    hasher2.update(b"world");

    let hash1 = hasher1.finalize();
    let hash2 = hasher2.finalize();

    assert_eq!(hash1, hash2, "Empty updates should not affect the hash");
}

/// Feeding the input one byte at a time must match the one-shot hash.
#[test]
fn test_micro_chunking() {
    let data = b"This is a test message for micro-chunking";

    // One-shot reference.
    let reference_hash = clockhash256(data);

    // Stream the same bytes as 1-byte slices.
    let mut hasher = ClockHasher::new();
    for chunk in data.chunks(1) {
        hasher.update(chunk);
    }
    let chunked_hash = hasher.finalize();

    assert_eq!(reference_hash, chunked_hash, "Micro-chunking should produce same result");
}

/// Strictly alternating empty/data updates must match one contiguous update.
#[test]
fn test_alternating_empty_data() {
    // Empty slice before, between, and after each data chunk.
    let parts: [&[u8]; 7] = [b"", b"chunk1", b"", b"chunk2", b"", b"chunk3", b""];

    let mut hasher1 = ClockHasher::new();
    for part in parts {
        hasher1.update(part);
    }

    // Same bytes delivered in a single update.
    let mut hasher2 = ClockHasher::new();
    hasher2.update(b"chunk1chunk2chunk3");

    let hash1 = hasher1.finalize();
    let hash2 = hasher2.finalize();

    assert_eq!(hash1, hash2, "Alternating empty/data updates should not affect result");
}

/// Streaming 1 MiB in 64 KiB chunks must match one-shot hashing of the same bytes.
#[test]
fn test_large_streaming_data() {
    const CHUNK_SIZE: usize = 64 * 1024; // 64 KiB per update call
    const TOTAL_SIZE: usize = 1024 * 1024; // 1 MiB total input

    // Build the full input exactly once. The original test generated the
    // streamed chunks and the one-shot buffer with two separate formulas that
    // were only implicitly equivalent; chunking one shared buffer guarantees
    // both paths hash identical bytes and avoids a fresh Vec per chunk.
    let full_data: Vec<u8> = (0..TOTAL_SIZE).map(|i| (i % 256) as u8).collect();

    // Streamed hash over fixed-size chunks.
    let mut hasher = ClockHasher::new();
    for chunk in full_data.chunks(CHUNK_SIZE) {
        hasher.update(chunk);
    }
    let streaming_hash = hasher.finalize();

    // One-shot reference over the identical buffer.
    let one_shot_hash = clockhash256(&full_data);

    assert_eq!(streaming_hash, one_shot_hash, "Large streaming should match one-shot");
}

/// Chunk sizes straddling the 128-byte block boundary must all agree with one-shot hashing.
#[test]
fn test_block_boundary_streaming() {
    // Sizes just below, at, and above multiples of the 128-byte block.
    let test_sizes: [usize; 10] = [1, 127, 128, 129, 255, 256, 257, 383, 384, 385];

    for &chunk_size in test_sizes.iter() {
        // Three chunks' worth of deterministic bytes.
        let data: Vec<u8> = (0..chunk_size * 3).map(|i| (i % 256) as u8).collect();

        // Streamed hash.
        let mut hasher = ClockHasher::new();
        data.chunks(chunk_size).for_each(|chunk| hasher.update(chunk));
        let chunked_hash = hasher.finalize();

        // One-shot reference.
        let one_shot_hash = clockhash256(&data);

        assert_eq!(chunked_hash, one_shot_hash,
            "Chunking with size {} should match one-shot", chunk_size);
    }
}

/// A thousand tiny two-byte updates must match hashing the concatenation.
#[test]
fn test_many_small_updates() {
    let mut hasher = ClockHasher::new();
    let mut reference_data = Vec::new();

    // 1000 two-byte updates with deterministic contents.
    for i in 0..1000 {
        let pair = [(i % 256) as u8, ((i + 1) % 256) as u8];
        hasher.update(&pair);
        reference_data.extend_from_slice(&pair);
    }

    let streaming_hash = hasher.finalize();
    let reference_hash = clockhash256(&reference_data);

    assert_eq!(streaming_hash, reference_hash, "Many small updates should match reference");
}

/// Updates of exactly one 128-byte block each must match the one-shot hash.
#[test]
fn test_exact_block_filling() {
    // Ten blocks of deterministic bytes, built once.
    let reference_data: Vec<u8> = (0..10 * 128).map(|i| (i % 256) as u8).collect();

    // Feed exactly 128 bytes per update.
    let mut hasher = ClockHasher::new();
    for block in reference_data.chunks(128) {
        hasher.update(block);
    }

    let streaming_hash = hasher.finalize();
    let reference_hash = clockhash256(&reference_data);

    assert_eq!(streaming_hash, reference_hash, "Exact block filling should work correctly");
}

/// A trailing partial block after several full blocks must hash correctly.
#[test]
fn test_partial_final_block() {
    // Five full 128-byte blocks of 0xAA followed by a 73-byte tail of 0xBB.
    let mut reference_data = vec![0xAAu8; 5 * 128];
    reference_data.extend_from_slice(&[0xBBu8; 73]);

    // chunks(128) yields the five full blocks, then the 73-byte remainder —
    // the same update sequence the streaming path must handle.
    let mut hasher = ClockHasher::new();
    for chunk in reference_data.chunks(128) {
        hasher.update(chunk);
    }
    let streaming_hash = hasher.finalize();

    let reference_hash = clockhash256(&reference_data);

    assert_eq!(streaming_hash, reference_hash, "Partial final block should be handled correctly");
}

/// Document that finalize() consumes the hasher (enforced at compile time).
#[test]
fn test_hasher_consumed_after_finalize() {
    let mut hasher = ClockHasher::new();
    hasher.update(b"test data");

    let _hash = hasher.finalize();

    // finalize() takes `self` by value, so `hasher` is moved and any further
    // use is rejected by the borrow checker. Uncommenting the line below
    // would produce error[E0382]: use of moved value.
    // hasher.update(b"more data");
}

/// Zero-length slices surrounding a single data update must be ignored.
#[test]
fn test_zero_length_streaming() {
    // Empty slices before and after the only real payload.
    let pieces: [&[u8]; 4] = [&[], &[], b"data", &[]];

    let mut hasher = ClockHasher::new();
    for piece in pieces {
        hasher.update(piece);
    }

    let hash = hasher.finalize();
    let reference_hash = clockhash256(b"data");

    assert_eq!(hash, reference_hash, "Zero-length updates should be ignored");
}

/// A long session of 100 updates with cycling chunk sizes must match the reference.
#[test]
fn test_long_streaming_session() {
    let mut hasher = ClockHasher::new();

    // Cycle through irregular chunk sizes for 100 updates total.
    let chunk_sizes: [usize; 10] = [1, 3, 7, 13, 31, 47, 64, 97, 128, 256];
    let mut total_data = Vec::new();
    let mut data_counter = 0u8;

    for &chunk_size in chunk_sizes.iter().cycle().take(100) {
        // Fill the chunk with a wrapping byte counter.
        let mut chunk = Vec::with_capacity(chunk_size);
        for _ in 0..chunk_size {
            chunk.push(data_counter);
            data_counter = data_counter.wrapping_add(1);
        }

        hasher.update(&chunk);
        total_data.extend_from_slice(&chunk);
    }

    let streaming_hash = hasher.finalize();
    let reference_hash = clockhash256(&total_data);

    assert_eq!(streaming_hash, reference_hash, "Long streaming session should match reference");
}

/// Update patterns chosen to stress the internal buffer must match the reference.
#[test]
fn test_buffer_edge_cases() {
    // Each case is (description, sequence of update payloads).
    let test_cases: Vec<(&str, Vec<Vec<u8>>)> = vec![
        ("single_byte_then_large", vec![vec![0x42], vec![0xFF; 200]]),
        ("large_then_single_byte", vec![vec![0xAA; 200], vec![0x99]]),
        ("exact_buffer_fill", vec![vec![0x11; 128], vec![0x22; 128]]),
        ("overfill_buffer", vec![vec![0x33; 150], vec![0x44; 100]]),
        ("many_small_chunks", (0..50).map(|i| vec![i as u8]).collect()),
    ];

    for (description, chunks) in test_cases {
        // Reference is simply the concatenation of all payloads.
        let reference_data: Vec<u8> = chunks.iter().flatten().copied().collect();

        let mut hasher = ClockHasher::new();
        for chunk in &chunks {
            hasher.update(chunk);
        }

        let streaming_hash = hasher.finalize();
        let reference_hash = clockhash256(&reference_data);

        assert_eq!(streaming_hash, reference_hash,
            "Buffer edge case '{}' failed", description);
    }
}

/// `Default` must yield the same initial state as `new()`.
#[test]
fn test_hasher_default_implementation() {
    // Finalizing immediately exposes the initial state as an empty-input hash.
    let hash1 = ClockHasher::new().finalize();
    let hash2 = ClockHasher::default().finalize();

    assert_eq!(hash1, hash2, "Default and new() should produce identical empty hashes");
}

/// Splitting the stream at every possible point must match the one-shot hash.
#[test]
fn test_streaming_pause_resume() {
    let data = b"This is a long message that will be streamed and paused at various points";

    // The unsplit hash is loop-invariant: compute it once instead of
    // re-hashing the whole message for every pause point (previously O(n^2)).
    let mut reference = ClockHasher::new();
    reference.update(data);
    let hash1 = reference.finalize();

    // 0 and data.len() are included to also cover the degenerate splits
    // where one side of the pause is empty.
    for pause_point in 0..=data.len() {
        let mut hasher = ClockHasher::new();
        hasher.update(&data[..pause_point]);
        // Simulated pause: the hasher simply stays alive between updates.
        hasher.update(&data[pause_point..]);
        let hash2 = hasher.finalize();

        assert_eq!(hash1, hash2,
            "Pause and resume at point {} should produce same result", pause_point);
    }
}