// Clippy's disallowed-methods list is relaxed for this file: these
// determinism tests intentionally use raw env access and float compares.
#![allow(clippy::disallowed_methods)]
// Maximum relative error tolerated between logits computed on different
// hardware float pipelines (exercised by d192_03's Intel-vs-ARM check).
const MAX_LOGIT_RELATIVE_ERROR: f32 = 1e-4;
// Minimum percentage of tokens that must match across architectures for a
// decode to count as deterministic (used by d192_05).
const MIN_TOKEN_MATCH_PERCENT: f32 = 99.0;
/// Two decodes of the same prompt on the same machine must agree
/// token-for-token; the two runs are modeled by identical sequences.
#[test]
fn d192_01_within_machine_determinism() {
    let run1_tokens: Vec<u32> = (1..=5).collect();
    let run2_tokens: Vec<u32> = (1..=5).collect();
    assert_eq!(
        run1_tokens, run2_tokens,
        "Within-machine determinism: same prompt should produce same tokens"
    );
}
/// Indices 1 and 2 hold the same maximum; the tie must resolve to the
/// lower index, and must do so identically on every invocation.
#[test]
fn d192_02_argmax_tiebreaking_determinism() {
    let logits = [1.0f32, 2.0, 2.0, 1.5];
    let selected = argmax_with_tiebreak(&logits);
    assert_eq!(
        selected, 1,
        "Argmax should consistently select lowest index on tie"
    );
    // Re-run many times to guard against hidden nondeterminism.
    for _attempt in 0..100 {
        let repeat = argmax_with_tiebreak(&logits);
        assert_eq!(repeat, selected, "Argmax tie-breaking should be consistent");
    }
}
/// Simulated logits from two float pipelines that differ only in the last
/// few ULPs (e.g. FMA-contracted vs separate multiply/add) must agree
/// within the configured relative tolerance.
#[test]
fn d192_03_logit_fma_tolerance() {
    let intel_logits = [1.0000001f32, 2.0000002, 3.0000003];
    let arm_logits = [1.0000002f32, 2.0000001, 3.0000004];
    // Relative error, with the zero-magnitude case guarded against 0/0.
    let rel_err = |a: f32, b: f32| {
        let scale = a.abs().max(b.abs());
        if scale > 0.0 { (a - b).abs() / scale } else { 0.0 }
    };
    for (i, (&a, &b)) in intel_logits.iter().zip(arm_logits.iter()).enumerate() {
        let relative_error = rel_err(a, b);
        assert!(
            relative_error < MAX_LOGIT_RELATIVE_ERROR,
            "Logit {i} relative error {relative_error:.2e} exceeds tolerance {MAX_LOGIT_RELATIVE_ERROR:.2e}"
        );
    }
}
/// Exercises the golden-output comparison contract: prompt and token
/// sequence must match exactly, while the first-token logit is compared
/// within an absolute tolerance.
#[test]
fn d192_04_golden_output_framework() {
    // Reference transcript captured from a known-good run.
    let golden = GoldenOutput {
        prompt: "What is 2+2?".to_string(),
        tokens: vec![17, 220, 17, 489, 220, 17, 16, 220],
        first_token_logit: 12.345,
    };
    // A fresh run that reproduced the reference exactly.
    let actual = golden.clone();
    assert_eq!(
        golden.prompt, actual.prompt,
        "Prompt should match golden reference"
    );
    assert_eq!(
        golden.tokens, actual.tokens,
        "Token sequence should match golden reference"
    );
    // Logits only need to agree within a small absolute tolerance.
    let logit_diff = (golden.first_token_logit - actual.first_token_logit).abs();
    assert!(
        logit_diff < 0.001,
        "First token logit should be within tolerance of golden reference"
    );
}
/// Greedy decodes of the same prompt on two architectures must agree on
/// at least MIN_TOKEN_MATCH_PERCENT of token positions.
#[test]
fn d192_05_cross_architecture_token_match() {
    let arch1_tokens: Vec<u32> = vec![17, 220, 17, 489, 220, 17, 16, 220, 100, 200];
    let arch2_tokens = arch1_tokens.clone();
    // Count the positions where the two decodes agree.
    let mut matches = 0usize;
    for (a, b) in arch1_tokens.iter().zip(arch2_tokens.iter()) {
        if a == b {
            matches += 1;
        }
    }
    let match_percent = (matches as f32 / arch1_tokens.len() as f32) * 100.0;
    assert!(
        match_percent >= MIN_TOKEN_MATCH_PERCENT,
        "Token match {match_percent:.1}% below minimum {MIN_TOKEN_MATCH_PERCENT}%"
    );
}
/// Verifies the APR_STRICT_DETERMINISM flag parsing: "1" or any casing of
/// "true" enables strict mode; every other value (including unset)
/// disables it. The original assertion `!strict_mode || strict_mode` was a
/// tautology (true for every bool) and verified nothing.
#[test]
fn d192_06_strict_determinism_env_var() {
    // The parsing rule under test, applied both to fixed inputs and to
    // whatever is actually in the environment.
    let parse = |v: &str| v == "1" || v.to_lowercase() == "true";
    // Accepted spellings enable strict mode ...
    assert!(parse("1"), "\"1\" should enable strict determinism");
    assert!(parse("true"), "\"true\" should enable strict determinism");
    assert!(parse("TRUE"), "casing of \"true\" should not matter");
    // ... everything else disables it.
    assert!(!parse("0"), "\"0\" should disable strict determinism");
    assert!(!parse(""), "empty value should disable strict determinism");
    assert!(!parse("yes"), "unrecognized values should disable strict determinism");
    // Reading the live environment must never panic: an unset variable
    // falls back to non-strict via unwrap_or(false).
    let _strict_mode = std::env::var("APR_STRICT_DETERMINISM")
        .map(|v| parse(&v))
        .unwrap_or(false);
}
/// Two generators constructed from the same seed must emit identical
/// sample streams.
#[test]
fn d192_07_seed_reproducibility() {
    let seed = 42u64;
    let mut rng1 = SimpleRng::new(seed);
    let mut rng2 = SimpleRng::new(seed);
    // Draw 100 samples from each generator in lockstep.
    let mut samples1 = Vec::with_capacity(100);
    let mut samples2 = Vec::with_capacity(100);
    for _ in 0..100 {
        samples1.push(rng1.next_f32());
        samples2.push(rng2.next_f32());
    }
    assert_eq!(
        samples1, samples2,
        "Same seed should produce identical random sequences"
    );
}
/// Returns the index of the largest value in `values`, breaking ties by
/// choosing the lowest index. An empty slice yields 0. NaN never compares
/// greater than the incumbent candidate, so it cannot displace an earlier
/// value (matching the original's `partial_cmp(..).unwrap_or(Equal)`
/// handling).
fn argmax_with_tiebreak(values: &[f32]) -> usize {
    let mut best_value = match values.first() {
        Some(&v) => v,
        None => return 0,
    };
    let mut best_index = 0usize;
    // Forward scan with a strict `>` keeps the earliest index on ties.
    for (offset, &candidate) in values.iter().enumerate().skip(1) {
        if candidate > best_value {
            best_index = offset;
            best_value = candidate;
        }
    }
    best_index
}
/// A captured reference transcript for the golden-output tests: the
/// prompt, the token ids produced, and the first generated token's logit
/// (compared within an absolute tolerance in d192_04 rather than exactly).
#[derive(Debug, Clone)]
struct GoldenOutput {
    // Prompt text the transcript was generated from.
    prompt: String,
    // Token ids of the decoded output (presumably in generation order —
    // confirm against the producer of these transcripts).
    tokens: Vec<u32>,
    // Logit of the first generated token; checked with tolerance 0.001.
    first_token_logit: f32,
}
/// Minimal xorshift64 pseudo-random generator used to verify that seeded
/// runs are reproducible. Deterministic by construction; not intended for
/// any use beyond these tests.
struct SimpleRng {
    // Current xorshift state; always nonzero (zero is a fixed point of
    // the xorshift step and would lock the stream at zero).
    state: u64,
}
impl SimpleRng {
    /// Creates a generator from `seed`, substituting 1 for a zero seed so
    /// the state is never the all-zero fixed point.
    fn new(seed: u64) -> Self {
        let state = if seed == 0 { 1 } else { seed };
        Self { state }
    }
    /// Advances the state by one xorshift64 step (shifts 13, 7, 17) and
    /// returns the new state.
    fn next_u64(&mut self) -> u64 {
        let a = self.state ^ (self.state << 13);
        let b = a ^ (a >> 7);
        let next = b ^ (b << 17);
        self.state = next;
        next
    }
    /// Returns the next sample scaled into the unit interval.
    fn next_f32(&mut self) -> f32 {
        self.next_u64() as f32 / u64::MAX as f32
    }
}
#[cfg(test)]
mod architecture_documentation {
    /// Anchor test for the determinism-architecture notes: it passes as
    /// long as this file compiles and the harness runs it. The former
    /// `assert!(true)` body was removed — a constant assert is optimized
    /// out and is flagged by `clippy::assert_on_constants`.
    #[test]
    fn documentation_compiles() {}
}