use trueno::tuner::{KernelType, QuantType, TunerFeatures};
#[test]
fn test_online_learner_custom_lr() {
    use trueno::tuner::OnlineLearner;
    // A learner with a custom learning rate should still count updates.
    // Declare the binding mutable up front — the original bound it
    // immutably and then shadowed it with `let mut learner = learner;`
    // purely to gain mutability.
    let mut learner = OnlineLearner::new().with_learning_rate(0.01);
    let features = vec![0.5; TunerFeatures::DIM];
    learner.observe(&features, 100.0);
    assert_eq!(
        learner.num_updates(),
        1,
        "one valid observation should register exactly one update"
    );
}
#[test]
fn test_online_learner_replay_buffer() {
    use trueno::tuner::OnlineLearner;
    // Feed more observations than a typical replay-buffer capacity and
    // confirm that every single one is still counted as an update.
    let mut learner = OnlineLearner::new().with_learning_rate(0.001);
    let features = vec![0.5; TunerFeatures::DIM];
    (0..150).for_each(|i| learner.observe(&features, 100.0 + i as f32));
    assert_eq!(learner.num_updates(), 150);
}
#[test]
fn test_online_learner_dimension_mismatch() {
    use trueno::tuner::OnlineLearner;
    let mut learner = OnlineLearner::new();
    // A feature vector that is too short must be silently rejected,
    // leaving the update counter untouched.
    let short_features = vec![0.5; 10];
    learner.observe(&short_features, 100.0);
    assert_eq!(learner.num_updates(), 0, "Dimension mismatch should be ignored");
    // An empty slice is also a mismatch and must not count either.
    learner.observe(&[], 100.0);
    assert_eq!(learner.num_updates(), 0);
}
#[test]
fn test_thompson_convergence() {
    use trueno::tuner::KernelBandit;
    let mut bandit = KernelBandit::with_thompson_sampling();
    // Reward BatchedQ4K consistently higher than TiledQ4K over many
    // rounds; a Thompson-sampling bandit should settle on the winner.
    let mut round = 0;
    while round < 50 {
        bandit.update(KernelType::BatchedQ4K, 0.95);
        bandit.update(KernelType::TiledQ4K, 0.3);
        round += 1;
    }
    assert_eq!(bandit.best_kernel(), KernelType::BatchedQ4K);
}
#[test]
fn test_bandit_regret_positive() {
    use trueno::tuner::KernelBandit;
    // After observing mixed rewards across three kernels, the bandit's
    // regret estimate must never dip below zero.
    let mut bandit = KernelBandit::new();
    bandit.update(KernelType::BatchedQ4K, 0.9);
    bandit.update(KernelType::TiledQ4K, 0.5);
    bandit.update(KernelType::CoalescedQ4K, 0.6);
    let estimated = bandit.estimated_regret();
    assert!(
        estimated >= 0.0,
        "Regret should be non-negative: {}",
        estimated
    );
}
#[test]
fn test_pretrained_dimensions_consistent() {
    use trueno::tuner::pretrained;
    // Every per-kernel weight vector must be the same length as the
    // shared throughput weight vector; hoist that length once.
    let expected_len = pretrained::THROUGHPUT_WEIGHTS.len();
    for (i, weights) in pretrained::KERNEL_WEIGHTS.iter().enumerate() {
        assert_eq!(
            weights.len(),
            expected_len,
            "Kernel weights {} should match throughput weights length",
            i
        );
    }
}
#[test]
fn test_feature_importance_indices_valid() {
    use trueno::tuner::pretrained;
    // Each (index, name, importance) triple must reference a valid
    // feature slot and carry a normalized importance score.
    for (idx, name, importance) in &pretrained::FEATURE_IMPORTANCE {
        assert!(
            *idx < TunerFeatures::DIM,
            "Feature index {} ({}) exceeds DIM {}",
            idx,
            name,
            TunerFeatures::DIM
        );
        // Clippy `manual_range_contains`: a range states the bound pair
        // directly; also attach a message so failures name the feature.
        assert!(
            (0.0..=1.0).contains(importance),
            "Importance for {} outside [0, 1]: {}",
            name,
            importance
        );
    }
}
#[test]
fn test_quant_type_bytes() {
    // Q4K is a sub-byte quantization; the remaining formats have exact
    // byte widths per parameter.
    assert!(
        QuantType::Q4K.bytes_per_param() < 1.0,
        "Q4K should pack below one byte per parameter"
    );
    // `assert_eq!` reports both operands on failure, unlike the
    // original `assert!(a == b)` form which prints nothing useful.
    assert_eq!(QuantType::F32.bytes_per_param(), 4.0);
    assert_eq!(QuantType::F16.bytes_per_param(), 2.0);
    assert_eq!(QuantType::Q8_0.bytes_per_param(), 1.0);
}
#[test]
fn test_bottleneck_actions() {
    use trueno::tuner::BottleneckClass;
    // Every bottleneck classification must come with a non-empty
    // recommended action; one closure replaces five copy-pasted asserts.
    let check = |class: BottleneckClass| {
        assert!(!class.recommended_action().is_empty());
    };
    check(BottleneckClass::MemoryBound);
    check(BottleneckClass::ComputeBound);
    check(BottleneckClass::LaunchBound);
    check(BottleneckClass::AttentionBound);
    check(BottleneckClass::Unknown);
}
#[test]
fn test_tuner_features_builder_extended() {
// Exercises every setter on the TunerFeatures builder with values
// resembling a 7B-parameter transformer decode step (4096 hidden dim,
// 32 layers/heads, Q4K weights — presumably Llama-2-7B-like; confirm
// against the pretrained data), then checks that the built features
// pass validation and encode to a vector of exactly TunerFeatures::DIM
// elements.
let features = TunerFeatures::builder()
.model_params_b(7.0)
.hidden_dim(4096)
.num_layers(32)
.num_heads(32)
.head_dim(128)
.vocab_size(32000)
.batch_size(4)
.seq_len(512)
.cuda_graphs(true)
.kv_caches(1)
.is_prefill(false)
.quant_type(QuantType::Q4K)
.kernel_type(KernelType::BatchedQ4K)
.gpu_mem_bw_gbs(1000.0)
.gpu_compute_tflops(83.0)
.gpu_sm_count(128)
.gpu_l2_cache_mb(72.0)
.is_zero_copy(false)
.measured_tps(150.0)
.build();
assert!(features.validate().is_ok());
// The flattened feature vector must cover every feature slot.
let vec = features.to_vector();
assert_eq!(vec.len(), TunerFeatures::DIM);
}
#[test]
fn test_kernel_type_to_index_all() {
    // The declaration order of this list is the expected index mapping:
    // a variant's position here is what `to_index()` must return.
    let ordered = [
        KernelType::TiledQ4K,
        KernelType::CoalescedQ4K,
        KernelType::VectorizedQ4K,
        KernelType::BatchedQ4K,
        KernelType::Dp4aQ4K,
        KernelType::FusedRmsNormQ4K,
        KernelType::CoalescedQ6K,
        KernelType::IncrementalAttention,
        KernelType::MultiWarpAttention,
        KernelType::BatchedAttention,
        KernelType::RmsNorm,
        KernelType::VectorizedRmsNorm,
        KernelType::BatchedRmsNorm,
        KernelType::Generic,
        KernelType::Unknown,
    ];
    let mut expected = 0;
    for kernel in ordered.iter() {
        assert_eq!(kernel.to_index(), expected, "Index mismatch for {:?}", kernel);
        expected += 1;
    }
}
#[test]
fn test_quant_type_to_index_all() {
    // Pin every quantization type to its stable feature-encoding index
    // via one table-driven loop instead of eight repeated asserts.
    let cases = [
        (QuantType::Q4_0, 0),
        (QuantType::Q4_1, 1),
        (QuantType::Q4K, 2),
        (QuantType::Q5K, 3),
        (QuantType::Q6K, 4),
        (QuantType::Q8_0, 5),
        (QuantType::F16, 6),
        (QuantType::F32, 7),
    ];
    for (quant, expected) in cases.iter() {
        assert_eq!(quant.to_index(), *expected);
    }
}
#[test]
fn test_score_summary() {
    // Informational "test": prints the falsification suite's scoring
    // rubric to the captured test output. It makes no assertions.
    let summary = [
        "\n=== Popperian Falsification Test Suite ===",
        "Categories:",
        " F001-F020: Model Accuracy (20 points)",
        " F021-F040: Feature Engineering (20 points)",
        " F041-F060: Training Data Quality (20 points)",
        " F061-F080: Integration Correctness (20 points)",
        " F081-F100: Generalization & Robustness (20 points)",
        " F280-F295: Phase 14 ML-Tuner Evolution (16 points)",
        "\nTotal: 116 points",
        "Minimum passing score: 100 points",
        "\nRun with: cargo test tuner_falsification --release",
    ];
    for line in &summary {
        println!("{}", line);
    }
}