#[cfg(test)]
#[allow(clippy::module_inception)]
mod tests {
    use crate::llama::config::LlamaConfig;
    use crate::llama::model::{LlamaForCausalLM, LlamaModel, RMSNorm, RotaryEmbedding};
    use trustformers_core::traits::Config;

    #[test]
    fn test_llama_config_validation() {
        let config = LlamaConfig {
            num_hidden_layers: 2,
            vocab_size: 1000,
            hidden_size: 64,
            num_attention_heads: 8,
            intermediate_size: 256,
            ..LlamaConfig::default()
        };
        assert!(config.validate().is_ok());
        assert_eq!(config.head_dim(), 8);
        assert_eq!(config.num_kv_heads(), 8);
        assert_eq!(config.num_query_groups(), 1);
    }

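    // A minimal consistency sketch using only accessors already exercised
    // above: head_dim() times the head count should recover hidden_size,
    // assuming head_dim() is defined as hidden_size / num_attention_heads
    // (consistent with 64 / 8 = 8 in the test before this one).
    #[test]
    fn test_head_dim_consistency_sketch() {
        let config = LlamaConfig {
            num_hidden_layers: 2,
            vocab_size: 1000,
            hidden_size: 64,
            num_attention_heads: 8,
            intermediate_size: 256,
            ..LlamaConfig::default()
        };
        assert_eq!(config.head_dim() * 8, 64);
    }
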
    #[test]
    fn test_llama2_config_with_gqa() {
        let config = LlamaConfig {
            num_hidden_layers: 2,
            vocab_size: 1000,
            hidden_size: 64,
            num_attention_heads: 8,
            num_key_value_heads: Some(2),
            intermediate_size: 256,
            ..LlamaConfig::default()
        };
        assert!(config.validate().is_ok());
        assert_eq!(config.num_kv_heads(), 2);
        assert_eq!(config.num_query_groups(), 4);
        assert_eq!(config.head_dim(), 8);
    }

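    // GQA grouping sketch: with 8 query heads sharing 2 KV heads, each KV
    // head serves 4 query heads, so num_query_groups() * num_kv_heads()
    // should give back the query head count. Assumes num_query_groups() is
    // num_attention_heads / num_kv_heads(), consistent with the test above.
    #[test]
    fn test_gqa_grouping_invariant_sketch() {
        let config = LlamaConfig {
            num_hidden_layers: 2,
            vocab_size: 1000,
            hidden_size: 64,
            num_attention_heads: 8,
            num_key_value_heads: Some(2),
            intermediate_size: 256,
            ..LlamaConfig::default()
        };
        assert_eq!(config.num_query_groups() * config.num_kv_heads(), 8);
    }
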
    #[test]
    fn test_code_llama_config() {
        let config = LlamaConfig {
            num_hidden_layers: 2,
            vocab_size: 1000,
            hidden_size: 64,
            num_attention_heads: 8,
            intermediate_size: 256,
            max_position_embeddings: 512,
            ..LlamaConfig::default()
        };
        assert!(config.validate().is_ok());
        assert_eq!(config.max_position_embeddings, 512);
        assert_eq!(config.vocab_size, 1000);
    }

    #[test]
    fn test_llama_architecture() {
        let config = LlamaConfig::default();
        assert_eq!(config.architecture(), "LLaMA");
    }

    #[test]
    fn test_invalid_llama_config() {
        // hidden_size 4095 is deliberately invalid: it cannot be split evenly
        // across the default number of attention heads.
        let config = LlamaConfig {
            hidden_size: 4095,
            ..LlamaConfig::default()
        };
        assert!(config.validate().is_err());
    }

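    // Hedged sketch of a second invalid case, assuming validate() rejects a
    // hidden_size that num_attention_heads does not divide evenly (the likely
    // reason 4095 fails above). If validation applies a different rule,
    // adjust the expectation accordingly.
    #[test]
    fn test_indivisible_head_dim_sketch() {
        let config = LlamaConfig {
            hidden_size: 64,
            num_attention_heads: 7,
            ..LlamaConfig::default()
        };
        assert!(config.validate().is_err());
    }
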
    #[test]
    fn test_rmsnorm_creation() {
        let rmsnorm = RMSNorm::new(4096, 1e-6);
        assert!(rmsnorm.is_ok());
    }

    #[test]
    fn test_rotary_embedding_creation() {
        let rope = RotaryEmbedding::new(128, 2048, 10000.0);
        assert_eq!(rope.dim, 128);
        assert_eq!(rope.max_seq_len, 2048);
        assert_eq!(rope.base, 10000.0);
    }

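    // Sketch with a larger RoPE base: Code Llama raises the rotary base
    // (theta) to 1e6 for long contexts. This only re-exercises the
    // constructor and public fields shown above; no additional
    // RotaryEmbedding API is assumed.
    #[test]
    fn test_rotary_embedding_large_base_sketch() {
        let rope = RotaryEmbedding::new(128, 16384, 1_000_000.0);
        assert_eq!(rope.dim, 128);
        assert_eq!(rope.max_seq_len, 16384);
        assert_eq!(rope.base, 1_000_000.0);
    }
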
    #[test]
    fn test_llama_model_creation() {
        let config = LlamaConfig {
            num_hidden_layers: 2,
            vocab_size: 1000,
            hidden_size: 64,
            num_attention_heads: 8,
            intermediate_size: 256,
            ..LlamaConfig::default()
        };
        let model = LlamaModel::new(config);
        assert!(model.is_ok());
    }

    #[test]
    fn test_llama_for_causal_lm_creation() {
        let config = LlamaConfig {
            num_hidden_layers: 1,
            vocab_size: 100,
            hidden_size: 32,
            num_attention_heads: 4,
            intermediate_size: 128,
            ..LlamaConfig::default()
        };
        let model = LlamaForCausalLM::new(config);
        assert!(model.is_ok());
    }

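    // Sketch combining the pieces above: building LlamaForCausalLM from a GQA
    // config (fewer KV heads than query heads). Uses only constructors and
    // fields already exercised in this module.
    #[test]
    fn test_llama_for_causal_lm_with_gqa_sketch() {
        let config = LlamaConfig {
            num_hidden_layers: 1,
            vocab_size: 100,
            hidden_size: 32,
            num_attention_heads: 4,
            num_key_value_heads: Some(2),
            intermediate_size: 128,
            ..LlamaConfig::default()
        };
        let model = LlamaForCausalLM::new(config);
        assert!(model.is_ok());
    }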
}