use crate::models::ModelId;
use crate::oracle::{OracleResult, select_oracle};
use serde::{Deserialize, Serialize};
/// How a model is exercised: one-shot generation (`run`), interactive
/// stdin-driven chat (`chat`), or an HTTP completion server (`serve`).
/// Serialized in lowercase (e.g. `"run"`) for scenario JSON.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum Modality {
    Run,
    Chat,
    Serve,
}
impl Modality {
    /// Every modality variant, in declaration order.
    #[must_use]
    pub const fn all() -> [Self; 3] {
        [Self::Run, Self::Chat, Self::Serve]
    }
    /// The `apr` subcommand name corresponding to this modality.
    #[must_use]
    pub const fn command(&self) -> &'static str {
        match *self {
            Self::Run => "run",
            Self::Chat => "chat",
            Self::Serve => "serve",
        }
    }
}
impl std::fmt::Display for Modality {
    /// Writes the lowercase command name, delegating to [`Modality::command`]
    /// so the display form and the CLI spelling can never drift apart.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(self.command())
    }
}
/// Compute backend a scenario runs on. Serialized in lowercase.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum Backend {
    Cpu,
    Gpu,
}
impl Backend {
    /// Both execution backends.
    #[must_use]
    pub const fn all() -> [Self; 2] {
        [Self::Cpu, Self::Gpu]
    }
    /// CLI flag selecting this backend. CPU is the default, so it needs no
    /// flag and yields the empty string.
    #[must_use]
    pub const fn flag(&self) -> &'static str {
        match *self {
            Self::Gpu => "--gpu",
            Self::Cpu => "",
        }
    }
}
impl std::fmt::Display for Backend {
    /// Writes the lowercase backend name ("cpu" / "gpu").
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let label = match *self {
            Self::Cpu => "cpu",
            Self::Gpu => "gpu",
        };
        f.write_str(label)
    }
}
/// On-disk model file format. Serialized in lowercase.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum Format {
    Gguf,
    SafeTensors,
    Apr,
}
impl Format {
    /// All supported model formats.
    #[must_use]
    pub const fn all() -> [Self; 3] {
        [Self::Gguf, Self::SafeTensors, Self::Apr]
    }
    /// File extension for this format, including the leading dot.
    #[must_use]
    pub const fn extension(&self) -> &'static str {
        match *self {
            Self::Gguf => ".gguf",
            Self::SafeTensors => ".safetensors",
            Self::Apr => ".apr",
        }
    }
    /// Classification letter: 'B' for safetensors, 'A' for everything else.
    #[must_use]
    pub const fn class(&self) -> char {
        match *self {
            Self::SafeTensors => 'B',
            Self::Gguf | Self::Apr => 'A',
        }
    }
}
impl std::fmt::Display for Format {
    /// Writes the lowercase format name ("gguf" / "safetensors" / "apr").
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let label = match *self {
            Self::Gguf => "gguf",
            Self::SafeTensors => "safetensors",
            Self::Apr => "apr",
        };
        f.write_str(label)
    }
}
/// Every `apr` CLI subcommand exercised by QA. Serialized in lowercase.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum AprTool {
    Run,
    Chat,
    Serve,
    Inspect,
    Validate,
    Bench,
    Profile,
    Trace,
    Check,
    Canary,
}
impl AprTool {
    /// All ten tools, in declaration order.
    #[must_use]
    pub const fn all() -> [Self; 10] {
        [
            Self::Run,
            Self::Chat,
            Self::Serve,
            Self::Inspect,
            Self::Validate,
            Self::Bench,
            Self::Profile,
            Self::Trace,
            Self::Check,
            Self::Canary,
        ]
    }
    /// The subcommand name as typed on the command line.
    #[must_use]
    pub const fn command(&self) -> &'static str {
        match *self {
            Self::Run => "run",
            Self::Chat => "chat",
            Self::Serve => "serve",
            Self::Inspect => "inspect",
            Self::Validate => "validate",
            Self::Bench => "bench",
            Self::Profile => "profile",
            Self::Trace => "trace",
            Self::Check => "check",
            Self::Canary => "canary",
        }
    }
    /// Whether this tool takes a prompt argument (only `run` and `chat`).
    #[must_use]
    pub const fn requires_prompt(&self) -> bool {
        matches!(*self, Self::Run | Self::Chat)
    }
    /// Whether this tool accepts trace flags (only `run` and `trace`).
    #[must_use]
    pub const fn supports_trace(&self) -> bool {
        matches!(*self, Self::Run | Self::Trace)
    }
}
impl std::fmt::Display for AprTool {
    /// Delegates to [`AprTool::command`] so the display form always matches
    /// the CLI spelling.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(self.command())
    }
}
/// Verbosity of execution tracing, from none to full payload capture.
/// Serialized in lowercase.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum TraceLevel {
    None,
    Basic,
    Layer,
    Payload,
}
impl TraceLevel {
    /// All trace levels, ordered from least to most verbose.
    #[must_use]
    pub const fn all() -> [Self; 4] {
        [Self::None, Self::Basic, Self::Layer, Self::Payload]
    }
    /// The value passed to the `--trace-level` CLI option.
    #[must_use]
    pub const fn value(&self) -> &'static str {
        match *self {
            Self::None => "none",
            Self::Basic => "basic",
            Self::Layer => "layer",
            Self::Payload => "payload",
        }
    }
}
/// A single, fully-specified QA scenario: which model to run, how to run it,
/// what to ask, and which oracle judges the output.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct QaScenario {
    // Unique id: "{model}_{modality}_{backend}_{format}_{seed:016x}".
    pub id: String,
    // Model under test.
    pub model: ModelId,
    // Execution modality (run / chat / serve).
    pub modality: Modality,
    // Compute backend (cpu / gpu).
    pub backend: Backend,
    // On-disk model format.
    pub format: Format,
    // Prompt text sent to the model.
    pub prompt: String,
    // Sampling temperature; defaults to 0.0 (greedy) for determinism.
    pub temperature: f32,
    // Generation cap in tokens; defaults to 32.
    pub max_tokens: u32,
    // RNG seed; also embedded (zero-padded hex) in `id`.
    pub seed: u64,
    // Trace verbosity; `None` emits no trace flags in the command line.
    pub trace_level: TraceLevel,
    // Name of the oracle chosen from the prompt at construction time.
    pub oracle_type: String,
}
impl QaScenario {
    /// Builds a scenario with deterministic defaults (temperature 0.0,
    /// 32 max tokens, no tracing). The oracle is selected from the prompt
    /// up front and its name recorded in `oracle_type`.
    #[must_use]
    pub fn new(
        model: ModelId,
        modality: Modality,
        backend: Backend,
        format: Format,
        prompt: String,
        seed: u64,
    ) -> Self {
        let oracle = select_oracle(&prompt);
        Self {
            // Id encodes every axis plus the seed in zero-padded hex, so it
            // is unique per (model, modality, backend, format, seed) tuple.
            id: format!(
                "{}_{}_{}_{}_{:016x}",
                model.name, modality, backend, format, seed
            ),
            model,
            modality,
            backend,
            format,
            prompt,
            temperature: 0.0, max_tokens: 32,
            seed,
            trace_level: TraceLevel::None,
            oracle_type: oracle.name().to_string(),
        }
    }
    /// Builder-style override for the sampling temperature.
    #[must_use]
    pub const fn with_temperature(mut self, temp: f32) -> Self {
        self.temperature = temp;
        self
    }
    /// Builder-style override for the generation token cap.
    #[must_use]
    pub const fn with_max_tokens(mut self, tokens: u32) -> Self {
        self.max_tokens = tokens;
        self
    }
    /// Builder-style override for the trace verbosity.
    #[must_use]
    pub const fn with_trace_level(mut self, level: TraceLevel) -> Self {
        self.trace_level = level;
        self
    }
    /// Renders the shell command that executes this scenario against
    /// `model_path`. Run and Chat produce a single command line; Serve
    /// produces a short script (background server + curl + kill).
    ///
    /// NOTE(review): when `backend_flag` or `trace_flag` is empty the output
    /// can contain doubled interior spaces; `trim` only strips the ends.
    /// Harmless to a shell, but confirm no consumer compares commands
    /// byte-for-byte.
    #[must_use]
    pub fn to_command(&self, model_path: &str) -> String {
        let backend_flag = self.backend.flag();
        // TraceLevel::None suppresses both --trace and --trace-level.
        let trace_flag = if self.trace_level == TraceLevel::None {
            String::new()
        } else {
            format!("--trace --trace-level {}", self.trace_level.value())
        };
        match self.modality {
            Modality::Run => {
                format!(
                    "apr run {model_path} '{}' -n {} --seed {} --temperature {} {backend_flag} {trace_flag}",
                    escape_prompt(&self.prompt),
                    self.max_tokens,
                    self.seed,
                    self.temperature
                )
                .trim()
                .to_string()
            }
            Modality::Chat => {
                // Chat pipes the prompt via stdin; note max_tokens and seed
                // are not forwarded here (unlike Run).
                format!(
                    "echo '{}' | apr chat {model_path} --temperature {} {backend_flag} {trace_flag}",
                    escape_prompt(&self.prompt),
                    self.temperature
                )
                .trim()
                .to_string()
            }
            Modality::Serve => {
                // Multi-line script: start the server in the background, wait
                // 2s for it to come up, POST a completion request, then kill
                // the background job. Raw-string body is emitted verbatim.
                format!(
                    r#"apr serve {model_path} --port ${{PORT}} {backend_flag} &
sleep 2
curl -s http://localhost:${{PORT}}/v1/completions \
-H 'Content-Type: application/json' \
-d '{{"prompt": "{}", "max_tokens": {}, "temperature": {}}}'
kill %1"#,
                    escape_json(&self.prompt),
                    self.max_tokens,
                    self.temperature
                )
            }
        }
    }
    /// Re-selects the oracle for this scenario's prompt and evaluates
    /// `output` against it.
    #[must_use]
    pub fn evaluate(&self, output: &str) -> OracleResult {
        let oracle = select_oracle(&self.prompt);
        oracle.evaluate(&self.prompt, output)
    }
    /// MQS category code A1..=A6: the (modality, backend) grid in order
    /// Run/Chat/Serve x Cpu/Gpu.
    #[must_use]
    pub const fn mqs_category(&self) -> &'static str {
        match self.modality {
            Modality::Run => match self.backend {
                Backend::Cpu => "A1",
                Backend::Gpu => "A2",
            },
            Modality::Chat => match self.backend {
                Backend::Cpu => "A3",
                Backend::Gpu => "A4",
            },
            Modality::Serve => match self.backend {
                Backend::Cpu => "A5",
                Backend::Gpu => "A6",
            },
        }
    }
}
/// Escapes a prompt for embedding in a single-quoted POSIX shell string by
/// replacing each `'` with the close-escape-reopen sequence `'\''`.
fn escape_prompt(prompt: &str) -> String {
    prompt.split('\'').collect::<Vec<_>>().join("'\\''")
}
/// Escapes a prompt for embedding inside a JSON string literal: backslash,
/// double quote, and newline are escaped. Other control characters (tab,
/// carriage return) are deliberately passed through unchanged.
fn escape_json(prompt: &str) -> String {
    let mut escaped = String::with_capacity(prompt.len());
    for ch in prompt.chars() {
        match ch {
            '\\' => escaped.push_str("\\\\"),
            '"' => escaped.push_str("\\\""),
            '\n' => escaped.push_str("\\n"),
            other => escaped.push(other),
        }
    }
    escaped
}
/// Produces batches of `QaScenario`s for one model across the full
/// modality x backend x format matrix.
#[derive(Debug, Clone)]
pub struct ScenarioGenerator {
    // Model every generated scenario targets.
    pub model: ModelId,
    // How many scenarios to emit per (modality, backend, format) triple.
    pub scenarios_per_combination: usize,
    // Prompt pool cycled through while generating.
    pub prompts: Vec<String>,
}
impl ScenarioGenerator {
    /// Creates a generator with 100 scenarios per (modality, backend, format)
    /// combination and the built-in default prompt pool.
    #[must_use]
    pub fn new(model: ModelId) -> Self {
        Self {
            model,
            scenarios_per_combination: 100,
            prompts: default_prompts(),
        }
    }
    /// Builder-style override for the per-combination scenario count.
    #[must_use]
    pub const fn with_scenarios_per_combination(mut self, count: usize) -> Self {
        self.scenarios_per_combination = count;
        self
    }
    /// Builder-style override replacing the prompt pool.
    #[must_use]
    pub fn with_prompts(mut self, prompts: Vec<String>) -> Self {
        self.prompts = prompts;
        self
    }
    /// Generates scenarios for the full modality x backend x format cross
    /// product, cycling through the prompt pool. Seeds increase monotonically
    /// across the whole batch so every scenario id is unique.
    ///
    /// Returns an empty vec when the prompt pool is empty; previously this
    /// panicked (`i % 0` — remainder by zero) as soon as one combination was
    /// generated.
    #[must_use]
    pub fn generate(&self) -> Vec<QaScenario> {
        // Guard: `i % self.prompts.len()` below panics on an empty pool.
        if self.prompts.is_empty() {
            return Vec::new();
        }
        // Preallocate: 3 modalities * 2 backends * 3 formats * n per combo.
        let total = Modality::all().len()
            * Backend::all().len()
            * Format::all().len()
            * self.scenarios_per_combination;
        let mut scenarios = Vec::with_capacity(total);
        let mut seed: u64 = 0;
        for modality in Modality::all() {
            for backend in Backend::all() {
                for format in Format::all() {
                    for i in 0..self.scenarios_per_combination {
                        let prompt = &self.prompts[i % self.prompts.len()];
                        scenarios.push(QaScenario::new(
                            self.model.clone(),
                            modality,
                            backend,
                            format,
                            prompt.clone(),
                            seed,
                        ));
                        seed = seed.wrapping_add(1);
                    }
                }
            }
        }
        scenarios
    }
    /// Generates scenarios for a single combination, one per prompt, capped
    /// at `scenarios_per_combination`. The base seed encodes the combination
    /// (modality in bits 32+, backend in bits 16+, format in the low bits) so
    /// distinct combinations never share seeds.
    #[must_use]
    pub fn generate_for(
        &self,
        modality: Modality,
        backend: Backend,
        format: Format,
    ) -> Vec<QaScenario> {
        let base_seed: u64 = (modality as u64) << 32 | (backend as u64) << 16 | (format as u64);
        self.prompts
            .iter()
            .take(self.scenarios_per_combination)
            .enumerate()
            .map(|(i, prompt)| {
                QaScenario::new(
                    self.model.clone(),
                    modality,
                    backend,
                    format,
                    prompt.clone(),
                    base_seed.wrapping_add(i as u64),
                )
            })
            .collect()
    }
}
/// Built-in prompt pool: arithmetic questions, code-completion stubs,
/// natural-language tasks, and edge cases (empty, whitespace-only, and
/// non-ASCII inputs).
fn default_prompts() -> Vec<String> {
    const PROMPTS: [&str; 18] = [
        "What is 2+2?",
        "Calculate 7*8",
        "What is 15-7?",
        "What is 100/4?",
        "2+2=",
        "def fibonacci(n):",
        "fn main() {",
        "async function fetch() {",
        "class Person:",
        "Write a haiku about programming.",
        "List three colors.",
        "Explain what a variable is in one sentence.",
        "Say hello in three languages.",
        "",
        " ",
        "Hello!",
        "你好",
        "こんにちは",
    ];
    PROMPTS.iter().map(|s| (*s).to_string()).collect()
}
// Unit tests: scenario construction, command rendering, generator batch
// sizes, MQS categories, escaping helpers, and accessor/display coverage
// for every enum.
#[cfg(test)]
#[allow(clippy::expect_used, clippy::redundant_closure_for_method_calls)]
mod tests {
    use super::*;
    use crate::models::ModelId;
    // --- scenario construction & command rendering ---
    #[test]
    fn test_scenario_creation() {
        let model = ModelId::new("Qwen", "Qwen2.5-Coder-1.5B");
        let scenario = QaScenario::new(
            model.clone(),
            Modality::Run,
            Backend::Cpu,
            Format::Gguf,
            "2+2=".to_string(),
            42,
        );
        assert_eq!(scenario.modality, Modality::Run);
        assert_eq!(scenario.backend, Backend::Cpu);
        assert_eq!(scenario.format, Format::Gguf);
        // Arithmetic prompt must select the arithmetic oracle.
        assert_eq!(scenario.oracle_type, "arithmetic");
    }
    #[test]
    fn test_scenario_to_command_run() {
        let model = ModelId::new("test", "model");
        let scenario = QaScenario::new(
            model,
            Modality::Run,
            Backend::Cpu,
            Format::Gguf,
            "Hello".to_string(),
            0,
        );
        let cmd = scenario.to_command("model.gguf");
        assert!(cmd.contains("apr run"));
        assert!(cmd.contains("model.gguf"));
        assert!(cmd.contains("Hello"));
    }
    #[test]
    fn test_scenario_to_command_gpu() {
        let model = ModelId::new("test", "model");
        let scenario = QaScenario::new(
            model,
            Modality::Run,
            Backend::Gpu,
            Format::Gguf,
            "Hello".to_string(),
            0,
        );
        let cmd = scenario.to_command("model.gguf");
        assert!(cmd.contains("--gpu"));
    }
    // --- generator batch sizes ---
    #[test]
    fn test_scenario_generator() {
        let model = ModelId::new("test", "model");
        let generator = ScenarioGenerator::new(model).with_scenarios_per_combination(10);
        let scenarios = generator.generate();
        // 3 modalities * 2 backends * 3 formats * 10 per combination.
        assert_eq!(scenarios.len(), 180);
    }
    // --- MQS categories ---
    #[test]
    fn test_scenario_mqs_category() {
        let model = ModelId::new("test", "model");
        let run_cpu = QaScenario::new(
            model.clone(),
            Modality::Run,
            Backend::Cpu,
            Format::Gguf,
            "test".to_string(),
            0,
        );
        assert_eq!(run_cpu.mqs_category(), "A1");
        let chat_gpu = QaScenario::new(
            model.clone(),
            Modality::Chat,
            Backend::Gpu,
            Format::Gguf,
            "test".to_string(),
            0,
        );
        assert_eq!(chat_gpu.mqs_category(), "A4");
        let serve_cpu = QaScenario::new(
            model,
            Modality::Serve,
            Backend::Cpu,
            Format::Gguf,
            "test".to_string(),
            0,
        );
        assert_eq!(serve_cpu.mqs_category(), "A5");
    }
    #[test]
    fn test_format_class() {
        assert_eq!(Format::Gguf.class(), 'A');
        assert_eq!(Format::Apr.class(), 'A');
        assert_eq!(Format::SafeTensors.class(), 'B');
    }
    // --- escaping helpers ---
    #[test]
    fn test_escape_prompt() {
        assert_eq!(escape_prompt("hello"), "hello");
        assert_eq!(escape_prompt("it's"), "it'\\''s");
    }
    #[test]
    fn test_escape_json() {
        assert_eq!(escape_json("hello"), "hello");
        assert_eq!(escape_json("line1\nline2"), "line1\\nline2");
        assert_eq!(escape_json("say \"hi\""), "say \\\"hi\\\"");
    }
    #[test]
    fn test_escape_json_backslash() {
        assert_eq!(escape_json("path\\file"), "path\\\\file");
    }
    // --- builder overrides ---
    #[test]
    fn test_scenario_with_temperature() {
        let model = ModelId::new("test", "model");
        let scenario = QaScenario::new(
            model,
            Modality::Run,
            Backend::Cpu,
            Format::Gguf,
            "test".to_string(),
            0,
        )
        .with_temperature(0.7);
        assert!((scenario.temperature - 0.7).abs() < f32::EPSILON);
    }
    #[test]
    fn test_scenario_with_max_tokens() {
        let model = ModelId::new("test", "model");
        let scenario = QaScenario::new(
            model,
            Modality::Run,
            Backend::Cpu,
            Format::Gguf,
            "test".to_string(),
            0,
        )
        .with_max_tokens(256);
        assert_eq!(scenario.max_tokens, 256);
    }
    #[test]
    fn test_scenario_with_trace_level() {
        let model = ModelId::new("test", "model");
        let scenario = QaScenario::new(
            model,
            Modality::Run,
            Backend::Cpu,
            Format::Gguf,
            "test".to_string(),
            0,
        )
        .with_trace_level(TraceLevel::Layer);
        assert_eq!(scenario.trace_level, TraceLevel::Layer);
    }
    // --- command rendering per modality and trace level ---
    #[test]
    fn test_scenario_to_command_chat() {
        let model = ModelId::new("test", "model");
        let scenario = QaScenario::new(
            model,
            Modality::Chat,
            Backend::Cpu,
            Format::Gguf,
            "Hello".to_string(),
            0,
        );
        let cmd = scenario.to_command("model.gguf");
        assert!(cmd.contains("apr chat"));
        assert!(cmd.contains("echo"));
    }
    #[test]
    fn test_scenario_to_command_serve() {
        let model = ModelId::new("test", "model");
        let scenario = QaScenario::new(
            model,
            Modality::Serve,
            Backend::Cpu,
            Format::Gguf,
            "Hello".to_string(),
            0,
        );
        let cmd = scenario.to_command("model.gguf");
        assert!(cmd.contains("apr serve"));
        assert!(cmd.contains("curl"));
        assert!(cmd.contains("/v1/completions"));
    }
    #[test]
    fn test_scenario_to_command_with_trace() {
        let model = ModelId::new("test", "model");
        let scenario = QaScenario::new(
            model,
            Modality::Run,
            Backend::Cpu,
            Format::Gguf,
            "Hello".to_string(),
            0,
        )
        .with_trace_level(TraceLevel::Payload);
        let cmd = scenario.to_command("model.gguf");
        assert!(cmd.contains("--trace"));
        assert!(cmd.contains("--trace-level payload"));
    }
    // --- oracle evaluation ---
    #[test]
    fn test_scenario_evaluate() {
        let model = ModelId::new("test", "model");
        let scenario = QaScenario::new(
            model,
            Modality::Run,
            Backend::Cpu,
            Format::Gguf,
            "What is 2+2?".to_string(),
            0,
        );
        let result = scenario.evaluate("The answer is 4");
        assert!(matches!(result, crate::OracleResult::Corroborated { .. }));
    }
    // --- enum accessor / display coverage ---
    #[test]
    fn test_trace_level_value() {
        assert_eq!(TraceLevel::None.value(), "none");
        assert_eq!(TraceLevel::Basic.value(), "basic");
        assert_eq!(TraceLevel::Layer.value(), "layer");
        assert_eq!(TraceLevel::Payload.value(), "payload");
    }
    #[test]
    fn test_modality_display() {
        assert_eq!(format!("{}", Modality::Run), "run");
        assert_eq!(format!("{}", Modality::Chat), "chat");
        assert_eq!(format!("{}", Modality::Serve), "serve");
    }
    #[test]
    fn test_backend_flag() {
        assert_eq!(Backend::Cpu.flag(), "");
        assert_eq!(Backend::Gpu.flag(), "--gpu");
    }
    #[test]
    fn test_backend_display() {
        assert_eq!(format!("{}", Backend::Cpu), "cpu");
        assert_eq!(format!("{}", Backend::Gpu), "gpu");
    }
    #[test]
    fn test_format_display() {
        assert_eq!(format!("{}", Format::Gguf), "gguf");
        assert_eq!(format!("{}", Format::SafeTensors), "safetensors");
        assert_eq!(format!("{}", Format::Apr), "apr");
    }
    #[test]
    fn test_modality_all() {
        let all = Modality::all();
        assert_eq!(all.len(), 3);
        assert!(all.contains(&Modality::Run));
        assert!(all.contains(&Modality::Chat));
        assert!(all.contains(&Modality::Serve));
    }
    #[test]
    fn test_backend_all() {
        let all = Backend::all();
        assert_eq!(all.len(), 2);
        assert!(all.contains(&Backend::Cpu));
        assert!(all.contains(&Backend::Gpu));
    }
    #[test]
    fn test_format_all() {
        let all = Format::all();
        assert_eq!(all.len(), 3);
        assert!(all.contains(&Format::Gguf));
        assert!(all.contains(&Format::SafeTensors));
        assert!(all.contains(&Format::Apr));
    }
    // --- generator configuration ---
    #[test]
    fn test_generator_with_prompts() {
        let model = ModelId::new("test", "model");
        let prompts = vec!["prompt1".to_string(), "prompt2".to_string()];
        let generator = ScenarioGenerator::new(model)
            .with_prompts(prompts.clone())
            .with_scenarios_per_combination(2);
        assert_eq!(generator.prompts, prompts);
    }
    #[test]
    fn test_generator_generate_for() {
        let model = ModelId::new("test", "model");
        let generator = ScenarioGenerator::new(model).with_scenarios_per_combination(5);
        let scenarios = generator.generate_for(Modality::Run, Backend::Cpu, Format::Gguf);
        assert_eq!(scenarios.len(), 5);
        for s in &scenarios {
            assert_eq!(s.modality, Modality::Run);
            assert_eq!(s.backend, Backend::Cpu);
            assert_eq!(s.format, Format::Gguf);
        }
    }
    #[test]
    fn test_default_prompts_coverage() {
        let prompts = default_prompts();
        assert!(!prompts.is_empty());
        // Pool must include arithmetic, code, and empty-prompt edge cases.
        assert!(prompts.iter().any(|p| p.contains('+') || p.contains('*')));
        assert!(
            prompts
                .iter()
                .any(|p| p.starts_with("def ") || p.starts_with("fn "))
        );
        assert!(prompts.iter().any(|p| p.is_empty()));
    }
    #[test]
    fn test_mqs_category_all_combinations() {
        let model = ModelId::new("test", "model");
        let run_gpu = QaScenario::new(
            model.clone(),
            Modality::Run,
            Backend::Gpu,
            Format::Gguf,
            "test".to_string(),
            0,
        );
        assert_eq!(run_gpu.mqs_category(), "A2");
        let chat_cpu = QaScenario::new(
            model.clone(),
            Modality::Chat,
            Backend::Cpu,
            Format::Gguf,
            "test".to_string(),
            0,
        );
        assert_eq!(chat_cpu.mqs_category(), "A3");
        let serve_gpu = QaScenario::new(
            model,
            Modality::Serve,
            Backend::Gpu,
            Format::Gguf,
            "test".to_string(),
            0,
        );
        assert_eq!(serve_gpu.mqs_category(), "A6");
    }
    // --- derive coverage (Clone / Debug / Serialize) ---
    #[test]
    fn test_scenario_clone() {
        let model = ModelId::new("test", "model");
        let scenario = QaScenario::new(
            model,
            Modality::Run,
            Backend::Cpu,
            Format::Gguf,
            "test".to_string(),
            42,
        );
        let cloned = scenario.clone();
        assert_eq!(cloned.id, scenario.id);
        assert_eq!(cloned.seed, scenario.seed);
    }
    #[test]
    fn test_scenario_serialize() {
        let model = ModelId::new("test", "model");
        let scenario = QaScenario::new(
            model,
            Modality::Run,
            Backend::Cpu,
            Format::Gguf,
            "test".to_string(),
            0,
        );
        let json = serde_json::to_string(&scenario).expect("serialize");
        // rename_all = "lowercase" must apply to the enum fields.
        assert!(json.contains("\"modality\":\"run\""));
        assert!(json.contains("\"backend\":\"cpu\""));
    }
    // --- AprTool coverage ---
    #[test]
    fn test_apr_tool_all() {
        let all = AprTool::all();
        assert_eq!(all.len(), 10);
        assert!(all.contains(&AprTool::Run));
        assert!(all.contains(&AprTool::Canary));
    }
    #[test]
    fn test_apr_tool_command() {
        assert_eq!(AprTool::Run.command(), "run");
        assert_eq!(AprTool::Chat.command(), "chat");
        assert_eq!(AprTool::Serve.command(), "serve");
        assert_eq!(AprTool::Inspect.command(), "inspect");
        assert_eq!(AprTool::Validate.command(), "validate");
        assert_eq!(AprTool::Bench.command(), "bench");
        assert_eq!(AprTool::Profile.command(), "profile");
        assert_eq!(AprTool::Trace.command(), "trace");
        assert_eq!(AprTool::Check.command(), "check");
        assert_eq!(AprTool::Canary.command(), "canary");
    }
    #[test]
    fn test_apr_tool_requires_prompt() {
        assert!(AprTool::Run.requires_prompt());
        assert!(AprTool::Chat.requires_prompt());
        assert!(!AprTool::Serve.requires_prompt());
        assert!(!AprTool::Inspect.requires_prompt());
        assert!(!AprTool::Validate.requires_prompt());
        assert!(!AprTool::Bench.requires_prompt());
    }
    #[test]
    fn test_apr_tool_supports_trace() {
        assert!(AprTool::Run.supports_trace());
        assert!(AprTool::Trace.supports_trace());
        assert!(!AprTool::Chat.supports_trace());
        assert!(!AprTool::Serve.supports_trace());
    }
    #[test]
    fn test_apr_tool_display() {
        assert_eq!(format!("{}", AprTool::Run), "run");
        assert_eq!(format!("{}", AprTool::Profile), "profile");
        assert_eq!(format!("{}", AprTool::Canary), "canary");
    }
    #[test]
    fn test_format_extension() {
        assert_eq!(Format::Gguf.extension(), ".gguf");
        assert_eq!(Format::SafeTensors.extension(), ".safetensors");
        assert_eq!(Format::Apr.extension(), ".apr");
    }
    #[test]
    fn test_trace_level_all() {
        let all = TraceLevel::all();
        assert_eq!(all.len(), 4);
        assert!(all.contains(&TraceLevel::None));
        assert!(all.contains(&TraceLevel::Basic));
        assert!(all.contains(&TraceLevel::Layer));
        assert!(all.contains(&TraceLevel::Payload));
    }
    #[test]
    fn test_modality_command() {
        assert_eq!(Modality::Run.command(), "run");
        assert_eq!(Modality::Chat.command(), "chat");
        assert_eq!(Modality::Serve.command(), "serve");
    }
    #[test]
    fn test_scenario_debug() {
        let model = ModelId::new("test", "model");
        let scenario = QaScenario::new(
            model,
            Modality::Run,
            Backend::Cpu,
            Format::Gguf,
            "test".to_string(),
            0,
        );
        let debug_str = format!("{scenario:?}");
        assert!(debug_str.contains("QaScenario"));
    }
    #[test]
    fn test_scenario_generator_debug() {
        let model = ModelId::new("test", "model");
        let generator = ScenarioGenerator::new(model);
        let debug_str = format!("{generator:?}");
        assert!(debug_str.contains("ScenarioGenerator"));
    }
    // escape_json intentionally leaves tab characters untouched.
    #[test]
    fn test_escape_json_tab() {
        let result = escape_json("hello\tworld");
        assert!(result.contains('\t'));
    }
    #[test]
    fn test_escape_prompt_no_quotes() {
        let result = escape_prompt("hello world");
        assert_eq!(result, "hello world");
    }
    #[test]
    fn test_scenario_to_command_with_basic_trace() {
        let model = ModelId::new("test", "model");
        let scenario = QaScenario::new(
            model,
            Modality::Run,
            Backend::Cpu,
            Format::Gguf,
            "Hello".to_string(),
            0,
        )
        .with_trace_level(TraceLevel::Basic);
        let cmd = scenario.to_command("model.gguf");
        assert!(cmd.contains("--trace"));
        assert!(cmd.contains("--trace-level basic"));
    }
    #[test]
    fn test_scenario_to_command_with_layer_trace() {
        let model = ModelId::new("test", "model");
        let scenario = QaScenario::new(
            model,
            Modality::Run,
            Backend::Cpu,
            Format::Gguf,
            "Hello".to_string(),
            0,
        )
        .with_trace_level(TraceLevel::Layer);
        let cmd = scenario.to_command("model.gguf");
        assert!(cmd.contains("--trace-level layer"));
    }
    // --- exhaustive per-cell MQS category checks ---
    #[test]
    fn test_mqs_category_run_cpu_is_a1() {
        let model = ModelId::new("t", "m");
        let s = QaScenario::new(
            model,
            Modality::Run,
            Backend::Cpu,
            Format::Gguf,
            "x".into(),
            0,
        );
        assert_eq!(s.mqs_category(), "A1");
        assert_ne!(s.mqs_category(), "A2");
        assert_ne!(s.mqs_category(), "A3");
    }
    #[test]
    fn test_mqs_category_run_gpu_is_a2() {
        let model = ModelId::new("t", "m");
        let s = QaScenario::new(
            model,
            Modality::Run,
            Backend::Gpu,
            Format::Gguf,
            "x".into(),
            0,
        );
        assert_eq!(s.mqs_category(), "A2");
        assert_ne!(s.mqs_category(), "A1");
    }
    #[test]
    fn test_mqs_category_chat_cpu_is_a3() {
        let model = ModelId::new("t", "m");
        let s = QaScenario::new(
            model,
            Modality::Chat,
            Backend::Cpu,
            Format::Gguf,
            "x".into(),
            0,
        );
        assert_eq!(s.mqs_category(), "A3");
        assert_ne!(s.mqs_category(), "A4");
    }
    #[test]
    fn test_mqs_category_chat_gpu_is_a4() {
        let model = ModelId::new("t", "m");
        let s = QaScenario::new(
            model,
            Modality::Chat,
            Backend::Gpu,
            Format::Gguf,
            "x".into(),
            0,
        );
        assert_eq!(s.mqs_category(), "A4");
        assert_ne!(s.mqs_category(), "A3");
    }
    #[test]
    fn test_mqs_category_serve_cpu_is_a5() {
        let model = ModelId::new("t", "m");
        let s = QaScenario::new(
            model,
            Modality::Serve,
            Backend::Cpu,
            Format::Gguf,
            "x".into(),
            0,
        );
        assert_eq!(s.mqs_category(), "A5");
        assert_ne!(s.mqs_category(), "A6");
    }
    #[test]
    fn test_mqs_category_serve_gpu_is_a6() {
        let model = ModelId::new("t", "m");
        let s = QaScenario::new(
            model,
            Modality::Serve,
            Backend::Gpu,
            Format::Gguf,
            "x".into(),
            0,
        );
        assert_eq!(s.mqs_category(), "A6");
        assert_ne!(s.mqs_category(), "A5");
    }
    #[test]
    fn test_format_class_gguf_is_char_a() {
        let class = Format::Gguf.class();
        assert_eq!(class, 'A');
        assert_ne!(class, 'B');
        assert_ne!(class, 'X');
    }
    #[test]
    fn test_format_class_apr_is_char_a() {
        let class = Format::Apr.class();
        assert_eq!(class, 'A');
        assert_ne!(class, 'B');
    }
    #[test]
    fn test_format_class_safetensors_is_char_b() {
        let class = Format::SafeTensors.class();
        assert_eq!(class, 'B');
        assert_ne!(class, 'A');
    }
    // --- escaping edge cases ---
    #[test]
    fn test_escape_json_backslash_not_empty() {
        let result = escape_json("a\\b");
        assert!(!result.is_empty());
        assert_eq!(result, "a\\\\b");
        assert!(result.len() > "a\\b".len());
    }
    #[test]
    fn test_escape_json_quote_not_empty() {
        let result = escape_json("say \"hi\"");
        assert!(!result.is_empty());
        assert_eq!(result, "say \\\"hi\\\"");
    }
    #[test]
    fn test_escape_json_newline_not_empty() {
        let result = escape_json("line1\nline2");
        assert!(!result.is_empty());
        assert_eq!(result, "line1\\nline2");
        assert!(!result.contains('\n'));
    }
    #[test]
    fn test_escape_json_all_escapes_combined() {
        let result = escape_json("a\\b\"c\nd");
        assert_eq!(result, "a\\\\b\\\"c\\nd");
    }
    #[test]
    fn test_escape_prompt_single_quote() {
        let result = escape_prompt("it's");
        assert!(!result.is_empty());
        assert_eq!(result, "it'\\''s");
        assert!(result.contains("'\\''"));
    }
    #[test]
    fn test_backend_cpu_flag_is_empty() {
        let flag = Backend::Cpu.flag();
        assert!(flag.is_empty());
        assert_eq!(flag, "");
    }
    #[test]
    fn test_backend_gpu_flag_is_gpu_option() {
        let flag = Backend::Gpu.flag();
        assert!(!flag.is_empty());
        assert_eq!(flag, "--gpu");
        assert!(flag.starts_with("--"));
    }
    #[test]
    fn test_trace_level_none_value() {
        assert_eq!(TraceLevel::None.value(), "none");
        assert_ne!(TraceLevel::None.value(), "basic");
    }
    #[test]
    fn test_trace_level_basic_value() {
        assert_eq!(TraceLevel::Basic.value(), "basic");
        assert_ne!(TraceLevel::Basic.value(), "none");
    }
    #[test]
    fn test_trace_level_layer_value() {
        assert_eq!(TraceLevel::Layer.value(), "layer");
        assert_ne!(TraceLevel::Layer.value(), "payload");
    }
    #[test]
    fn test_trace_level_payload_value() {
        assert_eq!(TraceLevel::Payload.value(), "payload");
        assert_ne!(TraceLevel::Payload.value(), "layer");
    }
}