use assert_cmd::Command;
use std::fs;
#[test]
fn test_small_input_performance() {
    // A tiny input should be tokenized well within the 2-second budget,
    // even including process spawn overhead.
    let input = "Hello world";
    let timer = std::time::Instant::now();
    Command::cargo_bin("token-count")
        .unwrap()
        .arg("--model")
        .arg("gpt-4")
        .write_stdin(input)
        .assert()
        .success();
    let took = timer.elapsed();
    assert!(took.as_secs() < 2, "Small input took too long: {:?}", took);
}
#[test]
fn test_medium_input_performance() {
    // ~1000 bytes of repeated text; still expected to finish in under 2s.
    let input = "word ".repeat(200);
    let timer = std::time::Instant::now();
    Command::cargo_bin("token-count")
        .unwrap()
        .arg("--model")
        .arg("gpt-4")
        .write_stdin(input)
        .assert()
        .success();
    let took = timer.elapsed();
    assert!(took.as_secs() < 2, "Medium input took too long: {:?}", took);
}
#[test]
fn test_large_input_performance() {
    // Large fixture gets a more generous 30-second budget.
    // `expect` instead of `unwrap`: a missing fixture should name the file
    // in the panic message rather than only printing an opaque io::Error.
    let large_input = fs::read_to_string("tests/fixtures/large.txt")
        .expect("failed to read fixture tests/fixtures/large.txt");
    let mut cmd = Command::cargo_bin("token-count").unwrap();
    let start = std::time::Instant::now();
    cmd.arg("--model").arg("gpt-4").write_stdin(large_input).assert().success();
    let elapsed = start.elapsed();
    assert!(elapsed.as_secs() < 30, "Large input took too long: {:?}", elapsed);
}
#[test]
fn test_empty_input_performance() {
    // Empty stdin, default model: must still exit successfully and quickly.
    let timer = std::time::Instant::now();
    Command::cargo_bin("token-count")
        .unwrap()
        .write_stdin("")
        .assert()
        .success();
    let took = timer.elapsed();
    assert!(took.as_secs() < 2, "Empty input took too long: {:?}", took);
}
#[test]
fn test_unicode_input_performance() {
    // Multi-byte input should not be meaningfully slower than ASCII.
    // `expect` instead of `unwrap`: a missing fixture should name the file
    // in the panic message rather than only printing an opaque io::Error.
    let unicode_input = fs::read_to_string("tests/fixtures/unicode.txt")
        .expect("failed to read fixture tests/fixtures/unicode.txt");
    let mut cmd = Command::cargo_bin("token-count").unwrap();
    let start = std::time::Instant::now();
    cmd.arg("--model").arg("gpt-4").write_stdin(unicode_input).assert().success();
    let elapsed = start.elapsed();
    assert!(elapsed.as_secs() < 2, "Unicode input took too long: {:?}", elapsed);
}
#[test]
fn test_all_models_performance() {
    // Every supported model must stay under the 2-second budget for a
    // trivial input. Array instead of `vec!`: the list is only iterated
    // once, so a heap allocation is pointless (clippy::useless_vec).
    let models = ["gpt-3.5-turbo", "gpt-4", "gpt-4-turbo", "gpt-4o"];
    let test_input = "Hello world test";
    for model in models {
        let mut cmd = Command::cargo_bin("token-count").unwrap();
        let start = std::time::Instant::now();
        cmd.arg("--model").arg(model).write_stdin(test_input).assert().success();
        let elapsed = start.elapsed();
        assert!(elapsed.as_secs() < 2, "Model {} took too long: {:?}", model, elapsed);
    }
}
#[test]
fn test_verbose_output_performance() {
    // Verbose mode (-v) must not be dramatically slower than a plain run.
    let test_input = "word ".repeat(100);

    // Baseline: plain run.
    let mut cmd1 = Command::cargo_bin("token-count").unwrap();
    let start1 = std::time::Instant::now();
    cmd1.arg("--model").arg("gpt-4").write_stdin(test_input.clone()).assert().success();
    let elapsed1 = start1.elapsed();

    // Verbose run.
    let mut cmd2 = Command::cargo_bin("token-count").unwrap();
    let start2 = std::time::Instant::now();
    cmd2.arg("--model").arg("gpt-4").arg("-v").write_stdin(test_input).assert().success();
    let elapsed2 = start2.elapsed();

    // Compare full Durations, not truncated as_millis() values: if the
    // baseline finishes in under 1 ms, as_millis() truncates to 0 and the
    // old bound `elapsed2.as_millis() < 0` could never hold, failing the
    // test precisely when the binary is fast. A small fixed slack absorbs
    // process-spawn and scheduler jitter on near-zero baselines.
    let bound = elapsed1 * 2 + std::time::Duration::from_millis(100);
    assert!(
        elapsed2 < bound,
        "Verbose output significantly slower: simple={:?}, verbose={:?}",
        elapsed1,
        elapsed2
    );
}