import re
import pytest
from benchmark_models import (
BenchmarkData,
CircumspherePerformanceData,
CircumsphereTestCase,
VersionComparisonData,
extract_benchmark_data,
format_benchmark_tables,
format_throughput_value,
format_time_value,
parse_benchmark_header,
parse_throughput_data,
parse_time_data,
)
class TestBenchmarkData:
    """Tests for the BenchmarkData container and its fluent builder API."""

    def test_init(self):
        """A fresh instance carries its identity fields and metric defaults."""
        bench = BenchmarkData(points=1000, dimension="2D")
        assert bench.points == 1000
        assert bench.dimension == "2D"
        # Timing defaults to 0.0 while throughput starts unset (None).
        assert bench.time_mean == 0.0
        assert bench.throughput_mean is None

    def test_with_timing_fluent_interface(self):
        """with_timing() records low/mean/high plus unit and chains on self."""
        bench = BenchmarkData(1000, "3D").with_timing(100.0, 110.0, 120.0, "µs")
        assert bench.time_low == 100.0
        assert bench.time_mean == 110.0
        assert bench.time_high == 120.0
        assert bench.time_unit == "µs"

    def test_with_throughput_fluent_interface(self):
        """with_throughput() records low/mean/high plus unit and chains on self."""
        bench = BenchmarkData(1000, "2D").with_throughput(800.0, 900.0, 1000.0, "Kelem/s")
        assert bench.throughput_low == 800.0
        assert bench.throughput_mean == 900.0
        assert bench.throughput_high == 1000.0
        assert bench.throughput_unit == "Kelem/s"

    def test_to_baseline_format_with_timing_only(self):
        """Baseline text shows a header plus a Time line when only timing is set."""
        bench = BenchmarkData(1000, "2D").with_timing(100.0, 110.0, 120.0, "µs")
        rendered = bench.to_baseline_format()
        expected = """=== 1000 Points (2D) ===
Time: [100.0, 110.0, 120.0] µs
"""
        assert rendered == expected

    def test_to_baseline_format_with_timing_and_throughput(self):
        """Baseline text adds a Throughput line when throughput is also set."""
        bench = (
            BenchmarkData(1000, "3D")
            .with_timing(100.0, 110.0, 120.0, "µs")
            .with_throughput(800.0, 900.0, 1000.0, "Kelem/s")
        )
        rendered = bench.to_baseline_format()
        expected = """=== 1000 Points (3D) ===
Time: [100.0, 110.0, 120.0] µs
Throughput: [800.0, 900.0, 1000.0] Kelem/s
"""
        assert rendered == expected
class TestCircumspherePerformanceData:
    """Tests for the CircumspherePerformanceData record."""

    def test_init(self):
        """Constructor stores method/time and leaves derived fields at defaults."""
        perf = CircumspherePerformanceData(method="insphere", time_ns=1000.0)
        assert perf.method == "insphere"
        assert perf.time_ns == 1000.0
        # Derived fields are filled in later: no relative figure, not a winner.
        assert perf.relative_performance is None
        assert perf.winner is False
class TestCircumsphereTestCase:
    """Tests for CircumsphereTestCase winner selection and relative timings."""

    def test_init_and_get_winner(self):
        """The winner is the method with the smallest time_ns."""
        methods = {
            "insphere": CircumspherePerformanceData("insphere", 1000.0),
            "insphere_distance": CircumspherePerformanceData("insphere_distance", 1200.0),
            "insphere_lifted": CircumspherePerformanceData("insphere_lifted", 800.0),
        }
        test_case = CircumsphereTestCase("test_basic_3d", "3D", methods)
        assert test_case.test_name == "test_basic_3d"
        assert test_case.dimension == "3D"
        assert test_case.get_winner() == "insphere_lifted"

    def test_get_relative_performance(self):
        """Relative performance is each method's time divided by the winner's."""
        methods = {
            "insphere": CircumspherePerformanceData("insphere", 1000.0),
            "insphere_distance": CircumspherePerformanceData("insphere_distance", 1200.0),
            "insphere_lifted": CircumspherePerformanceData("insphere_lifted", 800.0),
        }
        test_case = CircumsphereTestCase("test_basic_3d", "3D", methods)
        # Winner is 1.0x by definition; others scale off the 800.0 ns baseline.
        assert test_case.get_relative_performance("insphere_lifted") == pytest.approx(1.0)
        # FIX: these two asserts were fused onto a single line (SyntaxError).
        assert test_case.get_relative_performance("insphere") == pytest.approx(1.25)
        assert test_case.get_relative_performance("insphere_distance") == pytest.approx(1.5)

    def test_get_winner_empty_methods(self):
        """An empty methods mapping yields no winner."""
        test_case = CircumsphereTestCase("test_empty", "3D", {})
        assert test_case.get_winner() is None

    def test_get_relative_performance_nonexistent_method(self):
        """Asking about an unknown method returns 0.0 rather than raising."""
        methods = {
            "insphere": CircumspherePerformanceData("insphere", 1000.0),
        }
        test_case = CircumsphereTestCase("test_basic_3d", "3D", methods)
        assert test_case.get_relative_performance("nonexistent_method") == pytest.approx(0.0)

    # NOTE(review): this exercises VersionComparisonData, not CircumsphereTestCase;
    # it arguably belongs in TestVersionComparisonData below.
    def test_version_comparison_data_division_by_zero_edge_case(self):
        """A zero old_value must not divide by zero; improvement reports 0.0."""
        comparison = VersionComparisonData(
            test_case="Edge Case",
            method="insphere",
            old_version="v0.3.0",
            new_version="v0.3.1",
            old_value=0.0,
            new_value=100.0,
            unit="ns",
        )
        assert comparison.improvement_pct == pytest.approx(0.0)
class TestVersionComparisonData:
    """Tests for VersionComparisonData's improvement percentage calculation."""

    def test_improvement_calculation(self):
        """improvement_pct is the percent reduction relative to the old value."""
        comparison = VersionComparisonData(
            test_case="Basic 3D",
            method="insphere",
            old_version="v0.3.0",
            new_version="v0.3.1",
            old_value=808.0,
            new_value=805.0,
            unit="ns",
        )
        # (old - new) / old, expressed as a percentage.
        expected_improvement = ((808.0 - 805.0) / 808.0) * 100
        assert comparison.improvement_pct == pytest.approx(expected_improvement, abs=0.001)

    def test_zero_old_value(self):
        """A zero old value short-circuits to 0.0 instead of dividing by zero."""
        comparison = VersionComparisonData(
            test_case="Basic 3D",
            method="insphere",
            old_version="v0.3.0",
            new_version="v0.3.1",
            old_value=0.0,
            new_value=805.0,
            unit="ns",
        )
        assert comparison.improvement_pct == 0.0
class TestParsingFunctions:
    """Tests for the baseline-file parsing helpers."""

    def test_extract_benchmark_data(self):
        """A full baseline document parses into one BenchmarkData per section."""
        baseline_content = """Date: 2024-01-15 10:30:00 UTC
Git commit: abc123def456
=== 1000 Points (2D) ===
Time: [100.0, 110.0, 120.0] µs
Throughput: [8000.0, 9090.9, 10000.0] Kelem/s
=== 5000 Points (3D) ===
Time: [500.0, 550.0, 600.0] µs
Throughput: [8333.3, 9090.9, 10000.0] Kelem/s
"""
        parsed = extract_benchmark_data(baseline_content)
        assert len(parsed) == 2

        entry_2d, entry_3d = parsed
        assert entry_2d.points == 1000
        assert entry_2d.dimension == "2D"
        assert entry_2d.time_mean == 110.0
        assert entry_2d.time_unit == "µs"
        assert entry_2d.throughput_mean == 9090.9

        assert entry_3d.points == 5000
        assert entry_3d.dimension == "3D"
        assert entry_3d.time_mean == 550.0

    def test_parse_benchmark_header(self):
        """Section headers yield points/dimension; anything else yields None."""
        header = parse_benchmark_header("=== 1000 Points (2D) ===")
        assert header is not None
        assert header.points == 1000
        assert header.dimension == "2D"
        assert parse_benchmark_header("Invalid header") is None

    def test_parse_time_data(self):
        """A well-formed Time line populates the benchmark; junk returns False."""
        target = BenchmarkData(1000, "2D")
        assert parse_time_data(target, "Time: [100.0, 110.0, 120.0] µs") is True
        assert target.time_mean == 110.0
        assert target.time_unit == "µs"

        untouched = BenchmarkData(1000, "2D")
        assert parse_time_data(untouched, "Invalid time data") is False

    def test_parse_throughput_data(self):
        """A well-formed Throughput line populates the benchmark; junk returns False."""
        target = BenchmarkData(1000, "2D")
        assert parse_throughput_data(target, "Throughput: [8000.0, 9090.9, 10000.0] Kelem/s") is True
        assert target.throughput_mean == 9090.9
        assert target.throughput_unit == "Kelem/s"

        untouched = BenchmarkData(1000, "2D")
        assert parse_throughput_data(untouched, "Invalid throughput data") is False
class TestFormattingFunctions:
    """Tests for markdown-table and value-formatting helpers."""

    def test_format_benchmark_tables(self):
        """Tables are grouped per dimension with scaling relative to first row."""
        benchmarks = [
            BenchmarkData(1000, "2D").with_timing(100.0, 110.0, 120.0, "µs").with_throughput(8000.0, 9090.9, 10000.0, "Kelem/s"),
            BenchmarkData(5000, "2D").with_timing(450.0, 500.0, 550.0, "µs").with_throughput(9000.0, 10000.0, 11000.0, "Kelem/s"),
            BenchmarkData(2000, "3D").with_timing(200.0, 220.0, 240.0, "µs").with_throughput(8000.0, 9090.9, 10000.0, "Kelem/s"),
        ]
        lines = format_benchmark_tables(benchmarks)
        markdown_content = "\n".join(lines)
        assert "### 2D Triangulation Performance" in markdown_content
        assert "### 3D Triangulation Performance" in markdown_content
        assert "| Points | Time (mean) | Throughput (mean) | Scaling |" in markdown_content
        assert "|--------|-------------|-------------------|----------|" in markdown_content
        assert "| 1000 | 110.00 µs | 9090.900 Kelem/s | 1.0x |" in markdown_content
        # FIX: these two asserts were fused onto a single line (SyntaxError).
        assert "| 5000 |" in markdown_content
        assert "4.5x" in markdown_content

    def test_format_time_value(self):
        """Times format with unit-aware precision; non-positive values are N/A."""
        assert format_time_value(0.0, "µs") == "N/A"
        assert format_time_value(-1.0, "µs") == "N/A"
        assert format_time_value(0.5, "µs") == "0.500 µs"
        assert format_time_value(110.0, "µs") == "110.00 µs"
        # FIX: these three asserts were fused onto a single line (SyntaxError).
        assert format_time_value(1500.0, "µs") == "1.500 ms"
        assert format_time_value(2500.0, "ms") == "2.5000 s"
        assert format_time_value(50000.0, "ms") == "50.0000 s"

    def test_format_throughput_value(self):
        """Throughput formats with value-dependent precision; None yields N/A."""
        assert format_throughput_value(0.5, "Kelem/s") == "0.500 Kelem/s"
        assert format_throughput_value(110.0, "Kelem/s") == "110.00 Kelem/s"
        assert format_throughput_value(9090.909, "Kelem/s") == "9090.909 Kelem/s"
        assert format_throughput_value(None, "Kelem/s") == "N/A"
        assert format_throughput_value(110.0, None) == "N/A"

    def test_format_time_value_with_unit_aliases(self):
        """'us' and 'μs' (Greek mu) are accepted as aliases of 'µs' (micro sign)."""
        # FIX: these asserts were fused onto single lines (SyntaxError).
        assert format_time_value(500.0, "us") == "500.00 µs"
        assert format_time_value(500.0, "μs") == "500.00 µs"
        assert format_time_value(500.0, "µs") == "500.00 µs"
        assert format_time_value(1500.0, "us") == "1.500 ms"
        assert format_time_value(2500.0, "μs") == "2.500 ms"

    def test_parse_time_data_with_scientific_notation(self):
        """Time lines accept scientific notation, negatives, and loose spacing."""
        benchmark = BenchmarkData(1000, "3D")
        success = parse_time_data(benchmark, "Time: [1.0e2, 1.1e2, 1.2e2] µs")
        assert success is True
        assert benchmark.time_mean == 110.0
        assert benchmark.time_unit == "µs"

        benchmark2 = BenchmarkData(1000, "3D")
        success = parse_time_data(benchmark2, "Time: [-1.0, 0.0, 1.0] µs")
        assert success is True
        assert benchmark2.time_mean == 0.0

        benchmark3 = BenchmarkData(1000, "3D")
        success = parse_time_data(benchmark3, "Time: [ 100.0 , 110.0, 120.0 ] µs")
        assert success is True
        assert benchmark3.time_mean == 110.0
        assert benchmark3.time_unit == "µs"

    def test_parse_throughput_data_with_scientific_notation(self):
        """Throughput lines accept scientific notation and loose spacing."""
        benchmark = BenchmarkData(1000, "2D")
        success = parse_throughput_data(benchmark, "Throughput: [8.0e3, 9.09e3, 1.0e4] Kelem/s")
        assert success is True
        assert benchmark.throughput_mean == 9090.0
        assert benchmark.throughput_unit == "Kelem/s"

        benchmark2 = BenchmarkData(1000, "2D")
        success = parse_throughput_data(benchmark2, "Throughput: [ 8000.0 , 9090.9, 10000.0 ] Kelem/s")
        assert success is True
        assert benchmark2.throughput_mean == 9090.9
        assert benchmark2.throughput_unit == "Kelem/s"

    def test_format_benchmark_tables_dimension_sorting(self):
        """Dimension sections sort numerically (10D after 3D, not lexically)."""
        benchmarks = [
            BenchmarkData(1000, "10D").with_timing(100.0, 110.0, 120.0, "µs"),
            BenchmarkData(1000, "2D").with_timing(50.0, 55.0, 60.0, "µs"),
            BenchmarkData(1000, "3D").with_timing(70.0, 75.0, 80.0, "µs"),
            BenchmarkData(1000, "1D").with_timing(30.0, 35.0, 40.0, "µs"),
        ]
        lines = format_benchmark_tables(benchmarks)
        markdown_content = "\n".join(lines)
        pos_1d = markdown_content.find("### 1D Triangulation Performance")
        pos_2d = markdown_content.find("### 2D Triangulation Performance")
        pos_3d = markdown_content.find("### 3D Triangulation Performance")
        pos_10d = markdown_content.find("### 10D Triangulation Performance")
        assert pos_1d < pos_2d < pos_3d < pos_10d

    def test_format_benchmark_tables_mixed_dimension_formats(self):
        """Non-standard dimension labels sort after numeric ones, preserved verbatim."""
        benchmarks = [
            BenchmarkData(1000, "2D").with_timing(50.0, 55.0, 60.0, "µs"),
            BenchmarkData(1000, "custom_format").with_timing(90.0, 95.0, 100.0, "µs"),
            BenchmarkData(1000, " 3D ").with_timing(70.0, 75.0, 80.0, "µs"),
            BenchmarkData(1000, "1d").with_timing(30.0, 35.0, 40.0, "µs"),
        ]
        lines = format_benchmark_tables(benchmarks)
        markdown_content = "\n".join(lines)
        pos_1d = markdown_content.find("### 1d Triangulation Performance")
        pos_2d = markdown_content.find("### 2D Triangulation Performance")
        # FIX: these two assignments were fused onto a single line (SyntaxError).
        pos_3d = markdown_content.find("### 3D Triangulation Performance")
        pos_custom = markdown_content.find("### custom_format Triangulation Performance")
        assert pos_1d < pos_2d < pos_3d < pos_custom

    def test_format_benchmark_tables_scaling_baseline_with_zero_first_entry(self):
        """A zero-time first row is skipped as the scaling baseline."""
        benchmarks = [
            BenchmarkData(1000, "2D").with_timing(0.0, 0.0, 0.0, "µs"),
            BenchmarkData(2000, "2D").with_timing(100.0, 110.0, 120.0, "µs"),
            BenchmarkData(5000, "2D").with_timing(450.0, 500.0, 550.0, "µs"),
        ]
        lines = format_benchmark_tables(benchmarks)
        markdown_content = "\n".join(lines)
        assert "| 1000 | N/A | N/A | N/A |" in markdown_content
        # The first non-zero row (2000 points) becomes the 1.0x baseline.
        assert "| 2000 | 110.00 µs | N/A | 1.0x |" in markdown_content
        assert "4.5x" in markdown_content
        assert "500.0x" not in markdown_content

    def test_format_benchmark_tables_scaling_baseline_all_zero_times(self):
        """With no usable baseline, every row renders N/A and no numeric scaling."""
        benchmarks = [
            BenchmarkData(1000, "2D").with_timing(0.0, 0.0, 0.0, "µs"),
            BenchmarkData(2000, "2D").with_timing(0.0, 0.0, 0.0, "µs"),
            BenchmarkData(5000, "2D").with_timing(0.0, 0.0, 0.0, "µs"),
        ]
        lines = format_benchmark_tables(benchmarks)
        markdown_content = "\n".join(lines)
        assert "| 1000 | N/A | N/A | N/A |" in markdown_content
        assert "| 2000 | N/A | N/A | N/A |" in markdown_content
        assert "| 5000 | N/A | N/A | N/A |" in markdown_content
        # No row anywhere should carry a numeric "<x>x" scaling figure.
        numeric_scaling_pattern = r"\| [^|]+ \| [^|]+ \| [^|]+ \| [0-9.]+x \|"
        assert not re.search(numeric_scaling_pattern, markdown_content)