from __future__ import annotations
import argparse
import json
import os
import shlex
import statistics
import subprocess
import sys
import tempfile
import time
from dataclasses import dataclass
from pathlib import Path
from typing import Iterable
# Resolve paths relative to this script so the benchmark works from any CWD.
SCRIPT_DIR = Path(__file__).resolve().parent
REPO_ROOT = SCRIPT_DIR.parent
# Release binary produced by `cargo build --release --bin st` (see ensure_syntext_binary).
DEFAULT_SYNTEXT_BIN = REPO_ROOT / "target" / "release" / "st"
# JSON catalog of named benchmark presets (see load_presets).
DEFAULT_PRESET_FILE = REPO_ROOT / "benchmarks" / "repo_presets.json"
# Fallback iteration counts used when neither CLI flags nor a preset supply them.
DEFAULT_BUILD_ITERATIONS = 3
DEFAULT_SEARCH_ITERATIONS = 5
DEFAULT_WARMUPS = 1
@dataclass(frozen=True)
class QuerySpec:
    """One benchmark query: a search mode ("literal" or "regex") plus its pattern."""

    mode: str
    pattern: str

    @property
    def name(self) -> str:
        """Display label of the form ``mode:pattern``, newline-free and truncated."""
        compact = self.pattern.replace("\n", " ").strip()
        if len(compact) > 48:
            # Keep labels short for tables; 45 chars + ellipsis.
            compact = compact[:45] + "..."
        return f"{self.mode}:{compact}"
@dataclass(frozen=True)
class PresetSpec:
    """A named benchmark configuration loaded from the preset catalog JSON."""

    name: str  # catalog key, used for --preset lookup
    display_name: str  # human-readable title shown in listings
    repo_url: str  # upstream Git URL of the benchmark target
    suggested_local_path: str  # expected local checkout location
    language_focus: str  # primary language(s) of the target repo
    scale: str  # rough repo-size label
    build_iterations: int  # default number of timed index builds
    search_iterations: int  # default timed search runs per tool/query
    warmups: int  # untimed runs before the timed search runs
    tools: tuple[str, ...]  # subset of {"syntext", "rg", "grep"}
    queries: tuple[QuerySpec, ...]  # default queries for this preset
    notes: tuple[str, ...]  # free-form caveats echoed by --list-presets
def parse_query(value: str) -> QuerySpec:
    """Parse a ``mode:pattern`` CLI argument into a QuerySpec.

    Raises argparse.ArgumentTypeError for a missing separator, an unknown
    mode, or an empty pattern, so argparse renders a clean usage error.
    """
    mode, sep, pattern = value.partition(":")
    if not sep:
        raise argparse.ArgumentTypeError(
            f"invalid query {value!r}, expected literal:<pattern> or regex:<pattern>"
        )
    if mode not in ("literal", "regex"):
        raise argparse.ArgumentTypeError(
            f"invalid query mode {mode!r}, expected literal or regex"
        )
    if not pattern:
        raise argparse.ArgumentTypeError("query pattern must not be empty")
    return QuerySpec(mode=mode, pattern=pattern)
def load_presets(path: Path) -> dict[str, PresetSpec]:
    """Read the JSON preset catalog at *path* and key each preset by name."""
    catalog = json.loads(path.read_text())
    loaded: dict[str, PresetSpec] = {}
    for entry in catalog.get("presets", []):
        spec = PresetSpec(
            name=entry["name"],
            display_name=entry["display_name"],
            repo_url=entry["repo_url"],
            suggested_local_path=entry["suggested_local_path"],
            language_focus=entry["language_focus"],
            scale=entry["scale"],
            build_iterations=int(entry["build_iterations"]),
            search_iterations=int(entry["search_iterations"]),
            warmups=int(entry["warmups"]),
            # Tools default to the full comparison set when omitted.
            tools=tuple(entry.get("tools", ["syntext", "rg", "grep"])),
            queries=tuple(parse_query(text) for text in entry["queries"]),
            notes=tuple(entry.get("notes", [])),
        )
        loaded[spec.name] = spec
    return loaded
def print_presets(presets: dict[str, PresetSpec]) -> None:
    """Print a Markdown-flavored summary of every preset, sorted by name."""
    print("# Benchmark Presets\n")
    for spec in sorted(presets.values(), key=lambda entry: entry.name):
        print(f"- `{spec.name}`: {spec.display_name}")
        print(f" repo: `{spec.repo_url}`")
        print(f" suggested local path: `{spec.suggested_local_path}`")
        print(f" focus: `{spec.language_focus}`, scale: `{spec.scale}`")
        print(
            f" default settings: build_iterations={spec.build_iterations}, "
            f"search_iterations={spec.search_iterations}, warmups={spec.warmups}"
        )
        print(" tools: " + ", ".join(f"`{tool}`" for tool in spec.tools))
        print(" queries: " + ", ".join(f"`{query.name}`" for query in spec.queries))
        if spec.notes:
            print(" notes: " + " ".join(spec.notes))
        # Blank line separates consecutive presets.
        print()
def tracked_files(repo_root: Path) -> bytes:
    """Return the NUL-separated list of Git-tracked paths in *repo_root*."""
    listing = subprocess.run(
        ["git", "-C", str(repo_root), "ls-files", "-z"],
        check=True,
        capture_output=True,
    )
    return listing.stdout
def tracked_file_count(repo_root: Path) -> int:
    """Count Git-tracked files by splitting the NUL-separated listing."""
    listing = tracked_files(repo_root)
    if not listing:
        return 0
    # Filter out the empty fragment after the trailing NUL.
    return len([entry for entry in listing.split(b"\0") if entry])
def ensure_syntext_binary(syntext_bin: Path) -> None:
    """Build the release ``st`` binary when *syntext_bin* does not exist yet.

    Raises RuntimeError when the cargo build succeeds but the requested path
    still does not exist (e.g. a custom --syntext-bin outside target/release),
    so the failure surfaces here with a clear message instead of as a
    confusing exec error from the first benchmark subprocess.
    """
    if syntext_bin.exists():
        return
    subprocess.run(
        ["cargo", "build", "--release", "--bin", "st"],
        cwd=REPO_ROOT,
        check=True,
    )
    if not syntext_bin.exists():
        raise RuntimeError(
            f"cargo build succeeded but {syntext_bin} still does not exist; "
            "pass --syntext-bin pointing at the built binary"
        )
def run_timed(
    cmd: list[str] | str,
    *,
    cwd: Path,
    env: dict[str, str],
    shell: bool = False,
    allowed_codes: Iterable[int] = (0,),
) -> float:
    """Run *cmd* once with its output discarded and return wall time in ms.

    Raises RuntimeError when the exit code is not in *allowed_codes*.
    """
    started = time.perf_counter()
    proc = subprocess.run(
        cmd,
        cwd=cwd,
        env=env,
        shell=shell,
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
        text=False,
    )
    duration_ms = 1000.0 * (time.perf_counter() - started)
    if proc.returncode not in set(allowed_codes):
        raise RuntimeError(f"command failed with exit {proc.returncode}: {cmd!r}")
    return duration_ms
def output_line_count(
    cmd: list[str] | str,
    *,
    cwd: Path,
    env: dict[str, str],
    shell: bool = False,
    allowed_codes: Iterable[int] = (0,),
) -> int:
    """Run *cmd* and return the number of lines it wrote to stdout.

    A trailing fragment without a final newline counts as one extra line.
    Raises RuntimeError when the exit code is not in *allowed_codes*.
    """
    proc = subprocess.run(
        cmd,
        cwd=cwd,
        env=env,
        shell=shell,
        capture_output=True,
        text=False,
    )
    if proc.returncode not in set(allowed_codes):
        raise RuntimeError(f"command failed with exit {proc.returncode}: {cmd!r}")
    data = proc.stdout
    if not data:
        return 0
    newlines = data.count(b"\n")
    return newlines if data.endswith(b"\n") else newlines + 1
def summarize(samples_ms: list[float]) -> dict[str, float]:
    """Reduce timing samples to median/min/max, rounded to 3 decimal places."""
    return {
        "median_ms": round(statistics.median(samples_ms), 3),
        "min_ms": round(min(samples_ms), 3),
        "max_ms": round(max(samples_ms), 3),
    }
def summarize_int(samples: list[int], unit: str) -> dict[str, int]:
    """Reduce integer samples to median/min/max, with keys suffixed by *unit*.

    median_low keeps the result an actual sample value (an int), never a mean.
    """
    return {
        f"median_{unit}": statistics.median_low(samples),
        f"min_{unit}": min(samples),
        f"max_{unit}": max(samples),
    }
def dir_size_bytes(root: Path) -> int:
    """Total size in bytes of every regular file under *root*, recursively."""
    return sum(entry.stat().st_size for entry in root.rglob("*") if entry.is_file())
def syntext_search_cmd(
    syntext_bin: Path, repo_root: Path, index_dir: Path, query: QuerySpec
) -> list[str]:
    """Build the argv for one forked syntext search against a prebuilt index."""
    argv = [
        str(syntext_bin),
        "--repo-root",
        str(repo_root),
        "--index-dir",
        str(index_dir),
    ]
    if query.mode == "literal":
        # -F selects fixed-string matching, mirroring grep/rg below.
        argv.append("-F")
    argv.append(query.pattern)
    return argv
def syntext_bench_search_cmd(
syntext_bin: Path,
repo_root: Path,
index_dir: Path,
queries: list[QuerySpec],
iterations: int,
warmups: int,
) -> list[str]:
cmd = [
str(syntext_bin),
"--repo-root",
str(repo_root),
"--index-dir",
str(index_dir),
"bench-search",
"--iterations",
str(iterations),
"--warmups",
str(warmups),
]
for query in queries:
cmd.extend(["--query", query.name])
return cmd
def rg_search_cmd(repo_root: Path, query: QuerySpec) -> list[str]:
    """Build the argv for a ripgrep search over *repo_root*.

    ``--hidden`` widens rg's file set toward the other tools', but on its own
    it also makes rg descend into ``.git``, inflating match counts and
    timings; ``--glob !.git`` excludes it, matching grep's
    ``--exclude-dir=.git`` baseline in grep_search_cmd.
    """
    cmd = [
        "rg",
        "-n",
        "--no-heading",
        "--color",
        "never",
        "--hidden",
        "--glob",
        "!.git",
    ]
    if query.mode == "literal":
        # Fixed-string mode, consistent with the other tools' literal runs.
        cmd.append("-F")
    cmd.extend([query.pattern, str(repo_root)])
    return cmd
def grep_search_cmd(
    repo_root: Path, tracked_list: Path, query: QuerySpec, grep_mode: str
) -> str:
    """Build the shell command string for the grep baseline.

    "tracked" mode feeds the NUL-separated git file list to grep through
    xargs; any other mode falls back to a recursive grep that skips .git.
    """
    flag = "-F" if query.mode == "literal" else "-E"
    quoted_pattern = shlex.quote(query.pattern)
    if grep_mode == "tracked":
        quoted_list = shlex.quote(str(tracked_list))
        return f"xargs -0 grep -nIH {flag} -e {quoted_pattern} < {quoted_list}"
    quoted_root = shlex.quote(str(repo_root))
    return f"grep -RInH --exclude-dir=.git {flag} -e {quoted_pattern} {quoted_root}"
def benchmark_command(
    cmd: list[str] | str,
    *,
    cwd: Path,
    env: dict[str, str],
    warmups: int,
    iterations: int,
    shell: bool = False,
    allowed_codes: Iterable[int] = (0, 1),
) -> dict[str, float]:
    """Run *cmd* `warmups` untimed times, then `iterations` timed times.

    Returns the summarize() dict over the timed samples only.
    """

    def one_run() -> float:
        return run_timed(cmd, cwd=cwd, env=env, shell=shell, allowed_codes=allowed_codes)

    for _ in range(warmups):
        one_run()
    return summarize([one_run() for _ in range(iterations)])
def parse_tools(value: str) -> tuple[str, ...]:
    """Parse a comma-separated tool list, validating against the known set.

    Whitespace around names is ignored; order and duplicates are preserved.
    Raises argparse.ArgumentTypeError for an empty list or unknown names.
    """
    known = {"syntext", "rg", "grep"}
    selected = tuple(name.strip() for name in value.split(",") if name.strip())
    if not selected:
        raise argparse.ArgumentTypeError("tool list must not be empty")
    bad = [name for name in selected if name not in known]
    if bad:
        raise argparse.ArgumentTypeError(
            f"unknown tool(s): {', '.join(bad)}; expected one of syntext, rg, grep"
        )
    return selected
def syntext_batch_results(
    cmd: list[str],
    *,
    cwd: Path,
    env: dict[str, str],
) -> dict[str, dict[str, object]]:
    """Run a bench-search command and map query name -> {count, timings_ms}.

    The command is expected to print JSON with a top-level "queries" list;
    stderr is included in the error message when the process fails.
    """
    proc = subprocess.run(
        cmd,
        cwd=cwd,
        env=env,
        capture_output=True,
        text=True,
        check=False,
    )
    if proc.returncode != 0:
        raise RuntimeError(
            f"command failed with exit {proc.returncode}: {cmd!r}\n{proc.stderr}"
        )
    parsed = json.loads(proc.stdout)
    results: dict[str, dict[str, object]] = {}
    for entry in parsed.get("queries", []):
        results[entry["query"]] = {
            "count": entry["count"],
            "timings_ms": entry["timings_ms"],
        }
    return results
def report_tools(tools: tuple[str, ...], syntext_search_mode: str) -> list[str]:
    """Expand "syntext" into its two sub-mode labels when both are benchmarked."""
    labels: list[str] = []
    for tool in tools:
        if tool == "syntext" and syntext_search_mode == "both":
            labels += ["syntext-fork", "syntext-persistent"]
        else:
            labels.append(tool)
    return labels
def render_markdown_report(report: dict[str, object]) -> str:
    """Render the full Markdown benchmark report for *report* as one string.

    Sections: header metadata, syntext index-build timings, the per-query
    latency tables (search runs only), and a Notes section explaining
    methodology caveats — including a warning when the tools' match counts
    disagree for any query.
    """
    lines: list[str] = []
    lines.append("# External Benchmark")
    lines.append("")
    lines.append(f"- Repo: `{report['repo']}`")
    if report["preset"]:
        lines.append(f"- Preset: `{report['preset']}`")
    lines.append(f"- Tracked files: `{report['tracked_files']}`")
    lines.append(f"- Grep mode: `{report['grep_mode']}`")
    lines.append(f"- Tools: `{', '.join(report['tools'])}`")
    lines.append(f"- Syntext build iterations: `{report['build_iterations']}`")
    lines.append(
        f"- Search iterations per tool/query: `{report['search_iterations']}`"
    )
    lines.append(f"- Syntext search mode: `{report['syntext_search_mode']}`")
    if report["build_only"]:
        lines.append("- Mode: `build-only`")
    lines.append("")
    build_summary = report["syntext_index_build_ms"]
    size_summary = report["syntext_index_bytes"]
    lines.append("## Syntext index build")
    lines.append("")
    lines.append(
        f"- median: `{build_summary['median_ms']}` ms"
        f", min: `{build_summary['min_ms']}` ms"
        f", max: `{build_summary['max_ms']}` ms"
    )
    lines.append(
        f"- index bytes: median `{size_summary['median_bytes']}`"
        f", min `{size_summary['min_bytes']}`"
        f", max `{size_summary['max_bytes']}`"
    )
    lines.append("")
    # Latency tables only exist for search runs (empty in --build-only).
    if report["queries"]:
        lines.extend(render_markdown_tables(report))
        lines.append("")
    lines.append("## Notes")
    lines.append("")
    lines.append(
        "- `syntext` search latency excludes index build time, which is reported separately."
    )
    lines.append(
        "- `syntext` index byte totals measure the full on-disk index directory for each build iteration."
    )
    if report["syntext_search_mode"] == "both":
        lines.append(
            "- `syntext-fork` measures one process per query. `syntext-persistent` reuses one opened index for all queries in the run."
        )
    if report["grep_mode"] == "tracked" and "grep" in report["tools"]:
        lines.append(
            "- `grep` uses `git ls-files` as its file list. That is a better baseline than raw recursive grep, but it is still not ignore-aware in the same way as `rg`."
        )
    elif "grep" in report["tools"]:
        lines.append(
            "- `grep` uses recursive traversal and may include files that `rg` or `syntext` skip."
        )
    # A query "mismatches" when the tools disagree on its match count.
    mismatched = [
        result
        for result in report["queries"]
        if len(set(result["counts"].values())) != 1
    ]
    if mismatched:
        lines.append(
            "- Match counts differ for at least one query. Treat timing comparisons cautiously."
        )
        literal_mismatches = [
            result
            for result in mismatched
            if str(result["query"]).startswith("literal:")
        ]
        if literal_mismatches:
            lines.append(
                "- For literal queries, a lower `syntext` count often means the pattern is being matched as a mid-token substring inside larger identifiers. Current `syntext` coverage guarantees are strongest for token-aligned queries."
            )
    return "\n".join(lines)
def render_markdown_tables(report: dict[str, object]) -> list[str]:
    """Render the per-query search-latency Markdown table as a list of lines."""
    out = [
        "## Search latency",
        "",
        "| Query | Tool | Matches | Median ms | Min ms | Max ms |",
        "|---|---:|---:|---:|---:|---:|",
    ]
    for entry in report["queries"]:
        label = entry["query"]
        match_counts = entry["counts"]
        tool_timings = entry["timings_ms"]
        # One row per (query, tool) pair, in the report's tool order.
        for tool in report["tools"]:
            stats = tool_timings[tool]
            out.append(
                f"| `{label}` | `{tool}` | `{match_counts[tool]}` | "
                f"`{stats['median_ms']}` | `{stats['min_ms']}` | `{stats['max_ms']}` |"
            )
    return out
def main() -> int:
    """CLI entry point: benchmark syntext against rg/grep on one repository.

    Phases: argument/preset resolution, timed index builds into fresh temp
    directories, then (unless --build-only) per-query search benchmarking
    against one shared index. Returns 0 on success; configuration errors are
    raised as SystemExit with a message.
    """
    # NOTE(review): description=__doc__ is None unless a module docstring
    # exists above the imports — confirm the module actually defines one.
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("--repo", help="Git repository to benchmark")
    parser.add_argument(
        "--preset",
        help="Named benchmark preset from the preset catalog",
    )
    parser.add_argument(
        "--preset-file",
        default=str(DEFAULT_PRESET_FILE),
        help="JSON preset catalog to load, default: benchmarks/repo_presets.json",
    )
    parser.add_argument(
        "--list-presets",
        action="store_true",
        help="List available presets and exit",
    )
    parser.add_argument(
        "--syntext-bin",
        default=str(DEFAULT_SYNTEXT_BIN),
        help="Path to syntext binary, default: target/release/st",
    )
    parser.add_argument(
        "--query",
        action="append",
        type=parse_query,
        default=[],
        help="Query spec, for example literal:workspace or regex:LanguageServer(Id|Status)",
    )
    parser.add_argument(
        "--build-iterations",
        type=int,
        default=None,
        help="Number of syntext index builds to time",
    )
    parser.add_argument(
        "--search-iterations",
        type=int,
        default=None,
        help="Number of search iterations per tool and query",
    )
    parser.add_argument(
        "--warmups",
        type=int,
        default=None,
        help="Warmup runs before timed search iterations",
    )
    parser.add_argument(
        "--grep-mode",
        choices=("tracked", "recursive"),
        default="tracked",
        help="tracked uses git ls-files, recursive uses grep -R over the repo root",
    )
    parser.add_argument(
        "--json",
        action="store_true",
        help="Emit machine-readable JSON instead of Markdown",
    )
    parser.add_argument(
        "--markdown-table-only",
        action="store_true",
        help="Emit only the Markdown search-latency table section for easy paste into docs",
    )
    parser.add_argument(
        "--output",
        help="Write the rendered output to a file instead of stdout",
    )
    parser.add_argument(
        "--build-only",
        action="store_true",
        help="Measure repeated syntext index builds and index bytes, skip search benchmarking",
    )
    parser.add_argument(
        "--tools",
        type=parse_tools,
        help="Comma-separated tool set, for example syntext,rg or syntext,rg,grep",
    )
    parser.add_argument(
        "--syntext-search-mode",
        choices=("fork", "persistent", "both"),
        default="fork",
        help="fork runs one syntext process per search; persistent reuses one opened index per benchmark run; both reports both syntext modes side by side",
    )
    args = parser.parse_args()
    # Preset catalog is optional unless explicitly requested via --list-presets
    # or --preset.
    preset_file = Path(args.preset_file).resolve()
    presets = load_presets(preset_file) if preset_file.exists() else {}
    if args.list_presets:
        if not presets:
            raise SystemExit(f"no preset catalog found at {preset_file}")
        print_presets(presets)
        return 0
    selected_preset = None
    if args.preset:
        if args.preset not in presets:
            known = ", ".join(sorted(presets))
            raise SystemExit(f"unknown preset {args.preset!r}. Known presets: {known}")
        selected_preset = presets[args.preset]
    # Resolve the target repo: --repo wins; otherwise use the preset's
    # suggested local checkout when it exists on disk.
    repo_arg = args.repo
    if selected_preset and repo_arg is None:
        suggested_path = Path(selected_preset.suggested_local_path)
        if suggested_path.joinpath(".git").exists():
            repo_arg = str(suggested_path)
        else:
            raise SystemExit(
                f"preset {selected_preset.name!r} suggests {suggested_path}, "
                "but that repository is not present locally. Pass --repo explicitly."
            )
    if repo_arg is None:
        raise SystemExit("either --repo or --preset is required")
    repo_root = Path(repo_arg).resolve()
    syntext_bin = Path(args.syntext_bin).resolve()
    if not repo_root.joinpath(".git").exists():
        raise SystemExit(f"{repo_root} is not a Git repository")
    # Explicit --query flags override preset queries; build-only needs none.
    queries = list(args.query)
    if selected_preset and not queries:
        queries = list(selected_preset.queries)
    if not args.build_only and not queries:
        raise SystemExit("at least one --query is required")
    # Iteration counts resolve as: CLI flag > preset value > module default.
    build_iterations = args.build_iterations
    if build_iterations is None:
        build_iterations = (
            selected_preset.build_iterations
            if selected_preset
            else DEFAULT_BUILD_ITERATIONS
        )
    search_iterations = args.search_iterations
    if search_iterations is None:
        search_iterations = (
            selected_preset.search_iterations
            if selected_preset
            else DEFAULT_SEARCH_ITERATIONS
        )
    warmups = args.warmups
    if warmups is None:
        warmups = selected_preset.warmups if selected_preset else DEFAULT_WARMUPS
    tools = args.tools
    if selected_preset and tools is None:
        tools = selected_preset.tools
    if tools is None:
        tools = ("syntext", "rg", "grep")
    display_tools = report_tools(tools, args.syntext_search_mode)
    ensure_syntext_binary(syntext_bin)
    # LC_ALL=C keeps grep/rg locale-dependent behavior deterministic.
    env = dict(os.environ)
    env.setdefault("LC_ALL", "C")
    tracked = tracked_files(repo_root)
    # NOTE(review): counts inline (rather than calling tracked_file_count) so
    # the `tracked` bytes are fetched once and reused for grep's file list.
    tracked_count = sum(1 for part in tracked.split(b"\0") if part) if tracked else 0
    # Phase 1: time repeated index builds, each into a fresh temp directory so
    # no iteration benefits from a pre-existing index.
    build_samples: list[float] = []
    build_size_samples: list[int] = []
    for _ in range(build_iterations):
        with tempfile.TemporaryDirectory(prefix="syntext-bench-index-") as index_dir:
            cmd = [
                str(syntext_bin),
                "--repo-root",
                str(repo_root),
                "--index-dir",
                index_dir,
                "index",
                "--quiet",
            ]
            build_samples.append(
                run_timed(cmd, cwd=repo_root, env=env, allowed_codes=(0,))
            )
            build_size_samples.append(dir_size_bytes(Path(index_dir)))
    query_results: list[dict[str, object]] = []
    if not args.build_only:
        # Phase 2: build one shared index for the whole search phase.
        with tempfile.TemporaryDirectory(prefix="syntext-bench-search-") as index_dir:
            index_path = Path(index_dir)
            subprocess.run(
                [
                    str(syntext_bin),
                    "--repo-root",
                    str(repo_root),
                    "--index-dir",
                    str(index_path),
                    "index",
                    "--quiet",
                ],
                cwd=repo_root,
                env=env,
                check=True,
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL,
            )
            # Persist the tracked-file list for grep's xargs pipeline;
            # delete=False so the shell command can reopen it by name.
            with tempfile.NamedTemporaryFile(
                prefix="syntext-bench-files-", delete=False
            ) as filelist:
                filelist.write(tracked)
            tracked_list_path = Path(filelist.name)
            try:
                # Persistent-mode timings come from one batch bench-search run
                # that reuses a single opened index across all queries.
                syntext_batch: dict[str, dict[str, object]] | None = None
                if "syntext" in tools and args.syntext_search_mode in (
                    "persistent",
                    "both",
                ):
                    syntext_batch = syntext_batch_results(
                        syntext_bench_search_cmd(
                            syntext_bin,
                            repo_root,
                            index_path,
                            queries,
                            search_iterations,
                            warmups,
                        ),
                        cwd=repo_root,
                        env=env,
                    )
                for query in queries:
                    syntext_cmd = syntext_search_cmd(
                        syntext_bin, repo_root, index_path, query
                    )
                    rg_cmd = rg_search_cmd(repo_root, query)
                    grep_cmd = grep_search_cmd(
                        repo_root, tracked_list_path, query, args.grep_mode
                    )
                    counts: dict[str, int] = {}
                    timings: dict[str, dict[str, float]] = {}
                    if "syntext" in tools:
                        if args.syntext_search_mode == "both":
                            # Report batch (persistent) and per-process (fork)
                            # numbers side by side.
                            batch_entry = syntext_batch[query.name]
                            counts["syntext-persistent"] = int(batch_entry["count"])
                            timings["syntext-persistent"] = batch_entry["timings_ms"]
                            counts["syntext-fork"] = output_line_count(
                                syntext_cmd,
                                cwd=repo_root,
                                env=env,
                                allowed_codes=(0, 1),
                            )
                            timings["syntext-fork"] = benchmark_command(
                                syntext_cmd,
                                cwd=repo_root,
                                env=env,
                                warmups=warmups,
                                iterations=search_iterations,
                                allowed_codes=(0, 1),
                            )
                        elif syntext_batch is not None:
                            # persistent-only mode: reuse the batch results.
                            batch_entry = syntext_batch[query.name]
                            counts["syntext"] = int(batch_entry["count"])
                            timings["syntext"] = batch_entry["timings_ms"]
                        else:
                            # fork mode: one syntext process per measurement.
                            counts["syntext"] = output_line_count(
                                syntext_cmd,
                                cwd=repo_root,
                                env=env,
                                allowed_codes=(0, 1),
                            )
                            timings["syntext"] = benchmark_command(
                                syntext_cmd,
                                cwd=repo_root,
                                env=env,
                                warmups=warmups,
                                iterations=search_iterations,
                                allowed_codes=(0, 1),
                            )
                    if "rg" in tools:
                        # Exit code 1 means "no matches" for rg/grep; allowed.
                        counts["rg"] = output_line_count(
                            rg_cmd, cwd=repo_root, env=env, allowed_codes=(0, 1)
                        )
                        timings["rg"] = benchmark_command(
                            rg_cmd,
                            cwd=repo_root,
                            env=env,
                            warmups=warmups,
                            iterations=search_iterations,
                            allowed_codes=(0, 1),
                        )
                    if "grep" in tools:
                        # 123 is xargs' exit status when any grep invocation
                        # returns non-zero (e.g. a batch with no matches).
                        counts["grep"] = output_line_count(
                            grep_cmd,
                            cwd=repo_root,
                            env=env,
                            shell=True,
                            allowed_codes=(0, 1, 123),
                        )
                        timings["grep"] = benchmark_command(
                            grep_cmd,
                            cwd=repo_root,
                            env=env,
                            warmups=warmups,
                            iterations=search_iterations,
                            shell=True,
                            allowed_codes=(0, 1, 123),
                        )
                    query_results.append(
                        {
                            "query": query.name,
                            "counts": counts,
                            "timings_ms": timings,
                        }
                    )
            finally:
                tracked_list_path.unlink(missing_ok=True)
    report = {
        "repo": str(repo_root),
        "preset": selected_preset.name if selected_preset else None,
        "tracked_files": tracked_count,
        "grep_mode": args.grep_mode,
        "tools": display_tools,
        "syntext_search_mode": args.syntext_search_mode,
        "build_only": args.build_only,
        "build_iterations": build_iterations,
        "search_iterations": search_iterations,
        "warmups": warmups,
        "syntext_index_build_ms": summarize(build_samples),
        "syntext_index_bytes": summarize_int(build_size_samples, "bytes"),
        "queries": query_results,
    }
    # NOTE(review): these flag-conflict checks run only after all benchmarks
    # have executed; consider validating right after parse_args() so a bad
    # flag combination fails before minutes of work.
    if args.json and args.markdown_table_only:
        raise SystemExit("choose either --json or --markdown-table-only, not both")
    if args.build_only and args.markdown_table_only:
        raise SystemExit("--build-only cannot be combined with --markdown-table-only")
    if args.json:
        rendered = json.dumps(report, indent=2)
    elif args.markdown_table_only:
        rendered = "\n".join(render_markdown_tables(report))
    else:
        rendered = render_markdown_report(report)
    if args.output:
        # Ensure the written file ends with exactly one trailing newline.
        Path(args.output).write_text(rendered + ("\n" if not rendered.endswith("\n") else ""))
    else:
        print(rendered)
    return 0
if __name__ == "__main__":
    # Script entry point: propagate main()'s status as the process exit code.
    sys.exit(main())