---
# Benchmark workflow: runs the Criterion bench suites (histogram, phash,
# threshold, content, adaptive) across OS/arch/SIMD-tier combinations,
# archives raw + summarized results as artifacts, and on pull requests
# posts a combined summary as a PR comment.
name: Benchmarks

on:
  push:
    branches:
      - main
    paths:
      - 'benches/**'
      - 'src/**'
      - 'Cargo.toml'
      - 'Cargo.lock'
      - '.github/workflows/benchmark.yml'
  pull_request:
    paths:
      - 'benches/**'
      - 'src/**'
      - 'Cargo.toml'
      - 'Cargo.lock'
      - '.github/workflows/benchmark.yml'
  workflow_dispatch:

# Cancel superseded runs on pull requests only; pushes to main always run to
# completion so the benchmark history stays contiguous.
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: ${{ github.event_name == 'pull_request' }}

# Least-privilege default for the whole workflow; the aggregate job widens
# this locally so it can post the PR comment.
permissions:
  contents: read

env:
  CARGO_TERM_COLOR: always
  RUST_BACKTRACE: 1

jobs:
  benchmark:
    name: ${{ matrix.label }}
    strategy:
      # Let every matrix leg finish even if one fails, so partial results
      # are still collected and uploaded.
      fail-fast: false
      matrix:
        include:
          - os: macos-latest
            arch: aarch64
            tier: neon
            rustflags: ''
            label: macos-aarch64-neon
          - os: ubuntu-latest
            arch: x86_64
            tier: default
            rustflags: ''
            label: ubuntu-x86_64-default
          - os: ubuntu-latest
            arch: x86_64
            tier: native
            rustflags: '-C target-cpu=native'
            label: ubuntu-x86_64-native
          - os: ubuntu-latest
            arch: x86_64
            tier: ssse3-only
            rustflags: '-C target-feature=+ssse3,-avx,-avx2,-fma'
            label: ubuntu-x86_64-ssse3-only
          - os: windows-latest
            arch: x86_64
            tier: default
            rustflags: ''
            label: windows-x86_64-default
    runs-on: ${{ matrix.os }}
    env:
      # Per-leg SIMD tier is selected purely via RUSTFLAGS.
      RUSTFLAGS: ${{ matrix.rustflags }}
    steps:
      - uses: actions/checkout@v6

      - name: Install Rust
        run: rustup update stable --no-self-update && rustup default stable

      # CPU-info steps record what silicon each leg actually ran on, so the
      # SIMD-tier labels in the summary can be sanity-checked after the fact.
      - name: Print CPU info (Linux)
        if: runner.os == 'Linux'
        shell: bash
        run: |
          echo "=== /proc/cpuinfo (first flags line) ==="
          grep -m1 '^flags' /proc/cpuinfo || true
          echo "=== lscpu ==="
          lscpu || true

      - name: Print CPU info (macOS)
        if: runner.os == 'macOS'
        shell: bash
        run: |
          echo "=== sysctl machdep.cpu ==="
          sysctl machdep.cpu || true
          echo "=== uname -m ==="
          uname -m

      - name: Print CPU info (Windows)
        if: runner.os == 'Windows'
        shell: pwsh
        run: |
          Get-CimInstance Win32_Processor | Select-Object Name, Manufacturer, NumberOfCores, NumberOfLogicalProcessors | Format-List

      - name: Cache cargo build and registry
        uses: actions/cache@v5
        with:
          path: |
            ~/.cargo/registry
            ~/.cargo/git
            target
          key: ${{ runner.os }}-bench-${{ matrix.tier }}-${{ hashFiles('**/Cargo.lock') }}
          restore-keys: |
            ${{ runner.os }}-bench-${{ matrix.tier }}-
            ${{ runner.os }}-bench-

      # Each bench suite runs with continue-on-error so one failing suite
      # doesn't prevent the rest from producing results. GitHub's `bash`
      # shell runs with pipefail, so a cargo failure is not masked by `tee`.
      - name: Run benchmarks - histogram
        shell: bash
        run: cargo bench --bench histogram -- --output-format bencher | tee benchmark-histogram-${{ matrix.label }}.txt
        continue-on-error: true

      - name: Run benchmarks - phash
        shell: bash
        run: cargo bench --bench phash -- --output-format bencher | tee benchmark-phash-${{ matrix.label }}.txt
        continue-on-error: true

      - name: Run benchmarks - threshold
        shell: bash
        run: cargo bench --bench threshold -- --output-format bencher | tee benchmark-threshold-${{ matrix.label }}.txt
        continue-on-error: true

      - name: Run benchmarks - content
        shell: bash
        run: cargo bench --bench content -- --output-format bencher | tee benchmark-content-${{ matrix.label }}.txt
        continue-on-error: true

      - name: Run benchmarks - adaptive
        shell: bash
        run: cargo bench --bench adaptive -- --output-format bencher | tee benchmark-adaptive-${{ matrix.label }}.txt
        continue-on-error: true

      - name: Collect benchmark summary
        shell: bash
        run: |
          summary="benchmark-summary-${{ matrix.label }}.md"
          echo "## Benchmark Results for ${{ matrix.label }}" > "$summary"
          echo "" >> "$summary"
          echo "### System Information" >> "$summary"
          echo "- OS: ${{ matrix.os }}" >> "$summary"
          echo "- Arch: ${{ matrix.arch }}" >> "$summary"
          echo "- SIMD tier: ${{ matrix.tier }}" >> "$summary"
          echo "- Runner: ${{ runner.name }}" >> "$summary"
          echo "- Runner arch (GH): ${{ runner.arch }}" >> "$summary"
          echo "- RUSTFLAGS: \`${{ matrix.rustflags }}\`" >> "$summary"
          echo "- Date: $(date -u +"%Y-%m-%d %H:%M:%S UTC")" >> "$summary"
          echo "" >> "$summary"
          for bench in histogram phash threshold content adaptive; do
            file="benchmark-${bench}-${{ matrix.label }}.txt"
            if [ -f "$file" ]; then
              echo "### ${bench}" >> "$summary"
              echo "" >> "$summary"
              echo "\`\`\`" >> "$summary"
              grep "^test " "$file" >> "$summary" || echo "No results" >> "$summary"
              echo "\`\`\`" >> "$summary"
              echo "" >> "$summary"
            fi
          done
          cat "$summary"

      - name: Create benchmark archive
        shell: bash
        run: |
          mkdir -p benchmark-results
          mv benchmark-*.txt benchmark-results/ 2>/dev/null || true
          mv benchmark-summary-${{ matrix.label }}.md benchmark-results/ 2>/dev/null || true
          if [ -d "target/criterion" ]; then
            cp -r target/criterion benchmark-results/criterion-${{ matrix.label }} || true
          fi

      - name: Upload benchmark results
        uses: actions/upload-artifact@v7
        with:
          name: benchmark-results-${{ matrix.label }}
          path: benchmark-results/
          retention-days: 90

      - name: Upload Criterion detailed results
        uses: actions/upload-artifact@v7
        if: always()
        with:
          name: criterion-detailed-${{ matrix.label }}
          path: target/criterion/
          retention-days: 90
        continue-on-error: true

  aggregate-results:
    name: Aggregate benchmark results
    needs: benchmark
    runs-on: ubuntu-latest
    # Run even when some matrix legs failed, so partial results still get
    # aggregated and (on PRs) reported.
    if: always()
    # The github-script step below creates an issue comment on the PR, which
    # requires write access; keep everything else read-only.
    permissions:
      contents: read
      issues: write
      pull-requests: write
    steps:
      - name: Download all benchmark results
        uses: actions/download-artifact@v6
        with:
          path: all-results

      - name: Create combined summary
        shell: bash
        run: |
          echo "# Benchmark Results Summary" > BENCHMARK_SUMMARY.md
          echo "" >> BENCHMARK_SUMMARY.md
          echo "Date: $(date -u +"%Y-%m-%d %H:%M:%S UTC")" >> BENCHMARK_SUMMARY.md
          echo "" >> BENCHMARK_SUMMARY.md
          for os_dir in all-results/benchmark-results-*/; do
            if [ -d "$os_dir" ]; then
              for summary in "$os_dir"benchmark-summary-*.md; do
                if [ -f "$summary" ]; then
                  echo "" >> BENCHMARK_SUMMARY.md
                  cat "$summary" >> BENCHMARK_SUMMARY.md
                  echo "" >> BENCHMARK_SUMMARY.md
                  echo "---" >> BENCHMARK_SUMMARY.md
                fi
              done
            fi
          done
          cat BENCHMARK_SUMMARY.md

      - name: Upload combined results
        uses: actions/upload-artifact@v7
        with:
          name: benchmark-results-combined
          path: |
            BENCHMARK_SUMMARY.md
            all-results/
          retention-days: 90

      - name: Comment PR with benchmark results
        if: github.event_name == 'pull_request'
        uses: actions/github-script@v9
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
          script: |
            const fs = require('fs');
            const summary = fs.readFileSync('BENCHMARK_SUMMARY.md', 'utf8');
            const comment = `## Benchmark Results\n\n${summary}\n\n<details>\n<summary>View detailed results</summary>\n\nDetailed Criterion results have been uploaded as artifacts. Download them from the workflow run to view charts and detailed statistics.\n\n</details>`;
            github.rest.issues.createComment({
              issue_number: context.issue.number,
              owner: context.repo.owner,
              repo: context.repo.repo,
              body: comment
            });
        continue-on-error: true