# benchmark 0.8.0
#
# Nanosecond-precision benchmarking for dev, testing, and production. Zero-overhead
# core timing when disabled; optional std-powered collectors and zero-dependency
# metrics (Watch/Timer) for real service observability.
# (NOTE(review): the lines above are a stray README fragment that was pasted into
# this workflow file uncommented, which made the YAML unparseable; preserved here
# as comments. "Documentation" below was a dangling link label.)
# Documentation
---
# Manually-triggered performance workflow: runs perf-gated tests and Criterion
# benches, then compares results against checked-in baselines in perf_baselines/.
name: Perf

on:
  # Manual trigger only
  workflow_dispatch: {}

jobs:
  perf-tests:
    name: Perf Tests and Benches
    runs-on: ubuntu-latest
    # Only run when manually triggered on main branch
    if: ${{ github.event_name == 'workflow_dispatch' && github.ref_name == 'main' }}
    # One perf run per ref; never cancel an in-flight run — partial perf data
    # from a cancelled run would be worthless.
    concurrency:
      group: perf-${{ github.ref }}
      cancel-in-progress: false
    timeout-minutes: 45
    env:
      # NOTE(review): presumably gates perf-only code paths in the crate's
      # tests/benches — confirm the crate actually reads this env var.
      PERF_TESTS: "1"
    steps:
      # Checkout, stable Rust toolchain, and cargo/target caching.
      - uses: actions/checkout@v4
      - uses: dtolnay/rust-toolchain@stable
      - uses: Swatinem/rust-cache@v2

      # Optional diagnostics for visibility
      - name: Environment
        run: |
          rustc -Vv
          cargo -V
          lscpu || true
          cat /etc/os-release || true

      # Pre-build everything so the bench steps below start from a warm target dir.
      - name: Build (all-features) for benches
        run: cargo build --all-features

      # "-- --ignored" opts in tests that are skipped by default.
      # NOTE(review): assumes the perf tests are marked #[ignore] — confirm in the crate.
      - name: Run perf-gated tests (ignored by default)
        run: cargo test -F perf-tests -- --ignored

      - name: Run perf-gated benches
        run: cargo bench -F perf-tests

      # jq/python3 are consumed by scripts/compare_criterion_baseline.sh in the
      # comparison steps further down.
      - name: Install jq (for baseline comparison)
        run: |
          sudo apt-get update
          sudo apt-get install -y jq python3

      # Fixed measurement/warm-up times keep runs comparable to the stored baseline;
      # "--save-baseline current" names this run for later comparison.
      - name: Run watch_timer_hot (metrics) with fixed timings
        env:
          # Redundant with the job-level PERF_TESTS=1; kept for step-local clarity.
          PERF_TESTS: "1"
        run: |
          cargo bench -F "perf-tests metrics" --bench watch_timer_hot -- \
            --measurement-time 5 \
            --warm-up-time 2 \
            --save-baseline current

      # Debug aid: dump the Criterion output directory layout into the log.
      - name: Show Criterion output (watch_timer_hot)
        run: |
          echo "Listing target/criterion (if present)"
          ls -la target || true
          ls -la target/criterion || true
          find target/criterion -maxdepth 2 -type d -print || true

      # Append a watch_timer_hot comparison section to the job summary. Skips
      # cleanly (exit 0) when Criterion produced no output for this group.
      - name: Compare Criterion results to baselines (watch_timer_hot)
        env:
          # "0" = non-strict: report regressions without failing the job.
          # NOTE(review): confirm scripts/compare_criterion_baseline.sh reads this flag.
          PERF_COMPARE_STRICT: "0"
        run: |
          if [ ! -d target/criterion ]; then
            echo "No Criterion results found; skipping watch_timer_hot baseline comparison.";
            exit 0;
          fi
          if [ ! -d target/criterion/watch_timer_hot ] && [ ! -f target/criterion/new/estimates.json ]; then
            echo "Group directory target/criterion/watch_timer_hot not found and flat layout not detected; skipping.";
            exit 0;
          fi
          # printf with a leading \n emits a real blank line so the markdown
          # heading does not fuse with previous summary content (and matches the
          # sibling comparison steps, where bash's echo printed "\n" literally).
          printf '\n## watch_timer_hot baseline comparison\n' >> "$GITHUB_STEP_SUMMARY"
          # NOTE(review): "| tee" masks the script's exit status; harmless under
          # non-strict mode, but revisit before enabling PERF_COMPARE_STRICT=1.
          bash scripts/compare_criterion_baseline.sh watch_timer_hot perf_baselines/watch_timer_hot.json | tee -a "$GITHUB_STEP_SUMMARY"

      # Fixed measurement/warm-up times keep runs comparable to the stored baseline;
      # "--save-baseline current" names this run for later comparison.
      - name: Run timers (perf) with fixed timings
        env:
          # Redundant with the job-level PERF_TESTS=1; kept for step-local clarity.
          PERF_TESTS: "1"
        run: |
          cargo bench -F perf-tests --bench timers -- \
            --measurement-time 5 \
            --warm-up-time 2 \
            --save-baseline current

      # Append a timers comparison section to the job summary. Skips cleanly
      # (exit 0) when Criterion produced no output for this group.
      - name: Compare Criterion results to baselines (timers)
        env:
          # "0" = non-strict: report regressions without failing the job.
          # NOTE(review): confirm scripts/compare_criterion_baseline.sh reads this flag.
          PERF_COMPARE_STRICT: "0"
        run: |
          if [ ! -d target/criterion ]; then
            echo "No Criterion results found; skipping timers baseline comparison.";
            exit 0;
          fi
          if [ ! -d target/criterion/timers ] && [ ! -f target/criterion/new/estimates.json ]; then
            echo "Group directory target/criterion/timers not found and flat layout not detected; skipping.";
            exit 0;
          fi
          # Fix: bash's builtin echo does not interpret "\n" without -e, so the
          # old line wrote a literal backslash-n into the summary heading. printf
          # interprets the escape, producing a real separating blank line.
          printf '\n## timers baseline comparison\n' >> "$GITHUB_STEP_SUMMARY"
          # NOTE(review): "| tee" masks the script's exit status; harmless under
          # non-strict mode, but revisit before enabling PERF_COMPARE_STRICT=1.
          bash scripts/compare_criterion_baseline.sh timers perf_baselines/timers.json | tee -a "$GITHUB_STEP_SUMMARY"

      # Fixed measurement/warm-up times keep runs comparable to the stored baseline;
      # "--save-baseline current" names this run for later comparison.
      - name: Run histogram_hot (perf) with fixed timings
        env:
          # Redundant with the job-level PERF_TESTS=1; kept for step-local clarity.
          PERF_TESTS: "1"
        run: |
          cargo bench -F perf-tests --bench histogram_hot -- \
            --measurement-time 5 \
            --warm-up-time 2 \
            --save-baseline current

      # Append a histogram_hot comparison section to the job summary. Skips
      # cleanly (exit 0) when Criterion produced no output for this group.
      - name: Compare Criterion results to baselines (histogram_hot)
        env:
          # "0" = non-strict: report regressions without failing the job.
          # NOTE(review): confirm scripts/compare_criterion_baseline.sh reads this flag.
          PERF_COMPARE_STRICT: "0"
        run: |
          if [ ! -d target/criterion ]; then
            echo "No Criterion results found; skipping histogram_hot baseline comparison.";
            exit 0;
          fi
          if [ ! -d target/criterion/histogram_hot ] && [ ! -f target/criterion/new/estimates.json ]; then
            echo "Group directory target/criterion/histogram_hot not found and flat layout not detected; skipping.";
            exit 0;
          fi
          # Fix: bash's builtin echo does not interpret "\n" without -e, so the
          # old line wrote a literal backslash-n into the summary heading. printf
          # interprets the escape, producing a real separating blank line.
          printf '\n## histogram_hot baseline comparison\n' >> "$GITHUB_STEP_SUMMARY"
          # NOTE(review): "| tee" masks the script's exit status; harmless under
          # non-strict mode, but revisit before enabling PERF_COMPARE_STRICT=1.
          bash scripts/compare_criterion_baseline.sh histogram_hot perf_baselines/histogram_hot.json | tee -a "$GITHUB_STEP_SUMMARY"