# Continuous-integration pipeline: tests, lints, coverage, security audit,
# docs, cross-platform builds, examples, and Criterion benchmark tracking.
name: CI

on:
  push:
    branches: [main]
  pull_request:
    branches: [main]

# Cancel superseded in-flight runs of the same ref (PRs only — pushes to
# main always run to completion so benchmark history stays contiguous).
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: ${{ github.event_name == 'pull_request' }}

env:
  # Force colored cargo output in CI logs.
  CARGO_TERM_COLOR: always

jobs:
test:
name: Tests (stable/beta/MSRV)
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]
rust: [stable, beta, 1.70.0]
steps:
- uses: actions/checkout@v4
- name: Install Rust
uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ matrix.rust }}
components: rustfmt, clippy
- name: Cache
uses: actions/cache@v4
with:
path: |
~/.cargo/registry
~/.cargo/git
target
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
- name: Run tests (default features, no benches)
run: cargo test --verbose
- name: Run doc tests (default features)
run: cargo test --doc
  # Lint the full feature surface on stable; any clippy warning fails CI.
  clippy:
    name: Clippy (stable)
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Install Rust
        uses: dtolnay/rust-toolchain@stable
        with:
          components: clippy
      - name: Run clippy
        # `-D warnings` promotes every lint to a hard error.
        run: cargo clippy --all-features -- -D warnings
  # Enforce rustfmt formatting; `--check` fails without rewriting files.
  fmt:
    name: Rustfmt (stable)
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Install Rust
        uses: dtolnay/rust-toolchain@stable
        with:
          components: rustfmt
      - name: Run rustfmt
        run: cargo fmt --all -- --check
coverage:
name: Coverage (llvm-cov)
runs-on: ubuntu-latest
permissions:
contents: read
steps:
- uses: actions/checkout@v4
- name: Install Rust (stable)
uses: dtolnay/rust-toolchain@stable
with:
components: llvm-tools-preview
- name: Cache (cargo registry/git/target)
uses: actions/cache@v4
with:
path: |
~/.cargo/registry
~/.cargo/git
target
key: coverage-${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
- name: Install cargo-llvm-cov
uses: taiki-e/install-action@cargo-llvm-cov
- name: Generate coverage summary and enforce threshold (85%)
shell: bash
run: |
set -euxo pipefail
cargo llvm-cov --summary-only --workspace \
--fail-under-lines 85 \
--ignore-filename-regex '^(dev/|docs/|benches/|examples/)'
- name: Generate LCOV report
shell: bash
run: |
set -euxo pipefail
mkdir -p target/coverage
cargo llvm-cov --workspace --lcov --output-path target/coverage/lcov.info \
--ignore-filename-regex '^(dev/|docs/|benches/|examples/)'
- name: Upload LCOV artifact
uses: actions/upload-artifact@v4
with:
name: coverage-lcov
path: target/coverage/lcov.info
security:
name: Security audit
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install Rust
uses: dtolnay/rust-toolchain@stable
- name: Install cargo-audit
uses: taiki-e/install-action@cargo-audit
- name: Generate Cargo.lock (if missing)
run: cargo generate-lockfile
- name: Run cargo-audit
run: cargo audit
  # Verify the crate still builds and lints cleanly on the minimum
  # supported Rust version (1.70.0) — keep in sync with the test matrix.
  msrv:
    name: MSRV (Rust 1.70.0)
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Install Rust 1.70.0
        uses: dtolnay/rust-toolchain@1.70.0
        with:
          components: rustfmt, clippy
      - name: Cargo check (all-features)
        run: cargo check --all-features
      - name: Rustfmt check (MSRV)
        run: cargo fmt --all -- --check
      - name: Clippy (MSRV)
        run: cargo clippy --all-features -- -D warnings
  # Build API docs with rustdoc warnings treated as errors; upload the
  # rendered HTML as an artifact.
  docs:
    name: Docs (rustdoc warnings denied)
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Install Rust
        uses: dtolnay/rust-toolchain@stable
      - name: Build documentation
        env:
          # Deny rustdoc warnings (broken intra-doc links, etc.).
          RUSTDOCFLAGS: -D warnings
        run: cargo doc --no-deps
      - name: Upload docs artifact
        uses: actions/upload-artifact@v4
        with:
          name: docs
          path: target/doc
build-matrix:
name: Cross-Platform Compatibility
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]
build: [default, all-features, no-default-features]
steps:
- uses: actions/checkout@v4
- name: Install Rust
uses: dtolnay/rust-toolchain@stable
- name: Cache
uses: actions/cache@v4
with:
path: |
~/.cargo/registry
~/.cargo/git
target
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
- name: Build (default features)
if: matrix.build == 'default'
run: cargo build --verbose
- name: Build (all features)
if: matrix.build == 'all-features'
run: cargo build --verbose --all-features
- name: Build (no default features)
if: matrix.build == 'no-default-features'
run: cargo build --verbose --no-default-features
  # Ensure the crate packages and would publish cleanly, without pushing
  # anything to crates.io.
  publish-dry-run:
    name: Publish (dry run)
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Install Rust
        uses: dtolnay/rust-toolchain@stable
      - name: Cargo package and publish dry-run
        # NOTE(review): `cargo publish --dry-run` performs the packaging
        # step itself, so the explicit `cargo package` is redundant (but
        # harmless) — kept for an earlier, clearer failure message.
        run: |
          cargo package
          cargo publish --dry-run
  # Build and lint all examples, run two of them, and archive their output.
  examples:
    name: Examples (build, lint, run quick_start)
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Install Rust
        uses: dtolnay/rust-toolchain@stable
      - name: Build examples
        run: cargo build --examples
      - name: Clippy (examples)
        run: cargo clippy --examples -- -D warnings
      - name: Run quick_start example
        # `tee` keeps the output visible in the log AND captures it for the
        # artifact upload below.
        run: |
          mkdir -p examples-out
          cargo run --example quick_start --release | tee examples-out/quick_start.txt
      - name: Run streaming_rate_window example
        run: |
          mkdir -p examples-out
          cargo run --example streaming_rate_window --release | tee examples-out/streaming_rate_window.txt
      - name: Upload example outputs
        uses: actions/upload-artifact@v4
        with:
          name: example-outputs
          path: examples-out
          # Fail loudly if the examples produced nothing.
          if-no-files-found: error
          retention-days: 7
smoke-bench:
name: Benchmarks (smoke)
if: ${{ github.event_name == 'pull_request' }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install Rust
uses: dtolnay/rust-toolchain@stable
- name: Cache
uses: actions/cache@v4
with:
path: |
~/.cargo/registry
~/.cargo/git
target
key: smoke-bench-${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
- name: Run Criterion (short)
run: |
# Shorten warmup/measurement for CI speed; pass through to Criterion
cargo bench -- -w 0.3 -m 1.0 -n 20
- name: Upload Criterion reports
uses: actions/upload-artifact@v4
with:
name: criterion-reports
path: target/criterion
benchmark-regression:
name: Benchmark Regression (Criterion)
if: ${{ github.event_name == 'push' || github.event_name == 'pull_request' }}
runs-on: ubuntu-latest
permissions:
contents: write
deployments: write
pull-requests: write
steps:
- uses: actions/checkout@v4
- name: Install Rust
uses: dtolnay/rust-toolchain@stable
- name: Cache
uses: actions/cache@v4
with:
path: |
~/.cargo/registry
~/.cargo/git
target
key: bench-regression-${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
- name: Run Criterion benchmarks
run: |
# Run the explicit bench target to ensure benches execute
cargo bench --bench metrics_bench --all-features
- name: Inspect Criterion output (debug)
shell: bash
run: |
set -euo pipefail
echo "Listing target/criterion contents (if any):"
ls -R target/criterion || true
echo "Preview an estimates.json if present:"
first=$(ls target/criterion/*/new/estimates.json 2>/dev/null | head -n1 || true)
if [ -n "$first" ]; then
echo "Showing: $first"
head -n 50 "$first"
else
echo "No estimates.json files found yet."
fi
- name: Aggregate Criterion results to custom JSON (smaller is better)
shell: bash
id: aggregate
run: |
set -euo pipefail
OUT=criterion-summary.json
: > "$OUT"
shopt -s globstar nullglob
found=0
# Criterion v0.5 typically writes to target/criterion/**/new/estimates.json
# Older versions may write to target/criterion/**/estimates.json
echo '[' >> "$OUT"
first=true
for f in target/criterion/**/new/estimates.json target/criterion/**/estimates.json; do
[ -f "$f" ] || continue
found=1
if [[ "$f" == */new/estimates.json ]]; then
benchdir=$(dirname "$(dirname "$f")")
name=$(basename "$benchdir")
else
benchdir=$(dirname "$f")
name=$(basename "$benchdir")
fi
mean=$(jq -r '.mean.point_estimate' "$f")
# Fallback to 0 if parsing fails
if [ -z "$mean" ] || [ "$mean" = "null" ]; then
mean=0
fi
if [ "$first" = true ]; then
first=false
else
echo ',' >> "$OUT"
fi
printf '{"name":"%s","value":%s,"unit":"ns/op"}' "$name" "$mean" >> "$OUT"
done
echo ']' >> "$OUT"
if [ "$found" -eq 0 ]; then
echo "No Criterion estimates found under target/criterion/**/(new/)estimates.json" >&2
echo "Directory tree:" >&2
ls -R target/criterion >&2 || true
echo "found=false" >> "$GITHUB_OUTPUT"
exit 0
else
echo "found=true" >> "$GITHUB_OUTPUT"
fi
- name: Upload Criterion summary artifact
if: steps.aggregate.outputs.found == 'true'
uses: actions/upload-artifact@v4
with:
name: criterion-summary
path: criterion-summary.json
if-no-files-found: error
retention-days: 7
- name: Upload raw Criterion reports (optional)
if: steps.aggregate.outputs.found == 'true'
uses: actions/upload-artifact@v4
with:
name: criterion-reports-raw
path: target/criterion
if-no-files-found: ignore
retention-days: 7
- name: Benchmark Regression Check
if: steps.aggregate.outputs.found == 'true'
uses: benchmark-action/github-action-benchmark@v1
continue-on-error: true
with:
name: Criterion
tool: 'customSmallerIsBetter'
output-file-path: 'criterion-summary.json'
github-token: ${{ secrets.GITHUB_TOKEN }}
auto-push: ${{ github.ref == 'refs/heads/main' }}
gh-pages-branch: 'gh-pages'
benchmark-data-dir-path: 'benchmark-data'
fail-on-alert: false
alert-threshold: '200%'
- name: Ensure benchmark viewer on gh-pages (main.js)
if: steps.aggregate.outputs.found == 'true'
shell: bash
run: |
set -euo pipefail
git fetch origin gh-pages:gh-pages || true
git fetch origin main:main || true
# Switch to gh-pages in-place
git switch gh-pages
changed=0
if [ ! -f main.js ]; then
# Pull the viewer from main branch to avoid here-doc quoting issues
git show origin/main:docs/benchmarks/main.js > main.js
git add main.js
git -c user.name="github-action-benchmark" -c user.email="github@users.noreply.github.com" commit -m "chore(bench): ensure viewer main.js present"
git push origin gh-pages:gh-pages --no-verify
changed=1
fi
# Switch back to the original ref if needed
git switch -