# astrora_core 0.1.1
#
# Astrora - Rust-backed astrodynamics library - core computational components
# Documentation
name: Continuous Benchmarking

on:
  push:
    branches: [main]
  pull_request:
    branches: [main]
  workflow_dispatch:  # Allow manual triggering

# Serialize runs per ref: the main-branch step auto-pushes benchmark data to
# the gh-pages branch, and two overlapping runs would race on that push
# (non-fast-forward failure). cancel-in-progress stays false so queued runs
# still record their results instead of being dropped.
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: false

# contents: write  — needed to push benchmark history to the gh-pages branch
# deployments: write — needed for the GitHub Pages deployment of the charts
permissions:
  contents: write
  deployments: write

jobs:
  benchmark:
    name: Run Performance Benchmarks
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          # Quoted so YAML does not read the version as the float 3.11
          python-version: '3.11'

      - name: Set up Rust
        uses: dtolnay/rust-toolchain@stable
        with:
          toolchain: stable

      # Cache compiled crates, the registry, and cargo-installed binaries.
      # Keyed on Cargo.lock so the cache is rebuilt whenever dependency
      # versions change; restore-keys allows a partial (stale) cache hit.
      - name: Cache Rust dependencies
        uses: actions/cache@v4
        with:
          path: |
            ~/.cargo/bin/
            ~/.cargo/registry/index/
            ~/.cargo/registry/cache/
            ~/.cargo/git/db/
            target/
          key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
          restore-keys: |
            ${{ runner.os }}-cargo-

      - name: Install uv
        uses: astral-sh/setup-uv@v4
        with:
          enable-cache: true
          cache-dependency-glob: "pyproject.toml"

      - name: Create virtual environment
        run: uv venv .venv

      - name: Install dependencies
        run: |
          source .venv/bin/activate
          uv pip install -e ".[dev]"

      # Release build is required: debug-mode Rust would skew every timing.
      - name: Build Rust extension (release mode for accurate benchmarks)
        run: |
          source .venv/bin/activate
          uv pip install maturin
          maturin develop --release

      # A benchmark failure fails the job (continue-on-error defaults to
      # false, so it is not set explicitly).
      - name: Run benchmarks and generate JSON output
        run: |
          source .venv/bin/activate
          pytest tests/benchmark_numpy_overhead.py \
            --benchmark-only \
            --benchmark-json=benchmark_results.json \
            --benchmark-warmup=on \
            --benchmark-min-rounds=5 \
            --benchmark-columns=min,max,mean,stddev,median,ops,rounds

      # On pushes to main: append the new result to the history stored on the
      # gh-pages branch (auto-push) and comment on regressions without
      # failing the run.
      - name: Store benchmark result (main branch)
        if: github.ref == 'refs/heads/main' && github.event_name == 'push'
        uses: benchmark-action/github-action-benchmark@v1
        with:
          tool: 'pytest'
          output-file-path: benchmark_results.json
          github-token: ${{ secrets.GITHUB_TOKEN }}
          auto-push: true
          # Alert when a benchmark runs at 120% of the stored baseline time,
          # i.e. a 20% regression (the action's default threshold is 200%).
          alert-threshold: '120%'
          comment-on-alert: true
          fail-on-alert: false
          # alert-comment-cc-users: '@cachemcclure'  # Optional: notify maintainer

      # On pull requests: compare against the gh-pages baseline, never push,
      # and fail the check when a regression crosses the threshold.
      - name: Compare benchmark result (PR)
        if: github.event_name == 'pull_request'
        uses: benchmark-action/github-action-benchmark@v1
        with:
          tool: 'pytest'
          output-file-path: benchmark_results.json
          github-token: ${{ secrets.GITHUB_TOKEN }}
          auto-push: false
          # Alert on 20% regression for PRs
          alert-threshold: '120%'
          comment-on-alert: true
          fail-on-alert: true
          # Note: Comparison against main branch will happen automatically
          # via GitHub Pages data (no external-data-json-path needed)

      # Keep the raw JSON even when earlier steps fail, so a regression can
      # be inspected offline after the run.
      - name: Upload benchmark artifacts
        uses: actions/upload-artifact@v4
        if: always()
        with:
          name: benchmark-results
          path: benchmark_results.json
          retention-days: 90