name: Benchmarks

on:
  workflow_dispatch:

env:
  CARGO_TERM_COLOR: always

jobs:
  # ==========================================================================
  # Phase 1: Build Rust Benchmark Binaries on ARM Runner
  # ==========================================================================
  build-rust-benchmarks:
    name: Build Rust Benchmarks
    runs-on: ubuntu-22.04-arm
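    # Build natively on aarch64 so the binaries run unmodified on the i.MX 8M Plus target in Phase 2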
    steps:
      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1

      - name: Install Rust toolchain
        run: |
          rustup toolchain install stable --profile minimal

      - name: Build benchmark binaries
        run: |
          # Build all benchmark binaries in release mode
          cargo bench --no-run
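          # The compiled harness binaries land in target/release/deps with hashed names, hence the glob below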

          # Find and copy benchmark binaries (using -print0/xargs for robustness)
          mkdir -p benchmark-binaries
          find target/release/deps -maxdepth 1 -type f -executable -name "serialization*" \
            ! -name '*.so' ! -name '*.a' ! -name '*.d' ! -name '*.rlib' -print0 | \
            xargs -0 -I{} sh -c 'cp "$1" benchmark-binaries/ && echo "Copied: $(basename "$1")"' sh '{}'

          ls -la benchmark-binaries/

      - name: Upload benchmark binaries
        uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
        with:
          name: benchmark-binaries-aarch64
          path: benchmark-binaries/
          retention-days: 7

  # ==========================================================================
  # Phase 1b: Build C++ Benchmark Binaries on ARM Runner
  # ==========================================================================
  build-cpp-benchmarks:
    name: Build C++ Benchmarks
    runs-on: ubuntu-22.04-arm
    steps:
      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1

      - name: Install build tools
        run: |
          sudo apt-get update
          sudo apt-get install -y cmake ninja-build

      - name: Install Rust toolchain
        run: rustup toolchain install stable --profile minimal

      - name: Build edgefirst-schemas shared library
        run: cargo build --release
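        # Produces target/release/libedgefirst_schemas.so, which the C++ benchmarks load via LD_LIBRARY_PATH at run time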

      - name: Configure and build C++ benchmarks (native aarch64)
        run: |
          cmake -S benches/cpp -B benches/cpp/build/aarch64 -G Ninja \
              -DCMAKE_BUILD_TYPE=Release
          cmake --build benches/cpp/build/aarch64 -j
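          # Expected to produce bench_edgefirst, bench_fastcdr and parity_test, linked against the Rust library built above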

      - name: Stage binaries
        run: |
          mkdir -p cpp-benchmark-binaries
          cp benches/cpp/build/aarch64/bench_edgefirst cpp-benchmark-binaries/
          cp benches/cpp/build/aarch64/bench_fastcdr   cpp-benchmark-binaries/
          cp benches/cpp/build/aarch64/parity_test     cpp-benchmark-binaries/
          cp target/release/libedgefirst_schemas.so    cpp-benchmark-binaries/

      - name: Upload binaries
        uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
        with:
          name: cpp-benchmark-binaries-aarch64
          path: cpp-benchmark-binaries/
          retention-days: 7

  # ==========================================================================
  # Phase 2: Run Rust Benchmarks on Target Hardware (NXP i.MX 8M Plus)
  # ==========================================================================
  run-rust-benchmarks:
    name: Run Rust on i.MX 8M Plus
    needs: build-rust-benchmarks
    runs-on: nxp-imx8mp-latest
    timeout-minutes: 60
    steps:
      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1

      - name: Download benchmark binaries
        uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
        with:
          name: benchmark-binaries-aarch64
          path: benchmark-binaries/

      - name: Make binaries executable
        run: chmod +x benchmark-binaries/*

      - name: Run benchmarks
        env:
          BENCH_FAST: "1"
        run: |
          mkdir -p benchmark-results
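          # Record host details up front so the numbers below can be tied to the exact board and kernel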

          echo "=== Running benchmarks on $(hostname) ==="
          echo "BENCH_FAST=$BENCH_FAST (reduced benchmark variants for CI)"
          uname -a
          head -n 30 /proc/cpuinfo

          # Run each benchmark binary and save output
          for bench in benchmark-binaries/*; do
            if [ -f "$bench" ] && [ -x "$bench" ]; then
              name=$(basename "$bench")
              echo ""
              echo "=== Running $name ==="

              # Run benchmark with Criterion, save both stdout and JSON
              # Use --save-baseline to generate JSON data
              "$bench" --bench --save-baseline github-ci 2>&1 | tee "benchmark-results/${name}.txt"
            fi
          done

          # Package Criterion JSON data if available
          if [ -d "target/criterion" ]; then
            echo "Packaging Criterion JSON data..."
            tar -czf benchmark-results/criterion-data.tar.gz -C target criterion
          fi

      - name: Upload raw Rust benchmark results
        uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
        with:
          name: rust-benchmark-results-raw
          path: benchmark-results/
          retention-days: 7

  # ==========================================================================
  # Phase 2b: Run C++ Benchmarks on Target Hardware (NXP i.MX 8M Plus)
  # ==========================================================================
  run-cpp-benchmarks:
    name: Run C++ on i.MX 8M Plus
    needs: build-cpp-benchmarks
    runs-on: nxp-imx8mp-latest
    timeout-minutes: 30
    steps:
      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1

      - uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
        with:
          name: cpp-benchmark-binaries-aarch64
          path: cpp-benchmark-binaries/

      - name: Run parity test (gates benchmark trust)
        run: |
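          # If the parity check fails, this step (and the job) fails before any timing results are produced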
          chmod +x cpp-benchmark-binaries/*
          cd cpp-benchmark-binaries
          LD_LIBRARY_PATH=. ./parity_test

      - name: Run C++ benchmarks
        env:
          BENCH_FAST: "1"
        run: |
          mkdir -p cpp-benchmark-results
          cd cpp-benchmark-binaries
          for bin in bench_*; do
              impl="${bin#bench_}"
              echo "=== Running $bin ==="
              LD_LIBRARY_PATH=. ./"$bin" --benchmark_format=json \
                     --benchmark_out="../cpp-benchmark-results/${impl}.json" \
                     --benchmark_min_time=0.05s \
                     2>&1 | tee "../cpp-benchmark-results/${impl}.txt"
          done

      - uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
        with:
          name: cpp-benchmark-results-raw
          path: cpp-benchmark-results/
          retention-days: 7

  # ==========================================================================
  # Phase 3: Run Python Benchmarks on Target Hardware (NXP i.MX 8M Plus)
  # ==========================================================================
  run-python-benchmarks:
    name: Run Python on i.MX 8M Plus
    runs-on: nxp-imx8mp-latest
    timeout-minutes: 30
    steps:
      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1

      - name: Set up Python virtual environment
        run: |
          # Create venv and install dependencies
          python3 -m venv venv
          source venv/bin/activate
          pip install --upgrade pip
          pip install -e ".[bench]"

      - name: Run Python benchmarks
        env:
          BENCH_FAST: "1"
        run: |
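          # Each run step starts a fresh shell, so the venv created above must be re-activated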
          source venv/bin/activate
          mkdir -p python-benchmark-results

          echo "=== Running Python benchmarks on $(hostname) ==="
          echo "BENCH_FAST=$BENCH_FAST (reduced benchmark variants for CI)"
          uname -a
          python3 --version

          # Run benchmarks with JSON output
          pytest benches/python/bench_serialization.py \
            --benchmark-only \
            --benchmark-json=python-benchmark-results/benchmark.json \
            --benchmark-columns=min,max,mean,stddev \
            --benchmark-disable-gc \
            -v 2>&1 | tee python-benchmark-results/benchmark.txt

      - name: Upload Python benchmark results
        uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
        with:
          name: python-benchmark-results-raw
          path: python-benchmark-results/
          retention-days: 7

  # ==========================================================================
  # Phase 4: Process and Publish Results
  # ==========================================================================
  process-benchmarks:
    name: Process Benchmark Results
    needs: [run-rust-benchmarks, run-python-benchmarks, run-cpp-benchmarks]
    runs-on: ubuntu-22.04-arm
    steps:
      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1

      - name: Download Rust benchmark results
        uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
        with:
          name: rust-benchmark-results-raw
          path: rust-benchmark-results/

      - name: Download Python benchmark results
        uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
        with:
          name: python-benchmark-results-raw
          path: python-benchmark-results/

      - name: Download C++ benchmark results
        uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
        with:
          name: cpp-benchmark-results-raw
          path: cpp-benchmark-results/

      - name: Collate raw results
        run: |
          # Combine Rust text output for the full-log summary
          cat rust-benchmark-results/*.txt > rust-benchmark-output.txt 2>/dev/null || true

          # Copy Python benchmark text output
          cp python-benchmark-results/benchmark.txt python-benchmark-output.txt 2>/dev/null || true

          # Combine both for full output
          echo "=== Rust Benchmarks ===" > benchmark-output.txt
          cat rust-benchmark-output.txt >> benchmark-output.txt 2>/dev/null || echo "No Rust benchmarks" >> benchmark-output.txt
          echo "" >> benchmark-output.txt
          echo "=== Python Benchmarks ===" >> benchmark-output.txt
          cat python-benchmark-output.txt >> benchmark-output.txt 2>/dev/null || echo "No Python benchmarks" >> benchmark-output.txt

      - name: Render benchmark report
        id: bench
        run: |
          mkdir -p combined-results
          cp rust-benchmark-results/criterion-data.tar.gz combined-results/ 2>/dev/null || true
          cp python-benchmark-results/benchmark.json combined-results/ 2>/dev/null || true
          cp cpp-benchmark-results/*.json combined-results/ 2>/dev/null || true
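          # render_benchmarks.py consumes the Criterion tarball, pytest-benchmark JSON and Google Benchmark JSON staged above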

          python3 scripts/render_benchmarks.py combined-results > benchmark-summary.md

          # Count benchmarks for output
          # grep -c prints 0 but exits non-zero on no match, so don't append a second value via ||
          BENCH_COUNT=$(grep -cE "Benchmarking|PASSED" benchmark-output.txt 2>/dev/null || true)
          echo "bench_count=${BENCH_COUNT:-0}" >> "$GITHUB_OUTPUT"

      - name: Write job summary
        run: |
          cat benchmark-summary.md >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "---" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "<details>" >> $GITHUB_STEP_SUMMARY
          echo "<summary>📋 Full Benchmark Output</summary>" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo '```' >> $GITHUB_STEP_SUMMARY
          cat benchmark-output.txt >> $GITHUB_STEP_SUMMARY
          echo '```' >> $GITHUB_STEP_SUMMARY
          echo "</details>" >> $GITHUB_STEP_SUMMARY

      - name: Upload processed results
        uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
        with:
          name: benchmark-results
          path: |
            benchmark-output.txt
            benchmark-summary.md
          retention-days: 30