name: CI
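# Continuous integration for the Rust workspace: lint, cross-platform tests, coverage gates,
# security audit, docs, release builds, benchmarks, and conformance/E2E/snapshot/property suites.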
on:
push:
branches: [main, master]
pull_request:
branches: [main, master]
workflow_dispatch:
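# One run per ref at a time; newer pushes cancel in-progress runs for the same ref.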
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
permissions:
contents: read
env:
CARGO_TERM_COLOR: always
RUST_BACKTRACE: 1
jobs:
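# rustfmt and clippy on the nightly toolchain; clippy warnings are treated as errors.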
lint:
name: Lint
runs-on: ubuntu-latest
timeout-minutes: 15
steps:
- uses: actions/checkout@v4
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@nightly
with:
components: rustfmt, clippy
- name: Cache cargo
uses: Swatinem/rust-cache@v2
- name: Check formatting
run: cargo fmt --all -- --check
- name: Run clippy
run: cargo clippy --all-targets --all-features -- -D warnings
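# Unit and doc tests on Linux, macOS, and Windows; Windows failures are currently non-blocking.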
test:
name: Test (${{ matrix.os }})
runs-on: ${{ matrix.os }}
timeout-minutes: 30
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]
steps:
- uses: actions/checkout@v4
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@nightly
- name: Cache cargo
uses: Swatinem/rust-cache@v2
with:
key: ${{ matrix.os }}
- name: Run tests
run: cargo test --all-features --verbose
continue-on-error: ${{ matrix.os == 'windows-latest' }}
- name: Run doc tests
run: cargo test --doc
continue-on-error: ${{ matrix.os == 'windows-latest' }}
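# Line coverage via cargo-llvm-cov: an overall gate, per-module thresholds (warnings only),
# Codecov upload, HTML artifacts, and a sticky PR comment.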
coverage:
name: Coverage
runs-on: ubuntu-latest
timeout-minutes: 30
# The PR comment step needs pull-requests: write; the top-level permissions grant contents: read only.
permissions:
contents: read
pull-requests: write
steps:
- uses: actions/checkout@v4
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@nightly
with:
components: llvm-tools-preview
- name: Cache cargo
uses: Swatinem/rust-cache@v2
- name: Install cargo-llvm-cov
uses: taiki-e/install-action@cargo-llvm-cov
- name: Generate coverage report
run: |
# Generate JSON report for analysis
cargo llvm-cov --workspace \
--ignore-filename-regex "(tests/|benches/|examples/)" \
--json \
--output-path coverage.json
# Generate HTML report for artifacts
cargo llvm-cov --workspace \
--ignore-filename-regex "(tests/|benches/|examples/)" \
--html \
--output-dir coverage-html
# Generate codecov format
cargo llvm-cov --workspace \
--ignore-filename-regex "(tests/|benches/|examples/)" \
--codecov \
--output-path codecov.json
- name: Check coverage thresholds
id: coverage_check
run: |
# Extract overall coverage
OVERALL=$(jq -r '.data[0].totals.lines.percent // 0' coverage.json)
echo "overall_coverage=$OVERALL" >> $GITHUB_OUTPUT
# Per-module coverage thresholds: path pattern -> minimum percent
declare -A THRESHOLDS=(
["color.rs"]=95
["buffer/"]=90
["cell.rs"]=95
["input/"]=90
["text/"]=85
["renderer/"]=85
["ansi/"]=90
)
FAILURES=""
echo "## Coverage Report" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "| Module | Coverage | Threshold | Status |" >> $GITHUB_STEP_SUMMARY
echo "|--------|----------|-----------|--------|" >> $GITHUB_STEP_SUMMARY
for pattern in "${!THRESHOLDS[@]}"; do
THRESHOLD=${THRESHOLDS[$pattern]}
# Average line coverage across files whose path contains the pattern
COVERAGE=$(jq -r --arg pat "$pattern" '
[.data[0].files[] | select(.filename | contains($pat))]
| if length == 0 then 0
else ([.[].summary.lines.percent] | add / length)
end
' coverage.json 2>/dev/null || echo "0")
# Handle null/empty
if [ -z "$COVERAGE" ] || [ "$COVERAGE" = "null" ]; then
COVERAGE="N/A"
STATUS="⚠️"
elif (( $(echo "$COVERAGE < $THRESHOLD" | bc -l) )); then
FAILURES="$FAILURES\n- $pattern: ${COVERAGE}% < ${THRESHOLD}%"
STATUS="❌"
else
STATUS="✅"
fi
echo "| \`$pattern\` | ${COVERAGE}% | ${THRESHOLD}% | $STATUS |" >> $GITHUB_STEP_SUMMARY
done
echo "" >> $GITHUB_STEP_SUMMARY
echo "**Overall Coverage: ${OVERALL}%**" >> $GITHUB_STEP_SUMMARY
# Overall threshold check (50% for now, increase as coverage improves)
OVERALL_THRESHOLD=50
if (( $(echo "$OVERALL < $OVERALL_THRESHOLD" | bc -l) )); then
echo "" >> $GITHUB_STEP_SUMMARY
echo "::error::Overall coverage ${OVERALL}% is below minimum threshold of ${OVERALL_THRESHOLD}%"
exit 1
fi
# Report module failures as warnings (not blocking for now)
if [ -n "$FAILURES" ]; then
echo "" >> $GITHUB_STEP_SUMMARY
echo "### Module Coverage Warnings" >> $GITHUB_STEP_SUMMARY
echo -e "$FAILURES" >> $GITHUB_STEP_SUMMARY
echo "::warning::Some modules are below their coverage thresholds"
fi
- name: Upload to Codecov
uses: codecov/codecov-action@v4
with:
files: codecov.json
fail_ci_if_error: false
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
- name: Upload coverage artifacts
uses: actions/upload-artifact@v4
if: always()
with:
name: coverage-report
path: |
coverage.json
coverage-html/
retention-days: 14
- name: Comment coverage on PR
if: github.event_name == 'pull_request'
uses: actions/github-script@v7
with:
script: |
const fs = require('fs');
let coverage = 'unknown';
try {
const data = JSON.parse(fs.readFileSync('coverage.json', 'utf8'));
coverage = data.data[0].totals.lines.percent.toFixed(1);
} catch (e) {
console.log('Could not read coverage:', e);
}
const body = `## Code Coverage Report
**Overall Line Coverage: ${coverage}%**
<details>
<summary>View detailed coverage report</summary>
Download the coverage artifacts from this workflow run for the full HTML report.
</details>
`;
// Find existing comment
const { data: comments } = await github.rest.issues.listComments({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
});
const botComment = comments.find(c =>
c.user.type === 'Bot' && c.body.includes('Code Coverage Report')
);
if (botComment) {
await github.rest.issues.updateComment({
owner: context.repo.owner,
repo: context.repo.repo,
comment_id: botComment.id,
body
});
} else {
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
body
});
}
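# cargo-audit checks dependencies against the RustSec advisory database.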
security:
name: Security Audit
runs-on: ubuntu-latest
timeout-minutes: 15
steps:
- uses: actions/checkout@v4
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@nightly
- name: Install cargo-audit
uses: taiki-e/install-action@cargo-audit
- name: Run security audit
run: cargo audit
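# rustdoc must build cleanly; RUSTDOCFLAGS denies warnings.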
docs:
name: Documentation
runs-on: ubuntu-latest
timeout-minutes: 15
steps:
- uses: actions/checkout@v4
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@nightly
- name: Cache cargo
uses: Swatinem/rust-cache@v2
- name: Build documentation
run: cargo doc --no-deps --all-features
env:
RUSTDOCFLAGS: -D warnings
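# Release builds for each supported target triple; gated on lint and test.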
build:
name: Build (${{ matrix.target }})
needs: [lint, test]
runs-on: ${{ matrix.os }}
timeout-minutes: 30
strategy:
fail-fast: false
matrix:
include:
- os: ubuntu-latest
target: x86_64-unknown-linux-gnu
- os: ubuntu-24.04-arm
target: aarch64-unknown-linux-gnu
- os: macos-14
target: aarch64-apple-darwin
- os: macos-13
target: x86_64-apple-darwin
- os: windows-latest
target: x86_64-pc-windows-msvc
steps:
- uses: actions/checkout@v4
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@nightly
with:
targets: ${{ matrix.target }}
- name: Cache cargo
uses: Swatinem/rust-cache@v2
with:
key: ${{ matrix.target }}
- name: Build release
run: cargo build --release --target ${{ matrix.target }}
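# Criterion benchmarks run only on pushes to main; results are uploaded as artifacts.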
bench:
name: Benchmarks
runs-on: ubuntu-latest
timeout-minutes: 30
if: github.event_name == 'push' && github.ref == 'refs/heads/main'
steps:
- uses: actions/checkout@v4
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@nightly
- name: Cache cargo
uses: Swatinem/rust-cache@v2
- name: Run benchmarks
run: cargo bench --all-features
- name: Upload benchmark results
uses: actions/upload-artifact@v4
with:
name: benchmark-results
path: target/criterion/
retention-days: 30
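# Verifies the workspace still compiles when Cargo resolves minimal dependency versions.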
minimal-versions:
name: Minimal Versions
runs-on: ubuntu-latest
timeout-minutes: 15
steps:
- uses: actions/checkout@v4
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@nightly
- name: Cache cargo
uses: Swatinem/rust-cache@v2
- name: Check with minimal versions
run: |
cargo update -Z minimal-versions
cargo check --all-features
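# Runs scripts/conformance.sh and surfaces pass/fail counts in the job summary.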
conformance:
name: Conformance Tests
runs-on: ubuntu-latest
timeout-minutes: 15
steps:
- uses: actions/checkout@v4
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@nightly
- name: Cache cargo
uses: Swatinem/rust-cache@v2
- name: Run conformance tests
run: ./scripts/conformance.sh --json > conformance_results.json
- name: Show conformance summary
if: always()
run: |
echo "## Conformance Results" >> $GITHUB_STEP_SUMMARY
if [ -f conformance_results.json ]; then
PASSED=$(jq -r '.passed // 0' conformance_results.json)
FAILED=$(jq -r '.failed // 0' conformance_results.json)
echo "- **Passed:** $PASSED" >> $GITHUB_STEP_SUMMARY
echo "- **Failed:** $FAILED" >> $GITHUB_STEP_SUMMARY
fi
- name: Upload conformance artifacts
uses: actions/upload-artifact@v4
if: always()
with:
name: conformance-results
path: |
conformance_results.json
target/test-artifacts/conformance/
retention-days: 7
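# Performance budgets are asserted by the benchmark_comparison integration test.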
performance-budgets:
name: Performance Budgets
runs-on: ubuntu-latest
timeout-minutes: 15
steps:
- uses: actions/checkout@v4
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@nightly
- name: Cache cargo
uses: Swatinem/rust-cache@v2
- name: Run performance budget tests
run: cargo test --test benchmark_comparison -- --nocapture
- name: Show performance summary
run: |
echo "## Performance Budget Tests" >> $GITHUB_STEP_SUMMARY
echo "All performance budgets passed." >> $GITHUB_STEP_SUMMARY
snapshot-tests:
name: Snapshot Tests
runs-on: ubuntu-latest
timeout-minutes: 15
steps:
- uses: actions/checkout@v4
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@nightly
- name: Cache cargo
uses: Swatinem/rust-cache@v2
- name: Install cargo-insta
uses: taiki-e/install-action@v2
with:
tool: cargo-insta
- name: Verify snapshots
run: cargo insta test --check
- name: Show snapshot summary
run: |
echo "## Snapshot Tests" >> $GITHUB_STEP_SUMMARY
SNAP_COUNT=$(find src -name "*.snap" | wc -l)
echo "Verified **$SNAP_COUNT** snapshots." >> $GITHUB_STEP_SUMMARY
property-tests:
name: Property Tests
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- uses: actions/checkout@v4
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@nightly
- name: Cache cargo
uses: Swatinem/rust-cache@v2
- name: Run property tests
run: cargo test proptest -- --nocapture
env:
PROPTEST_CASES: 1000
- name: Show property test summary
run: |
echo "## Property Tests" >> $GITHUB_STEP_SUMMARY
echo "Property-based testing with proptest completed." >> $GITHUB_STEP_SUMMARY
e2e-tests:
name: E2E Tests
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- uses: actions/checkout@v4
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@nightly
- name: Cache cargo
uses: Swatinem/rust-cache@v2
- name: Run E2E tests with artifact capture
run: cargo test --test 'e2e_*' --test 'highlight_e2e' -- --nocapture
env:
HARNESS_ARTIFACTS: 1
HARNESS_LOG_LEVEL: debug
RUST_BACKTRACE: 1
- name: Show E2E test summary
if: always()
run: |
echo "## E2E Tests" >> $GITHUB_STEP_SUMMARY
if [ -d "target/test-artifacts" ]; then
SUITES=$(find target/test-artifacts -mindepth 1 -maxdepth 1 -type d | wc -l)
echo "Test suites: $SUITES" >> $GITHUB_STEP_SUMMARY
# [ -f <glob> ] breaks with zero or multiple matches; check each file inside the loop instead
for summary in target/test-artifacts/*/summary.json; do
[ -f "$summary" ] || continue
PASSED=$(jq -r '.passed // "unknown"' "$summary" 2>/dev/null || echo "unknown")
SUITE=$(basename "$(dirname "$summary")")
echo "- $SUITE: $PASSED" >> $GITHUB_STEP_SUMMARY
done
else
echo "No test artifacts generated." >> $GITHUB_STEP_SUMMARY
fi
- name: Upload E2E test artifacts
uses: actions/upload-artifact@v4
if: failure()
with:
name: e2e-test-artifacts
path: |
target/test-artifacts/
retention-days: 7
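# Builds demo_showcase in release mode and runs it headless at several terminal sizes,
# validating the JSON summary it emits.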
demo-smoke:
name: Demo Smoke (${{ matrix.size }})
runs-on: ubuntu-latest
timeout-minutes: 15
strategy:
fail-fast: false
matrix:
size: ["80x24", "132x43", "40x12", "200x60"]
include:
- size: "80x24"
description: "Standard terminal"
- size: "132x43"
description: "Wide terminal"
- size: "40x12"
description: "Minimal terminal"
- size: "200x60"
description: "Large terminal"
steps:
- uses: actions/checkout@v4
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@nightly
- name: Cache cargo
uses: Swatinem/rust-cache@v2
with:
key: demo-smoke
- name: Build demo_showcase
run: cargo build --release --bin demo_showcase
- name: Run headless demo smoke test
id: smoke
run: |
mkdir -p target/demo-smoke
OUTPUT="target/demo-smoke/${{ matrix.size }}.json"
STDERR_LOG="target/demo-smoke/${{ matrix.size }}.stderr.log"
# Run the binary directly (already built in previous step)
# Redirect stdout to JSON file, stderr to separate log to avoid corrupting JSON
if ./target/release/demo_showcase \
--headless-smoke \
--tour \
--exit-after-tour \
--headless-size ${{ matrix.size }} \
--seed 12345 \
--max-frames 1000 \
--headless-dump-json > "$OUTPUT" 2>"$STDERR_LOG"; then
echo "status=success" >> $GITHUB_OUTPUT
else
echo "status=failed" >> $GITHUB_OUTPUT
echo "Stderr output:"
cat "$STDERR_LOG"
exit 1
fi
# Validate JSON output
if ! jq empty "$OUTPUT" 2>/dev/null; then
echo "Invalid JSON output"
cat "$OUTPUT"
exit 1
fi
# Extract stats
FRAMES=$(jq -r '.frames_rendered // 0' "$OUTPUT")
LAYOUT=$(jq -r '.layout_mode // "unknown"' "$OUTPUT")
echo "frames=$FRAMES" >> $GITHUB_OUTPUT
echo "layout=$LAYOUT" >> $GITHUB_OUTPUT
- name: Show smoke test results
if: always()
run: |
echo "## Demo Smoke: ${{ matrix.size }}" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "**Size:** ${{ matrix.size }} (${{ matrix.description }})" >> $GITHUB_STEP_SUMMARY
echo "**Status:** ${{ steps.smoke.outputs.status || 'unknown' }}" >> $GITHUB_STEP_SUMMARY
echo "**Frames:** ${{ steps.smoke.outputs.frames || 'N/A' }}" >> $GITHUB_STEP_SUMMARY
echo "**Layout:** ${{ steps.smoke.outputs.layout || 'N/A' }}" >> $GITHUB_STEP_SUMMARY
- name: Upload smoke test artifacts
uses: actions/upload-artifact@v4
if: always()
with:
name: demo-smoke-${{ matrix.size }}
path: target/demo-smoke/
retention-days: 7
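# PTY-backed E2E tests are informational: the test step may fail without failing the job.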
pty-tests:
name: PTY Tests
runs-on: ubuntu-latest
timeout-minutes: 30
steps:
- uses: actions/checkout@v4
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@nightly
- name: Cache cargo
uses: Swatinem/rust-cache@v2
- name: Build demo_showcase binary
run: cargo build --bin demo_showcase
- name: Run PTY E2E tests
run: cargo test --test pty_e2e --features pty-tests -- --nocapture --test-threads=1
env:
TERM: xterm-256color
HARNESS_ARTIFACTS: 1
HARNESS_LOG_LEVEL: debug
RUST_BACKTRACE: 1
continue-on-error: true
- name: Show PTY test summary
if: always()
run: |
echo "## PTY Tests" >> $GITHUB_STEP_SUMMARY
if [ -d "target/test-artifacts" ]; then
echo "Test artifacts generated." >> $GITHUB_STEP_SUMMARY
find target/test-artifacts \( -name "*.txt" -o -name "*.json" \) | head -20 | while read -r f; do
echo "- $(basename "$f")" >> $GITHUB_STEP_SUMMARY
done
else
echo "No test artifacts found." >> $GITHUB_STEP_SUMMARY
fi
- name: Upload PTY test artifacts
uses: actions/upload-artifact@v4
if: always()
with:
name: pty-test-artifacts
path: |
target/test-artifacts/
retention-days: 7
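# Aggregate gate: fails unless every required job above succeeded; PTY tests only produce a warning.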
ci-success:
name: CI Success
needs: [lint, test, coverage, security, docs, build, conformance, performance-budgets, snapshot-tests, property-tests, e2e-tests, demo-smoke, pty-tests]
runs-on: ubuntu-latest
if: always()
steps:
- name: Check all jobs passed
run: |
# Required jobs (must pass)
if [[ "${{ needs.lint.result }}" != "success" ]] || \
[[ "${{ needs.test.result }}" != "success" ]] || \
[[ "${{ needs.coverage.result }}" != "success" ]] || \
[[ "${{ needs.security.result }}" != "success" ]] || \
[[ "${{ needs.docs.result }}" != "success" ]] || \
[[ "${{ needs.build.result }}" != "success" ]] || \
[[ "${{ needs.conformance.result }}" != "success" ]] || \
[[ "${{ needs.performance-budgets.result }}" != "success" ]] || \
[[ "${{ needs.snapshot-tests.result }}" != "success" ]] || \
[[ "${{ needs.property-tests.result }}" != "success" ]] || \
[[ "${{ needs.e2e-tests.result }}" != "success" ]] || \
[[ "${{ needs.demo-smoke.result }}" != "success" ]]; then
echo "One or more required jobs failed"
exit 1
fi
# PTY tests are informational (allowed to fail)
if [[ "${{ needs.pty-tests.result }}" != "success" ]]; then
echo "::warning::PTY tests failed (informational, not blocking)"
fi
echo "All required CI jobs passed!"
- name: Generate CI summary
if: always()
run: |
echo "## CI Summary" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "| Job | Status |" >> $GITHUB_STEP_SUMMARY
echo "|-----|--------|" >> $GITHUB_STEP_SUMMARY
echo "| Lint | ${{ needs.lint.result == 'success' && '✅' || '❌' }} |" >> $GITHUB_STEP_SUMMARY
echo "| Tests | ${{ needs.test.result == 'success' && '✅' || '❌' }} |" >> $GITHUB_STEP_SUMMARY
echo "| Coverage | ${{ needs.coverage.result == 'success' && '✅' || '❌' }} |" >> $GITHUB_STEP_SUMMARY
echo "| Security | ${{ needs.security.result == 'success' && '✅' || '❌' }} |" >> $GITHUB_STEP_SUMMARY
echo "| Docs | ${{ needs.docs.result == 'success' && '✅' || '❌' }} |" >> $GITHUB_STEP_SUMMARY
echo "| Build | ${{ needs.build.result == 'success' && '✅' || '❌' }} |" >> $GITHUB_STEP_SUMMARY
echo "| Conformance | ${{ needs.conformance.result == 'success' && '✅' || '❌' }} |" >> $GITHUB_STEP_SUMMARY
echo "| Performance | ${{ needs.performance-budgets.result == 'success' && '✅' || '❌' }} |" >> $GITHUB_STEP_SUMMARY
echo "| Snapshots | ${{ needs.snapshot-tests.result == 'success' && '✅' || '❌' }} |" >> $GITHUB_STEP_SUMMARY
echo "| Property | ${{ needs.property-tests.result == 'success' && '✅' || '❌' }} |" >> $GITHUB_STEP_SUMMARY
echo "| E2E | ${{ needs.e2e-tests.result == 'success' && '✅' || '❌' }} |" >> $GITHUB_STEP_SUMMARY
echo "| Demo Smoke | ${{ needs.demo-smoke.result == 'success' && '✅' || '❌' }} |" >> $GITHUB_STEP_SUMMARY
echo "| PTY (info) | ${{ needs.pty-tests.result == 'success' && '✅' || '⚠️' }} |" >> $GITHUB_STEP_SUMMARY