# lineguard 0.1.7
#
# A fast and reliable file linter that ensures proper line endings and clean formatting.
# See the project documentation for details.
name: Benchmark

on:
  push:
    branches:
      - master
    paths:
      - 'src/**'
      - 'Cargo.*'
      - '.github/workflows/benchmark.yml'
  pull_request:
    branches:
      - master
    paths:
      - 'src/**'
      - 'Cargo.*'
      - '.github/workflows/benchmark.yml'
  workflow_dispatch:

# Keep only the newest run per PR (or per ref outside PRs) active.
concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
  cancel-in-progress: true

# Scopes needed to push benchmark history to gh-pages and comment on PRs.
permissions:
  contents: write
  deployments: write
  pull-requests: write

jobs:
  benchmark:
    name: Performance Benchmark
    runs-on: ubuntu-latest
    # Only run on master push or manual trigger or PR with 'benchmark' label
    if: |
      github.event_name == 'push' ||
      github.event_name == 'workflow_dispatch' ||
      contains(github.event.pull_request.labels.*.name, 'benchmark')

    steps:
    - name: Checkout repository
      uses: actions/checkout@v6

    - name: Install Rust toolchain
      uses: dtolnay/rust-toolchain@stable

    # Reuse downloaded crates across runs; keyed on Cargo.lock so the cache
    # is refreshed whenever dependencies change.
    - name: Cache cargo registry
      uses: actions/cache@v5
      with:
        path: |
          ~/.cargo/registry
          ~/.cargo/git
        key: ${{ runner.os }}-cargo-registry-${{ hashFiles('**/Cargo.lock') }}
        restore-keys: |
          ${{ runner.os }}-cargo-registry-

    - name: Install hyperfine
      run: |
        # Single source of truth for the pinned hyperfine release.
        HYPERFINE_VERSION=1.18.0
        wget "https://github.com/sharkdp/hyperfine/releases/download/v${HYPERFINE_VERSION}/hyperfine_${HYPERFINE_VERSION}_amd64.deb"
        sudo dpkg -i "hyperfine_${HYPERFINE_VERSION}_amd64.deb"

    - name: Build release binary
      run: cargo build --release

    - name: Prepare test data
      run: |
        # Three corpora: many tiny files, many mid-size files, a few big ones.
        mkdir -p bench_data/{small,medium,large}

        # Small files (100 files, 1KB each)
        for i in $(seq 1 100); do
          printf "line %s\n" {1..50} > "bench_data/small/file_$i.txt"
        done

        # Medium files (100 files, 100KB each)
        for i in $(seq 1 100); do
          printf "line %s\n" {1..5000} > "bench_data/medium/file_$i.txt"
        done

        # Large files (10 files, 10MB each)
        for i in $(seq 1 10); do
          printf "line %s\n" {1..500000} > "bench_data/large/file_$i.txt"
        done

        # Deliberately no malformed files: lineguard exits 1 when it finds issues.

    - name: Run benchmarks
      run: |
        # Benchmark different scenarios and create custom output.
        # --ignore-failure keeps timing data even if lineguard exits non-zero.
        hyperfine \
          --warmup 3 \
          --min-runs 10 \
          --ignore-failure \
          --export-json hyperfine_results.json \
          --export-markdown benchmark_results.md \
          './target/release/lineguard bench_data/small' \
          './target/release/lineguard bench_data/medium' \
          './target/release/lineguard bench_data/large' \
          './target/release/lineguard bench_data --recursive' \
          './target/release/lineguard bench_data/**/*.txt'

        # Convert hyperfine output to custom benchmark format.
        # The heredoc delimiter is quoted, so the Python source passes through verbatim.
        python3 << 'EOF'
        import json

        with open('hyperfine_results.json', 'r') as f:
            hyperfine_data = json.load(f)

        # Create array of benchmark results as required by customSmallerIsBetter
        benchmarks = []
        for result in hyperfine_data['results']:
            command = result['command']
            # Extract scenario name from command.
            # NOTE: branch order matters — every command contains 'bench_data',
            # so the size-specific substrings must be checked first.
            if 'small' in command:
                name = 'Small files (100x1KB)'
            elif 'medium' in command:
                name = 'Medium files (100x100KB)'
            elif 'large' in command:
                name = 'Large files (10x10MB)'
            elif '--recursive' in command:
                name = 'Recursive scan'
            else:
                name = 'Glob pattern'

            benchmarks.append({
                'name': name,
                'unit': 'seconds',
                'value': result['mean']  # mean wall-clock time reported by hyperfine
            })

        # Write array directly (not wrapped in object) as required by the action
        with open('benchmark_results.json', 'w') as f:
            json.dump(benchmarks, f, indent=2)
        EOF

    # Setup Pages deployment
    - name: Setup Pages
      uses: actions/configure-pages@v5
      if: github.event_name != 'pull_request'

    # Bootstrap an orphan gh-pages branch on first run so that
    # github-action-benchmark has a branch to auto-push its data to.
    - name: Setup gh-pages branch
      if: github.event_name != 'pull_request'
      run: |
        # Configure git identity
        git config user.name "github-actions[bot]"
        git config user.email "github-actions[bot]@users.noreply.github.com"

        # Best-effort fetch: fails harmlessly when gh-pages does not exist yet.
        git fetch origin gh-pages:gh-pages || true
        if ! git show-ref --verify --quiet refs/heads/gh-pages; then
          echo "Creating gh-pages branch"
          git checkout --orphan gh-pages
          # Removes tracked files only; untracked artifacts
          # (benchmark_results.json etc.) survive for the next step.
          git rm -rf .
          echo "# Benchmark Results" > README.md
          git add README.md
          git commit -m "Initial gh-pages commit"
          git push origin gh-pages
          # NOTE(review): hard-codes the default branch name — update if master is renamed.
          git checkout master
        fi

    # Append the converted results to the history kept on gh-pages
    # (auto-push commits them under dev/bench/).
    - name: Store benchmark result
      uses: benchmark-action/github-action-benchmark@v1
      if: github.event_name != 'pull_request'
      with:
        tool: 'customSmallerIsBetter'
        output-file-path: benchmark_results.json
        github-token: ${{ secrets.GITHUB_TOKEN }}
        auto-push: true
        benchmark-data-dir-path: 'dev/bench'

    # Post hyperfine's markdown table on the PR (benchmark-labelled PRs only).
    - name: Comment PR with results
      uses: actions/github-script@v8
      if: github.event_name == 'pull_request'
      with:
        script: |
          const fs = require('fs');
          const markdown = fs.readFileSync('benchmark_results.md', 'utf8');

          // Await the API call so a failed request fails this step instead of
          // being dropped as an unresolved promise (matches the size-check
          // job's script, which awaits all of its API calls).
          await github.rest.issues.createComment({
            issue_number: context.issue.number,
            owner: context.repo.owner,
            repo: context.repo.repo,
            body: `## Benchmark Results\n\n${markdown}`
          });

  # Reports the release binary's size on every pull request.
  size-check:
    name: Binary Size Check
    runs-on: ubuntu-latest
    # Only run on PRs
    if: github.event_name == 'pull_request'

    steps:
    - name: Checkout repository
      uses: actions/checkout@v6

    - name: Install Rust toolchain
      uses: dtolnay/rust-toolchain@stable

    - name: Build binaries
      run: |
        # Both configurations write target/release/lineguard, so build the
        # no-default-features variant first and the default configuration
        # last — the size report below measures whatever binary is left at
        # that path, and its "Release" row must reflect the default build.
        # (Previously the order was reversed, so "Release" actually measured
        # the no-default-features binary.)
        cargo build --release --no-default-features
        cargo build --release

    - name: Check binary sizes
      run: |
        ls -lh target/release/lineguard
        size target/release/lineguard

        # Create size report (unquoted heredoc: $(...) substitutions run here).
        # The Release row is captured before strip runs in the Stripped row.
        cat << EOF > size_report.md
        ## Binary Size Report

        | Configuration | Size |
        |--------------|------|
        | Release | $(ls -lh target/release/lineguard | awk '{print $5}') |
        | Stripped | $(strip target/release/lineguard && ls -lh target/release/lineguard | awk '{print $5}') |
        EOF

    # Upsert a single size-report comment on the PR instead of stacking new ones.
    - name: Comment PR with size report
      uses: actions/github-script@v8
      if: github.event_name == 'pull_request'
      with:
        script: |
          const fs = require('fs');
          const body = fs.readFileSync('size_report.md', 'utf8');

          // Look for a size-report comment left by a previous run.
          const { data: comments } = await github.rest.issues.listComments({
            issue_number: context.issue.number,
            owner: context.repo.owner,
            repo: context.repo.repo,
          });
          const previous = comments.find((c) =>
            c.user.type === 'Bot' && c.body.includes('## Binary Size Report')
          );

          if (previous) {
            // Refresh the existing comment in place.
            await github.rest.issues.updateComment({
              comment_id: previous.id,
              owner: context.repo.owner,
              repo: context.repo.repo,
              body,
            });
          } else {
            // First run on this PR: post a fresh comment.
            await github.rest.issues.createComment({
              issue_number: context.issue.number,
              owner: context.repo.owner,
              repo: context.repo.repo,
              body,
            });
          }