name: Perf

on:
  # Manual trigger only
  workflow_dispatch:

jobs:
  perf-tests:
    name: Perf Tests and Benches
    runs-on: ubuntu-latest
    # Only run when manually triggered on the main branch
    if: ${{ github.event_name == 'workflow_dispatch' && github.ref_name == 'main' }}
    concurrency:
      group: perf-${{ github.ref }}
      cancel-in-progress: false
    timeout-minutes: 45
    env:
      PERF_TESTS: "1"
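    # PERF_TESTS=1 is exported job-wide so gated code can also check it at
    # runtime; the `perf-tests` cargo feature used below gates compilation.
    # (Assumption: the crate follows this feature-plus-env convention.)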
    steps:
      - uses: actions/checkout@v4
      - uses: dtolnay/rust-toolchain@stable
      - uses: Swatinem/rust-cache@v2
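      # rust-cache restores the cargo registry and build artifacts between
      # runs; a warm cache shortens build time but does not affect the
      # measured bench numbers themselves.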
      # Optional diagnostics for visibility
      - name: Environment
        run: |
          rustc -Vv
          cargo -V
          lscpu || true
          cat /etc/os-release || true
      - name: Build (all-features) for benches
        run: cargo build --all-features
      - name: Run perf-gated tests (ignored by default)
        run: cargo test -F perf-tests -- --ignored
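      # A perf-gated test is assumed to look roughly like this sketch
      # (hypothetical names; the ignore-plus-env convention is an assumption):
      #
      #   #[test]
      #   #[ignore] // skipped in normal CI; opted in via `-- --ignored`
      #   fn watch_timer_hot_budget() {
      #       if std::env::var("PERF_TESTS").as_deref() != Ok("1") {
      #           return; // double gate: cargo feature + env var
      #       }
      #       // ... timing assertions against a budget ...
      #   }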
      - name: Run perf-gated benches
        run: cargo bench -F perf-tests
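      # This pass runs every bench target once with Criterion's default
      # timings; the steps below re-run the key benches with pinned
      # warm-up/measurement windows so baseline comparisons stay consistent.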
      - name: Install jq (for baseline comparison)
        run: |
          sudo apt-get update
          sudo apt-get install -y jq python3
      - name: Run watch_timer_hot (metrics) with fixed timings
        env:
          PERF_TESTS: "1"
        run: |
          cargo bench -F "perf-tests metrics" --bench watch_timer_hot -- \
            --measurement-time 5 \
            --warm-up-time 2 \
            --save-baseline current
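      # `--save-baseline current` stores this run's estimates under
      # target/criterion/<benchmark-id>/current/ instead of overwriting the
      # default `base` baseline.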
      - name: Show Criterion output (watch_timer_hot)
        run: |
          echo "Listing target/criterion (if present)"
          ls -la target || true
          ls -la target/criterion || true
          find target/criterion -maxdepth 2 -type d -print || true
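      # The comparison steps below diff the fresh Criterion estimates against
      # the JSON baselines checked in under perf_baselines/.
      # PERF_COMPARE_STRICT=0 is assumed to make the script report-only; a
      # nonzero value would presumably fail the job on regressions instead.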
      - name: Compare Criterion results to baselines (watch_timer_hot)
        env:
          PERF_COMPARE_STRICT: "0"
        run: |
          if [ ! -d target/criterion ]; then
            echo "No Criterion results found; skipping watch_timer_hot baseline comparison."
            exit 0
          fi
          if [ ! -d target/criterion/watch_timer_hot ] && [ ! -f target/criterion/new/estimates.json ]; then
            echo "Group directory target/criterion/watch_timer_hot not found and flat layout not detected; skipping."
            exit 0
          fi
          echo "## watch_timer_hot baseline comparison" >> "$GITHUB_STEP_SUMMARY"
          bash scripts/compare_criterion_baseline.sh watch_timer_hot perf_baselines/watch_timer_hot.json | tee -a "$GITHUB_STEP_SUMMARY"
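      # The script is invoked as: compare_criterion_baseline.sh <group> <baseline.json>.
      # The baseline shape below is a hypothetical illustration only, not the
      # script's documented schema:
      #   { "bench_id": { "mean_ns": 123.4, "tolerance_pct": 10 } }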
      - name: Run timers (perf) with fixed timings
        env:
          PERF_TESTS: "1"
        run: |
          cargo bench -F perf-tests --bench timers -- \
            --measurement-time 5 \
            --warm-up-time 2 \
            --save-baseline current
      - name: Compare Criterion results to baselines (timers)
        env:
          PERF_COMPARE_STRICT: "0"
        run: |
          if [ ! -d target/criterion ]; then
            echo "No Criterion results found; skipping timers baseline comparison."
            exit 0
          fi
          if [ ! -d target/criterion/timers ] && [ ! -f target/criterion/new/estimates.json ]; then
            echo "Group directory target/criterion/timers not found and flat layout not detected; skipping."
            exit 0
          fi
          # printf, not echo: bash echo prints "\n" literally, breaking the heading
          printf '\n## timers baseline comparison\n' >> "$GITHUB_STEP_SUMMARY"
          bash scripts/compare_criterion_baseline.sh timers perf_baselines/timers.json | tee -a "$GITHUB_STEP_SUMMARY"
      - name: Run histogram_hot (perf) with fixed timings
        env:
          PERF_TESTS: "1"
        run: |
          cargo bench -F perf-tests --bench histogram_hot -- \
            --measurement-time 5 \
            --warm-up-time 2 \
            --save-baseline current
      - name: Compare Criterion results to baselines (histogram_hot)
        env:
          PERF_COMPARE_STRICT: "0"
        run: |
          if [ ! -d target/criterion ]; then
            echo "No Criterion results found; skipping histogram_hot baseline comparison."
            exit 0
          fi
          if [ ! -d target/criterion/histogram_hot ] && [ ! -f target/criterion/new/estimates.json ]; then
            echo "Group directory target/criterion/histogram_hot not found and flat layout not detected; skipping."
            exit 0
          fi
          # printf, not echo: bash echo prints "\n" literally, breaking the heading
          printf '\n## histogram_hot baseline comparison\n' >> "$GITHUB_STEP_SUMMARY"
          bash scripts/compare_criterion_baseline.sh histogram_hot perf_baselines/histogram_hot.json | tee -a "$GITHUB_STEP_SUMMARY"