Skip to main content

testx/
parallel.rs

use std::collections::HashMap;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex};
use std::time::{Duration, Instant};

use crate::adapters::{TestCase, TestRunResult, TestSuite};
6
/// Configuration for parallel test execution.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ParallelConfig {
    /// Maximum number of concurrent jobs (0 = auto-detect from CPU count)
    pub max_jobs: usize,
    /// How to distribute tests across workers
    pub strategy: PartitionStrategy,
    /// Whether to fail fast (stop all workers on first failure)
    pub fail_fast: bool,
    /// Whether to isolate output per worker
    pub isolate_output: bool,
}

impl ParallelConfig {
    /// Create a config with auto-detected job count (`max_jobs == 0`),
    /// round-robin partitioning, fail-fast off, and output isolation on.
    pub fn new() -> Self {
        Self {
            max_jobs: 0,
            strategy: PartitionStrategy::RoundRobin,
            fail_fast: false,
            isolate_output: true,
        }
    }

    /// Set the maximum number of concurrent jobs (0 = auto-detect).
    #[must_use]
    pub fn with_max_jobs(mut self, jobs: usize) -> Self {
        self.max_jobs = jobs;
        self
    }

    /// Set the partition strategy.
    #[must_use]
    pub fn with_strategy(mut self, strategy: PartitionStrategy) -> Self {
        self.strategy = strategy;
        self
    }

    /// Set fail-fast mode.
    #[must_use]
    pub fn with_fail_fast(mut self, fail_fast: bool) -> Self {
        self.fail_fast = fail_fast;
        self
    }

    /// Effective job count: `max_jobs` when non-zero, otherwise the number
    /// of available CPUs (falling back to 4 if detection fails).
    pub fn effective_jobs(&self) -> usize {
        if self.max_jobs == 0 {
            std::thread::available_parallelism()
                .map(|n| n.get())
                .unwrap_or(4)
        } else {
            self.max_jobs
        }
    }

    /// Whether parallel execution is enabled (more than one effective job).
    pub fn is_enabled(&self) -> bool {
        self.effective_jobs() > 1
    }
}

impl Default for ParallelConfig {
    fn default() -> Self {
        Self::new()
    }
}

/// Strategy for partitioning tests across workers.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum PartitionStrategy {
    /// Distribute tests in round-robin order
    RoundRobin,
    /// Group tests by suite
    BySuite,
    /// Distribute by estimated duration (longest first)
    ByDuration,
    /// Each worker gets a contiguous chunk
    Chunked,
}
84
/// A partition of work to be executed by a single worker.
#[derive(Debug, Clone)]
pub struct WorkPartition {
    /// Worker index (0-based)
    pub worker_id: usize,
    /// The per-suite groups of tests assigned to this worker
    pub test_groups: Vec<TestGroup>,
}

/// A group of tests that all belong to the same suite.
#[derive(Debug, Clone)]
pub struct TestGroup {
    /// Name of the suite these tests come from
    pub suite_name: String,
    /// Names of the tests in this group
    pub test_names: Vec<String>,
}

impl WorkPartition {
    /// Total number of tests across all groups in this partition.
    pub fn total_tests(&self) -> usize {
        self.test_groups
            .iter()
            .fold(0, |sum, group| sum + group.test_names.len())
    }

    /// True when no test groups are assigned to this partition.
    pub fn is_empty(&self) -> bool {
        self.test_groups.is_empty()
    }
}
114
115/// Partition test cases across N workers using the given strategy.
116pub fn partition_tests(
117    result: &TestRunResult,
118    num_workers: usize,
119    strategy: &PartitionStrategy,
120) -> Vec<WorkPartition> {
121    if num_workers == 0 {
122        return vec![];
123    }
124
125    match strategy {
126        PartitionStrategy::RoundRobin => partition_round_robin(result, num_workers),
127        PartitionStrategy::BySuite => partition_by_suite(result, num_workers),
128        PartitionStrategy::ByDuration => partition_by_duration(result, num_workers),
129        PartitionStrategy::Chunked => partition_chunked(result, num_workers),
130    }
131}
132
133fn partition_round_robin(result: &TestRunResult, num_workers: usize) -> Vec<WorkPartition> {
134    let mut partitions: Vec<WorkPartition> = (0..num_workers)
135        .map(|id| WorkPartition {
136            worker_id: id,
137            test_groups: Vec::new(),
138        })
139        .collect();
140
141    let mut worker_idx = 0;
142    for suite in &result.suites {
143        for test in &suite.tests {
144            let partition = &mut partitions[worker_idx % num_workers];
145
146            // Find or create test group for this suite
147            if let Some(group) = partition
148                .test_groups
149                .iter_mut()
150                .find(|g| g.suite_name == suite.name)
151            {
152                group.test_names.push(test.name.clone());
153            } else {
154                partition.test_groups.push(TestGroup {
155                    suite_name: suite.name.clone(),
156                    test_names: vec![test.name.clone()],
157                });
158            }
159
160            worker_idx += 1;
161        }
162    }
163
164    partitions
165}
166
167fn partition_by_suite(result: &TestRunResult, num_workers: usize) -> Vec<WorkPartition> {
168    let mut partitions: Vec<WorkPartition> = (0..num_workers)
169        .map(|id| WorkPartition {
170            worker_id: id,
171            test_groups: Vec::new(),
172        })
173        .collect();
174
175    // Assign each suite to the worker with the least tests
176    for suite in &result.suites {
177        let min_worker = partitions
178            .iter()
179            .enumerate()
180            .min_by_key(|(_, p)| p.total_tests())
181            .map(|(i, _)| i)
182            .unwrap_or(0);
183
184        partitions[min_worker].test_groups.push(TestGroup {
185            suite_name: suite.name.clone(),
186            test_names: suite.tests.iter().map(|t| t.name.clone()).collect(),
187        });
188    }
189
190    partitions
191}
192
193fn partition_by_duration(result: &TestRunResult, num_workers: usize) -> Vec<WorkPartition> {
194    let mut partitions: Vec<WorkPartition> = (0..num_workers)
195        .map(|id| WorkPartition {
196            worker_id: id,
197            test_groups: Vec::new(),
198        })
199        .collect();
200
201    // Collect all tests with their durations, sorted longest first
202    let mut all_tests: Vec<(&str, &str, Duration)> = Vec::new();
203    for suite in &result.suites {
204        for test in &suite.tests {
205            all_tests.push((&suite.name, &test.name, test.duration));
206        }
207    }
208    all_tests.sort_by(|a, b| b.2.cmp(&a.2));
209
210    // Track total duration per worker
211    let mut worker_durations = vec![Duration::ZERO; num_workers];
212
213    // Greedy assignment: assign longest test to least-loaded worker
214    for (suite_name, test_name, duration) in all_tests {
215        let min_worker = worker_durations
216            .iter()
217            .enumerate()
218            .min_by_key(|(_, d)| *d)
219            .map(|(i, _)| i)
220            .unwrap_or(0);
221
222        worker_durations[min_worker] += duration;
223
224        let partition = &mut partitions[min_worker];
225        if let Some(group) = partition
226            .test_groups
227            .iter_mut()
228            .find(|g| g.suite_name == suite_name)
229        {
230            group.test_names.push(test_name.to_string());
231        } else {
232            partition.test_groups.push(TestGroup {
233                suite_name: suite_name.to_string(),
234                test_names: vec![test_name.to_string()],
235            });
236        }
237    }
238
239    partitions
240}
241
242fn partition_chunked(result: &TestRunResult, num_workers: usize) -> Vec<WorkPartition> {
243    let mut partitions: Vec<WorkPartition> = (0..num_workers)
244        .map(|id| WorkPartition {
245            worker_id: id,
246            test_groups: Vec::new(),
247        })
248        .collect();
249
250    // Flatten all tests
251    let mut all_tests: Vec<(&str, &str)> = Vec::new();
252    for suite in &result.suites {
253        for test in &suite.tests {
254            all_tests.push((&suite.name, &test.name));
255        }
256    }
257
258    let chunk_size = all_tests.len().div_ceil(num_workers);
259
260    for (i, chunk) in all_tests.chunks(chunk_size).enumerate() {
261        if i >= num_workers {
262            break;
263        }
264        for (suite_name, test_name) in chunk {
265            let partition = &mut partitions[i];
266            if let Some(group) = partition
267                .test_groups
268                .iter_mut()
269                .find(|g| g.suite_name == *suite_name)
270            {
271                group.test_names.push(test_name.to_string());
272            } else {
273                partition.test_groups.push(TestGroup {
274                    suite_name: suite_name.to_string(),
275                    test_names: vec![test_name.to_string()],
276                });
277            }
278        }
279    }
280
281    partitions
282}
283
/// Result produced by a single parallel worker.
#[derive(Debug, Clone)]
pub struct WorkerResult {
    /// Index of the worker that produced this result (0-based)
    pub worker_id: usize,
    /// Test results collected by this worker
    pub result: TestRunResult,
    /// Wall-clock time this worker spent running
    pub wall_time: Duration,
    /// True when this worker was stopped early by fail-fast cancellation
    pub cancelled: bool,
}
296
/// Aggregated result from all parallel workers.
#[derive(Debug, Clone)]
pub struct ParallelResult {
    /// Individual per-worker results, in worker order
    pub workers: Vec<WorkerResult>,
    /// Single result merged from all workers' suites
    pub merged: TestRunResult,
    /// Total wall time — the max of all workers, since they ran concurrently
    pub wall_time: Duration,
    /// Number of workers used
    pub num_workers: usize,
    /// Whether any worker was cancelled (e.g. by fail-fast)
    pub had_cancellation: bool,
}
311
312/// Merge results from multiple workers into a single TestRunResult.
313pub fn merge_worker_results(workers: &[WorkerResult]) -> TestRunResult {
314    let mut suite_map: HashMap<String, Vec<TestCase>> = HashMap::new();
315    let mut total_duration = Duration::ZERO;
316    let mut any_failed = false;
317
318    for worker in workers {
319        total_duration = total_duration.max(worker.wall_time);
320        for suite in &worker.result.suites {
321            let tests = suite_map.entry(suite.name.clone()).or_default();
322            tests.extend(suite.tests.iter().cloned());
323        }
324        if worker.result.total_failed() > 0 {
325            any_failed = true;
326        }
327    }
328
329    let suites: Vec<TestSuite> = suite_map
330        .into_iter()
331        .map(|(name, tests)| TestSuite { name, tests })
332        .collect();
333
334    TestRunResult {
335        suites,
336        duration: total_duration,
337        raw_exit_code: if any_failed { 1 } else { 0 },
338    }
339}
340
341/// Build a ParallelResult from worker results.
342pub fn build_parallel_result(workers: Vec<WorkerResult>) -> ParallelResult {
343    let wall_time = workers
344        .iter()
345        .map(|w| w.wall_time)
346        .max()
347        .unwrap_or_default();
348    let num_workers = workers.len();
349    let had_cancellation = workers.iter().any(|w| w.cancelled);
350    let merged = merge_worker_results(&workers);
351
352    ParallelResult {
353        workers,
354        merged,
355        wall_time,
356        num_workers,
357        had_cancellation,
358    }
359}
360
/// Thread-safe cancellation flag for fail-fast mode.
///
/// Cloning yields a handle to the same underlying flag: any clone can
/// cancel, and all clones observe the cancellation.
#[derive(Debug, Clone)]
pub struct CancellationToken {
    // An atomic flag needs no locking and cannot be poisoned, unlike the
    // Mutex<bool> it replaces — a panic elsewhere can no longer make
    // is_cancelled() silently report false.
    cancelled: Arc<AtomicBool>,
}

impl CancellationToken {
    /// Create a new, un-cancelled token.
    pub fn new() -> Self {
        Self {
            cancelled: Arc::new(AtomicBool::new(false)),
        }
    }

    /// Cancel all workers holding a clone of this token.
    pub fn cancel(&self) {
        self.cancelled.store(true, Ordering::SeqCst);
    }

    /// Check if cancellation was requested.
    pub fn is_cancelled(&self) -> bool {
        self.cancelled.load(Ordering::SeqCst)
    }
}

impl Default for CancellationToken {
    fn default() -> Self {
        Self::new()
    }
}
393
/// Statistics about parallel execution.
#[derive(Debug, Clone)]
pub struct ParallelStats {
    /// Number of workers used
    pub num_workers: usize,
    /// Tests per worker as (min, max, avg)
    pub tests_per_worker: (usize, usize, f64),
    /// Total CPU time — the sum of every worker's wall time
    pub total_cpu_time: Duration,
    /// Overall wall-clock time for the whole run
    pub wall_time: Duration,
    /// Speedup factor: total_cpu_time / wall_time
    pub speedup: f64,
    /// Parallel efficiency: speedup / num_workers
    pub efficiency: f64,
}
410
411/// Compute statistics about parallel execution.
412pub fn compute_parallel_stats(result: &ParallelResult) -> ParallelStats {
413    let num_workers = result.num_workers;
414    let total_cpu_time: Duration = result.workers.iter().map(|w| w.wall_time).sum();
415    let wall_time = result.wall_time;
416
417    let tests_per_worker: Vec<usize> = result
418        .workers
419        .iter()
420        .map(|w| w.result.total_tests())
421        .collect();
422
423    let min_tests = tests_per_worker.iter().copied().min().unwrap_or(0);
424    let max_tests = tests_per_worker.iter().copied().max().unwrap_or(0);
425    let avg_tests = if num_workers > 0 {
426        tests_per_worker.iter().sum::<usize>() as f64 / num_workers as f64
427    } else {
428        0.0
429    };
430
431    let speedup = if wall_time.as_secs_f64() > 0.0 {
432        total_cpu_time.as_secs_f64() / wall_time.as_secs_f64()
433    } else {
434        1.0
435    };
436
437    let efficiency = if num_workers > 0 {
438        speedup / num_workers as f64
439    } else {
440        0.0
441    };
442
443    ParallelStats {
444        num_workers,
445        tests_per_worker: (min_tests, max_tests, avg_tests),
446        total_cpu_time,
447        wall_time,
448        speedup,
449        efficiency,
450    }
451}
452
453/// Estimate how long a partition would take based on known durations.
454pub fn estimate_partition_time(partition: &WorkPartition, result: &TestRunResult) -> Duration {
455    let mut total = Duration::ZERO;
456
457    for group in &partition.test_groups {
458        if let Some(suite) = result.suites.iter().find(|s| s.name == group.suite_name) {
459            for test_name in &group.test_names {
460                if let Some(test) = suite.tests.iter().find(|t| &t.name == test_name) {
461                    total += test.duration;
462                }
463            }
464        }
465    }
466
467    total
468}
469
/// Check if partitions are balanced.
///
/// "Balanced" means the busiest worker has at most `2 * min + 2` tests,
/// where `min` is the lightest worker's count — the criterion is relative
/// to the minimum, not the average. Empty input and all-empty partitions
/// count as balanced.
pub fn is_balanced(partitions: &[WorkPartition]) -> bool {
    if partitions.is_empty() {
        return true;
    }

    let counts: Vec<usize> = partitions.iter().map(|p| p.total_tests()).collect();
    let min = counts.iter().copied().min().unwrap_or(0);
    let max = counts.iter().copied().max().unwrap_or(0);

    if max == 0 {
        return true;
    }

    // The `+ 2` slack keeps tiny partitions (e.g. min == 0 or 1) from being
    // flagged; saturating math guards against overflow on huge counts.
    max <= min.saturating_mul(2).saturating_add(2)
}
487
488/// Rebalance partitions that are too skewed.
489pub fn rebalance(partitions: &mut [WorkPartition]) {
490    if partitions.is_empty() {
491        return;
492    }
493
494    // Flatten all tests
495    let mut all_tests: Vec<(String, String)> = Vec::new();
496    for partition in partitions.iter() {
497        for group in &partition.test_groups {
498            for test_name in &group.test_names {
499                all_tests.push((group.suite_name.clone(), test_name.clone()));
500            }
501        }
502    }
503
504    // Clear all partitions
505    for partition in partitions.iter_mut() {
506        partition.test_groups.clear();
507    }
508
509    // Redistribute round-robin
510    for (i, (suite_name, test_name)) in all_tests.iter().enumerate() {
511        let idx = i % partitions.len();
512        let partition = &mut partitions[idx];
513
514        if let Some(group) = partition
515            .test_groups
516            .iter_mut()
517            .find(|g| g.suite_name == *suite_name)
518        {
519            group.test_names.push(test_name.clone());
520        } else {
521            partition.test_groups.push(TestGroup {
522                suite_name: suite_name.clone(),
523                test_names: vec![test_name.clone()],
524            });
525        }
526    }
527}
528
529/// Format a partition for display.
530pub fn format_partition(partition: &WorkPartition) -> String {
531    let mut parts = Vec::new();
532    for group in &partition.test_groups {
533        parts.push(format!(
534            "{}({} tests)",
535            group.suite_name,
536            group.test_names.len()
537        ));
538    }
539    format!(
540        "Worker {}: {} tests [{}]",
541        partition.worker_id,
542        partition.total_tests(),
543        parts.join(", ")
544    )
545}
546
/// Monitor for tracking parallel execution progress.
#[derive(Debug)]
pub struct ProgressMonitor {
    /// When execution started
    start_time: Instant,
    /// Total number of tests expected to run
    total_tests: usize,
    /// Per-worker completed-test counters, shared across threads
    completed: Arc<Mutex<HashMap<usize, usize>>>,
}

impl ProgressMonitor {
    /// Create a monitor expecting `total_tests` tests; the clock starts now.
    pub fn new(total_tests: usize) -> Self {
        Self {
            start_time: Instant::now(),
            total_tests,
            completed: Arc::new(Mutex::new(HashMap::new())),
        }
    }

    /// Record one completed test for `worker_id`. A poisoned lock is
    /// silently ignored — the counters are best-effort.
    pub fn record_completion(&self, worker_id: usize) {
        if let Ok(mut counts) = self.completed.lock() {
            *counts.entry(worker_id).or_default() += 1;
        }
    }

    /// Total completed tests across all workers (0 if the lock is poisoned).
    pub fn total_completed(&self) -> usize {
        match self.completed.lock() {
            Ok(counts) => counts.values().copied().sum(),
            Err(_) => 0,
        }
    }

    /// Completion percentage in [0, 100]; an empty run counts as 100% done.
    pub fn progress_percent(&self) -> f64 {
        if self.total_tests == 0 {
            return 100.0;
        }
        self.total_completed() as f64 / self.total_tests as f64 * 100.0
    }

    /// Time elapsed since the monitor was created.
    pub fn elapsed(&self) -> Duration {
        self.start_time.elapsed()
    }

    /// Estimated time remaining, extrapolated from the completion rate so
    /// far. `None` until at least one test has completed, or when the
    /// extrapolation is not a finite non-negative number.
    pub fn eta(&self) -> Option<Duration> {
        let done = self.total_completed();
        if done == 0 {
            return None;
        }

        let rate = done as f64 / self.elapsed().as_secs_f64();
        let remaining = self.total_tests.saturating_sub(done) as f64 / rate;

        (remaining.is_finite() && remaining >= 0.0).then(|| Duration::from_secs_f64(remaining))
    }
}
615
616#[cfg(test)]
617mod tests {
618    use super::*;
619    use crate::adapters::TestStatus;
620
621    fn make_test(name: &str, status: TestStatus, duration_ms: u64) -> TestCase {
622        TestCase {
623            name: name.into(),
624            status,
625            duration: Duration::from_millis(duration_ms),
626            error: None,
627        }
628    }
629
630    fn make_suite(name: &str, tests: Vec<TestCase>) -> TestSuite {
631        TestSuite {
632            name: name.into(),
633            tests,
634        }
635    }
636
637    fn make_result(suites: Vec<TestSuite>) -> TestRunResult {
638        TestRunResult {
639            suites,
640            duration: Duration::from_millis(100),
641            raw_exit_code: 0,
642        }
643    }
644
645    // ─── ParallelConfig Tests ───────────────────────────────────────────
646
647    #[test]
648    fn config_default() {
649        let config = ParallelConfig::new();
650        assert_eq!(config.max_jobs, 0);
651        assert!(!config.fail_fast);
652        assert!(config.isolate_output);
653    }
654
655    #[test]
656    fn config_effective_jobs() {
657        let config = ParallelConfig::new().with_max_jobs(4);
658        assert_eq!(config.effective_jobs(), 4);
659    }
660
661    #[test]
662    fn config_auto_detect_jobs() {
663        let config = ParallelConfig::new();
664        assert!(config.effective_jobs() >= 1);
665    }
666
667    #[test]
668    fn config_is_enabled() {
669        let config = ParallelConfig::new().with_max_jobs(1);
670        assert!(!config.is_enabled());
671
672        let config = ParallelConfig::new().with_max_jobs(4);
673        assert!(config.is_enabled());
674    }
675
676    // ─── Round Robin Partitioning ───────────────────────────────────────
677
678    #[test]
679    fn partition_rr_basic() {
680        let result = make_result(vec![make_suite(
681            "math",
682            vec![
683                make_test("test_a", TestStatus::Passed, 10),
684                make_test("test_b", TestStatus::Passed, 20),
685                make_test("test_c", TestStatus::Passed, 30),
686                make_test("test_d", TestStatus::Passed, 40),
687            ],
688        )]);
689
690        let partitions = partition_tests(&result, 2, &PartitionStrategy::RoundRobin);
691        assert_eq!(partitions.len(), 2);
692        assert_eq!(partitions[0].total_tests(), 2);
693        assert_eq!(partitions[1].total_tests(), 2);
694    }
695
696    #[test]
697    fn partition_rr_uneven() {
698        let result = make_result(vec![make_suite(
699            "math",
700            vec![
701                make_test("test_a", TestStatus::Passed, 10),
702                make_test("test_b", TestStatus::Passed, 20),
703                make_test("test_c", TestStatus::Passed, 30),
704            ],
705        )]);
706
707        let partitions = partition_tests(&result, 2, &PartitionStrategy::RoundRobin);
708        assert_eq!(partitions[0].total_tests(), 2);
709        assert_eq!(partitions[1].total_tests(), 1);
710    }
711
712    #[test]
713    fn partition_rr_more_workers_than_tests() {
714        let result = make_result(vec![make_suite(
715            "math",
716            vec![make_test("test_a", TestStatus::Passed, 10)],
717        )]);
718
719        let partitions = partition_tests(&result, 4, &PartitionStrategy::RoundRobin);
720        assert_eq!(partitions.len(), 4);
721        assert_eq!(partitions[0].total_tests(), 1);
722        assert_eq!(partitions[1].total_tests(), 0);
723    }
724
725    // ─── By Suite Partitioning ──────────────────────────────────────────
726
727    #[test]
728    fn partition_by_suite_basic() {
729        let result = make_result(vec![
730            make_suite(
731                "math",
732                vec![
733                    make_test("test_add", TestStatus::Passed, 10),
734                    make_test("test_sub", TestStatus::Passed, 20),
735                ],
736            ),
737            make_suite(
738                "strings",
739                vec![
740                    make_test("test_concat", TestStatus::Passed, 10),
741                    make_test("test_upper", TestStatus::Passed, 20),
742                ],
743            ),
744        ]);
745
746        let partitions = partition_tests(&result, 2, &PartitionStrategy::BySuite);
747        // Each suite should go to a different worker
748        assert_eq!(partitions.len(), 2);
749        let total: usize = partitions.iter().map(|p| p.total_tests()).sum();
750        assert_eq!(total, 4);
751    }
752
753    #[test]
754    fn partition_by_suite_unbalanced() {
755        let result = make_result(vec![
756            make_suite(
757                "big",
758                vec![
759                    make_test("a", TestStatus::Passed, 10),
760                    make_test("b", TestStatus::Passed, 10),
761                    make_test("c", TestStatus::Passed, 10),
762                ],
763            ),
764            make_suite("small", vec![make_test("d", TestStatus::Passed, 10)]),
765        ]);
766
767        let partitions = partition_tests(&result, 2, &PartitionStrategy::BySuite);
768        // Big suite goes first, small suite to worker with fewer tests
769        let total: usize = partitions.iter().map(|p| p.total_tests()).sum();
770        assert_eq!(total, 4);
771    }
772
773    // ─── By Duration Partitioning ───────────────────────────────────────
774
775    #[test]
776    fn partition_by_duration() {
777        let result = make_result(vec![make_suite(
778            "math",
779            vec![
780                make_test("slow", TestStatus::Passed, 1000),
781                make_test("medium", TestStatus::Passed, 500),
782                make_test("fast1", TestStatus::Passed, 100),
783                make_test("fast2", TestStatus::Passed, 100),
784            ],
785        )]);
786
787        let partitions = partition_tests(&result, 2, &PartitionStrategy::ByDuration);
788        assert_eq!(partitions.len(), 2);
789
790        // Worker 0 gets "slow" (1000ms), worker 1 gets "medium" (500ms)
791        // Then "fast1" goes to worker 1 (500+100=600 < 1000), "fast2" to worker 1 (700 < 1000)
792        let total: usize = partitions.iter().map(|p| p.total_tests()).sum();
793        assert_eq!(total, 4);
794    }
795
796    // ─── Chunked Partitioning ───────────────────────────────────────────
797
798    #[test]
799    fn partition_chunked_basic() {
800        let result = make_result(vec![make_suite(
801            "math",
802            vec![
803                make_test("a", TestStatus::Passed, 10),
804                make_test("b", TestStatus::Passed, 10),
805                make_test("c", TestStatus::Passed, 10),
806                make_test("d", TestStatus::Passed, 10),
807            ],
808        )]);
809
810        let partitions = partition_tests(&result, 2, &PartitionStrategy::Chunked);
811        assert_eq!(partitions[0].total_tests(), 2);
812        assert_eq!(partitions[1].total_tests(), 2);
813    }
814
815    // ─── Zero Workers ───────────────────────────────────────────────────
816
817    #[test]
818    fn partition_zero_workers() {
819        let result = make_result(vec![]);
820        let partitions = partition_tests(&result, 0, &PartitionStrategy::RoundRobin);
821        assert!(partitions.is_empty());
822    }
823
824    // ─── Merge Worker Results ───────────────────────────────────────────
825
826    #[test]
827    fn merge_workers_basic() {
828        let w1 = WorkerResult {
829            worker_id: 0,
830            result: make_result(vec![make_suite(
831                "math",
832                vec![make_test("test_add", TestStatus::Passed, 10)],
833            )]),
834            wall_time: Duration::from_millis(100),
835            cancelled: false,
836        };
837
838        let w2 = WorkerResult {
839            worker_id: 1,
840            result: make_result(vec![make_suite(
841                "math",
842                vec![make_test("test_sub", TestStatus::Passed, 10)],
843            )]),
844            wall_time: Duration::from_millis(150),
845            cancelled: false,
846        };
847
848        let merged = merge_worker_results(&[w1, w2]);
849        assert_eq!(merged.total_tests(), 2);
850        assert_eq!(merged.duration, Duration::from_millis(150)); // max wall time
851        assert_eq!(merged.raw_exit_code, 0);
852    }
853
854    #[test]
855    fn merge_workers_with_failure() {
856        let w1 = WorkerResult {
857            worker_id: 0,
858            result: make_result(vec![make_suite(
859                "math",
860                vec![make_test("test_add", TestStatus::Failed, 10)],
861            )]),
862            wall_time: Duration::from_millis(100),
863            cancelled: false,
864        };
865
866        let w2 = WorkerResult {
867            worker_id: 1,
868            result: make_result(vec![make_suite(
869                "strings",
870                vec![make_test("test_concat", TestStatus::Passed, 10)],
871            )]),
872            wall_time: Duration::from_millis(100),
873            cancelled: false,
874        };
875
876        let merged = merge_worker_results(&[w1, w2]);
877        assert_eq!(merged.total_tests(), 2);
878        assert_eq!(merged.raw_exit_code, 1);
879    }
880
881    #[test]
882    fn merge_workers_same_suite() {
883        let w1 = WorkerResult {
884            worker_id: 0,
885            result: make_result(vec![make_suite(
886                "math",
887                vec![make_test("test_a", TestStatus::Passed, 10)],
888            )]),
889            wall_time: Duration::from_millis(100),
890            cancelled: false,
891        };
892
893        let w2 = WorkerResult {
894            worker_id: 1,
895            result: make_result(vec![make_suite(
896                "math",
897                vec![make_test("test_b", TestStatus::Passed, 10)],
898            )]),
899            wall_time: Duration::from_millis(100),
900            cancelled: false,
901        };
902
903        let merged = merge_worker_results(&[w1, w2]);
904        assert_eq!(merged.suites.len(), 1);
905        assert_eq!(merged.suites[0].tests.len(), 2);
906    }
907
908    // ─── Build Parallel Result ──────────────────────────────────────────
909
910    #[test]
911    fn build_parallel_result_basic() {
912        let workers = vec![
913            WorkerResult {
914                worker_id: 0,
915                result: make_result(vec![make_suite(
916                    "a",
917                    vec![make_test("t1", TestStatus::Passed, 10)],
918                )]),
919                wall_time: Duration::from_millis(100),
920                cancelled: false,
921            },
922            WorkerResult {
923                worker_id: 1,
924                result: make_result(vec![make_suite(
925                    "b",
926                    vec![make_test("t2", TestStatus::Passed, 10)],
927                )]),
928                wall_time: Duration::from_millis(200),
929                cancelled: false,
930            },
931        ];
932
933        let result = build_parallel_result(workers);
934        assert_eq!(result.num_workers, 2);
935        assert_eq!(result.wall_time, Duration::from_millis(200));
936        assert!(!result.had_cancellation);
937        assert_eq!(result.merged.total_tests(), 2);
938    }
939
940    #[test]
941    fn build_parallel_result_with_cancel() {
942        let workers = vec![WorkerResult {
943            worker_id: 0,
944            result: make_result(vec![]),
945            wall_time: Duration::from_millis(50),
946            cancelled: true,
947        }];
948
949        let result = build_parallel_result(workers);
950        assert!(result.had_cancellation);
951    }
952
953    // ─── CancellationToken Tests ────────────────────────────────────────
954
955    #[test]
956    fn cancellation_token_default() {
957        let token = CancellationToken::new();
958        assert!(!token.is_cancelled());
959    }
960
961    #[test]
962    fn cancellation_token_cancel() {
963        let token = CancellationToken::new();
964        token.cancel();
965        assert!(token.is_cancelled());
966    }
967
968    #[test]
969    fn cancellation_token_clone() {
970        let token = CancellationToken::new();
971        let clone = token.clone();
972        token.cancel();
973        assert!(clone.is_cancelled());
974    }
975
976    // ─── Stats Tests ────────────────────────────────────────────────────
977
978    #[test]
979    fn stats_basic() {
980        let workers = vec![
981            WorkerResult {
982                worker_id: 0,
983                result: make_result(vec![make_suite(
984                    "a",
985                    vec![
986                        make_test("t1", TestStatus::Passed, 10),
987                        make_test("t2", TestStatus::Passed, 10),
988                    ],
989                )]),
990                wall_time: Duration::from_millis(100),
991                cancelled: false,
992            },
993            WorkerResult {
994                worker_id: 1,
995                result: make_result(vec![make_suite(
996                    "b",
997                    vec![make_test("t3", TestStatus::Passed, 10)],
998                )]),
999                wall_time: Duration::from_millis(100),
1000                cancelled: false,
1001            },
1002        ];
1003
1004        let result = build_parallel_result(workers);
1005        let stats = compute_parallel_stats(&result);
1006
1007        assert_eq!(stats.num_workers, 2);
1008        assert_eq!(stats.tests_per_worker.0, 1); // min
1009        assert_eq!(stats.tests_per_worker.1, 2); // max
1010        assert!(stats.speedup >= 1.0);
1011    }
1012
1013    // ─── Balance Tests ──────────────────────────────────────────────────
1014
1015    #[test]
1016    fn is_balanced_basic() {
1017        let partitions = vec![
1018            WorkPartition {
1019                worker_id: 0,
1020                test_groups: vec![TestGroup {
1021                    suite_name: "s".into(),
1022                    test_names: vec!["a".into(), "b".into()],
1023                }],
1024            },
1025            WorkPartition {
1026                worker_id: 1,
1027                test_groups: vec![TestGroup {
1028                    suite_name: "s".into(),
1029                    test_names: vec!["c".into(), "d".into()],
1030                }],
1031            },
1032        ];
1033        assert!(is_balanced(&partitions));
1034    }
1035
1036    #[test]
1037    fn is_balanced_skewed() {
1038        let partitions = vec![
1039            WorkPartition {
1040                worker_id: 0,
1041                test_groups: vec![TestGroup {
1042                    suite_name: "s".into(),
1043                    test_names: vec![
1044                        "a".into(),
1045                        "b".into(),
1046                        "c".into(),
1047                        "d".into(),
1048                        "e".into(),
1049                        "f".into(),
1050                        "g".into(),
1051                        "h".into(),
1052                        "i".into(),
1053                    ],
1054                }],
1055            },
1056            WorkPartition {
1057                worker_id: 1,
1058                test_groups: vec![TestGroup {
1059                    suite_name: "s".into(),
1060                    test_names: vec!["f".into()],
1061                }],
1062            },
1063        ];
1064        assert!(!is_balanced(&partitions));
1065    }
1066
1067    #[test]
1068    fn is_balanced_empty() {
1069        assert!(is_balanced(&[]));
1070    }
1071
1072    // ─── Rebalance Tests ────────────────────────────────────────────────
1073
1074    #[test]
1075    fn rebalance_skewed() {
1076        let mut partitions = vec![
1077            WorkPartition {
1078                worker_id: 0,
1079                test_groups: vec![TestGroup {
1080                    suite_name: "s".into(),
1081                    test_names: vec!["a".into(), "b".into(), "c".into(), "d".into()],
1082                }],
1083            },
1084            WorkPartition {
1085                worker_id: 1,
1086                test_groups: Vec::new(),
1087            },
1088        ];
1089
1090        rebalance(&mut partitions);
1091        assert_eq!(partitions[0].total_tests(), 2);
1092        assert_eq!(partitions[1].total_tests(), 2);
1093    }
1094
1095    // ─── Estimate Time Tests ────────────────────────────────────────────
1096
1097    #[test]
1098    fn estimate_time() {
1099        let result = make_result(vec![make_suite(
1100            "math",
1101            vec![
1102                make_test("a", TestStatus::Passed, 100),
1103                make_test("b", TestStatus::Passed, 200),
1104            ],
1105        )]);
1106
1107        let partition = WorkPartition {
1108            worker_id: 0,
1109            test_groups: vec![TestGroup {
1110                suite_name: "math".into(),
1111                test_names: vec!["a".into(), "b".into()],
1112            }],
1113        };
1114
1115        let est = estimate_partition_time(&partition, &result);
1116        assert_eq!(est, Duration::from_millis(300));
1117    }
1118
1119    #[test]
1120    fn estimate_time_missing_test() {
1121        let result = make_result(vec![make_suite(
1122            "math",
1123            vec![make_test("a", TestStatus::Passed, 100)],
1124        )]);
1125
1126        let partition = WorkPartition {
1127            worker_id: 0,
1128            test_groups: vec![TestGroup {
1129                suite_name: "math".into(),
1130                test_names: vec!["a".into(), "nonexistent".into()],
1131            }],
1132        };
1133
1134        let est = estimate_partition_time(&partition, &result);
1135        assert_eq!(est, Duration::from_millis(100)); // only "a"
1136    }
1137
1138    // ─── Format Partition ───────────────────────────────────────────────
1139
1140    #[test]
1141    fn format_partition_test() {
1142        let partition = WorkPartition {
1143            worker_id: 0,
1144            test_groups: vec![
1145                TestGroup {
1146                    suite_name: "math".into(),
1147                    test_names: vec!["a".into(), "b".into()],
1148                },
1149                TestGroup {
1150                    suite_name: "strings".into(),
1151                    test_names: vec!["c".into()],
1152                },
1153            ],
1154        };
1155
1156        let formatted = format_partition(&partition);
1157        assert!(formatted.contains("Worker 0"));
1158        assert!(formatted.contains("3 tests"));
1159        assert!(formatted.contains("math(2 tests)"));
1160        assert!(formatted.contains("strings(1 tests)"));
1161    }
1162
1163    // ─── Progress Monitor Tests ─────────────────────────────────────────
1164
1165    #[test]
1166    fn progress_monitor_basic() {
1167        let monitor = ProgressMonitor::new(10);
1168        assert_eq!(monitor.total_completed(), 0);
1169        assert_eq!(monitor.progress_percent(), 0.0);
1170    }
1171
1172    #[test]
1173    fn progress_monitor_track() {
1174        let monitor = ProgressMonitor::new(4);
1175        monitor.record_completion(0);
1176        monitor.record_completion(0);
1177        monitor.record_completion(1);
1178
1179        assert_eq!(monitor.total_completed(), 3);
1180        assert_eq!(monitor.progress_percent(), 75.0);
1181    }
1182
1183    #[test]
1184    fn progress_monitor_zero_total() {
1185        let monitor = ProgressMonitor::new(0);
1186        assert_eq!(monitor.progress_percent(), 100.0);
1187    }
1188
1189    #[test]
1190    fn progress_monitor_elapsed() {
1191        let monitor = ProgressMonitor::new(10);
1192        assert!(monitor.elapsed() < Duration::from_secs(1));
1193    }
1194
1195    #[test]
1196    fn progress_monitor_eta_none() {
1197        let monitor = ProgressMonitor::new(10);
1198        assert!(monitor.eta().is_none()); // no completions yet
1199    }
1200}