rpytest_core/protocol/messages.rs

//! Request and response types for the IPC protocol.

use serde::{Deserialize, Serialize};

/// Current protocol version. Increment when breaking changes are made.
pub const PROTOCOL_VERSION: u32 = 1;

/// Test node information returned from daemon.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct TestNodeInfo {
    /// Unique node ID (pytest format).
    pub node_id: String,
    /// File path relative to repo root.
    pub file_path: String,
    /// Line number where test is defined.
    pub lineno: Option<u32>,
    /// Test function/method name.
    pub name: String,
    /// Parent class name (if method).
    pub class_name: Option<String>,
    /// Markers attached to this test.
    pub markers: Vec<String>,
    /// Whether test is marked as skip.
    pub skip: bool,
    /// Whether test is marked as xfail.
    pub xfail: bool,
}

/// Commands sent from CLI to daemon.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum Request {
    /// Initialize a repository context within the daemon.
    InitContext {
        /// Protocol version for compatibility checking.
        protocol_version: u32,
        /// Absolute path to the repository root.
        repo_path: String,
        /// Optional path to Python interpreter.
        python_path: Option<String>,
        /// Execution mode: "embedded", "subprocess", or "auto".
        #[serde(default)]
        execution_mode: Option<String>,
    },

    /// Collect tests for a repository context.
    Collect {
        /// Context identifier returned from InitContext.
        context_id: String,
        /// Force full re-collection even if cache is valid.
        force: bool,
    },

    /// Run a set of tests.
    Run {
        /// Context identifier.
        context_id: String,
        /// List of test node IDs to run.
        node_ids: Vec<String>,
        /// Number of parallel workers (None = auto).
        workers: Option<u32>,
        /// Stop after N failures.
        maxfail: Option<u32>,
    },

    /// List tests matching filters (without running).
    List {
        /// Context identifier.
        context_id: String,
        /// Keyword expression filter.
        keyword: Option<String>,
        /// Marker expression filter.
        marker: Option<String>,
    },

    /// Get detailed inventory with full test metadata.
    GetInventory {
        /// Context identifier.
        context_id: String,
    },

    /// Get worker pool status.
    GetWorkerStatus {
        /// Context identifier.
        context_id: String,
    },

    /// Configure worker pool.
    ConfigureWorkers {
        /// Context identifier.
        context_id: String,
        /// Number of workers to maintain.
        num_workers: u32,
    },

    /// Shut down the daemon or a specific context.
    Shutdown {
        /// If Some, shut down only this context. If None, shut down the entire daemon.
        context_id: Option<String>,
    },

    /// Health check / ping.
    Ping,

    /// Start a streaming run (returns run_id, results come via GetRunProgress).
    RunStream {
        /// Context identifier.
        context_id: String,
        /// List of test node IDs to run.
        node_ids: Vec<String>,
        /// Number of parallel workers (None = auto).
        workers: Option<u32>,
        /// Stop after N failures.
        maxfail: Option<u32>,
    },

    /// Get progress and results from a streaming run.
    GetRunProgress {
        /// Context identifier.
        context_id: String,
        /// Run identifier from RunStream response.
        run_id: String,
    },

    // --- Phase 5: Flakiness ---
    /// Get flakiness report for all tracked tests.
    GetFlakinessReport {
        /// Context identifier.
        context_id: String,
    },

    /// Get flakiness info for a specific test.
    GetTestFlakiness {
        /// Context identifier.
        context_id: String,
        /// Test node ID.
        node_id: String,
    },

    /// Configure auto-rerun behavior.
    ConfigureRerun {
        /// Context identifier.
        context_id: String,
        /// Enable auto-rerun.
        enabled: bool,
        /// Maximum reruns per test.
        max_reruns: u32,
        /// Only rerun known flaky tests.
        only_flaky: bool,
        /// Delay between reruns in milliseconds.
        delay_ms: u32,
    },

    /// Get current rerun configuration.
    GetRerunConfig {
        /// Context identifier.
        context_id: String,
    },

    /// Run tests with auto-rerun enabled.
    RunWithRerun {
        /// Context identifier.
        context_id: String,
        /// List of test node IDs to run.
        node_ids: Vec<String>,
        /// Number of parallel workers (None = auto).
        workers: Option<u32>,
        /// Stop after N failures.
        maxfail: Option<u32>,
    },

    // --- Phase 5: Fixtures ---
    /// Configure session fixture reuse.
    ConfigureFixtureReuse {
        /// Context identifier.
        context_id: String,
        /// Enable fixture reuse.
        enabled: bool,
        /// Max fixture age in seconds.
        max_age_seconds: f64,
        /// Teardown on conftest.py changes.
        teardown_on_conftest_change: bool,
    },

    /// Get fixture configuration.
    GetFixtureConfig {
        /// Context identifier.
        context_id: String,
    },

    /// Get session status.
    GetSessionStatus {
        /// Context identifier.
        context_id: String,
    },

    // --- Phase 5: Sharding ---
    /// Get tests for a specific shard.
    GetShard {
        /// Context identifier.
        context_id: String,
        /// Tests to shard (empty = all inventory).
        node_ids: Vec<String>,
        /// This shard's index (0-based).
        shard_index: u32,
        /// Total number of shards.
        total_shards: u32,
        /// Sharding strategy.
        strategy: String,
    },

    /// Get sharding distribution info.
    GetShardInfo {
        /// Context identifier.
        context_id: String,
        /// Tests to shard (empty = all inventory).
        node_ids: Vec<String>,
        /// Total number of shards.
        total_shards: u32,
        /// Sharding strategy.
        strategy: String,
    },
}
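
// Wire-shape note (illustrative, not part of the original source): with the
// internally tagged representation above, a struct variant such as `Collect`
// serializes as a single map carrying the tag plus its fields. Rendered as
// JSON for readability (the actual transport encodes the same structure via
// rmp_serde / MessagePack):
//
//     { "type": "collect", "context_id": "ctx-123", "force": false }
//
// Unit variants like `Ping` carry only the tag: { "type": "ping" }.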

/// Responses sent from daemon to CLI.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum Response {
    /// Context successfully initialized.
    ContextReady {
        /// Protocol version for compatibility checking.
        protocol_version: u32,
        /// Unique context identifier.
        context_id: String,
        /// Hash of the current inventory for cache validation.
        inventory_hash: String,
    },

    /// Collection completed.
    CollectionComplete {
        /// Number of test nodes collected.
        node_count: usize,
        /// Collection duration in milliseconds.
        duration_ms: u64,
    },

    /// List of test node IDs matching the query.
    TestList {
        /// Matching node IDs.
        node_ids: Vec<String>,
    },

    /// Detailed inventory data.
    InventoryData {
        /// Inventory hash for cache validation.
        hash: String,
        /// Collection timestamp (Unix epoch ms).
        collected_at: u64,
        /// Test nodes with metadata.
        nodes: Vec<TestNodeInfo>,
    },

    /// Run completed.
    RunComplete {
        /// Total tests run.
        total: usize,
        /// Tests passed.
        passed: usize,
        /// Tests failed.
        failed: usize,
        /// Tests skipped.
        skipped: usize,
        /// Tests errored.
        errors: usize,
        /// Total duration in milliseconds.
        duration_ms: u64,
    },

    /// Worker pool status.
    WorkerStatus {
        /// Number of active workers.
        active_workers: u32,
        /// Number of idle workers.
        idle_workers: u32,
        /// Total tests executed by pool.
        tests_executed: u64,
        /// Average test duration in milliseconds.
        avg_test_duration_ms: u64,
    },

    /// Worker configuration acknowledged.
    WorkerConfigAck {
        /// New number of workers.
        num_workers: u32,
    },

    /// Shutdown acknowledged.
    ShutdownAck,

    /// Pong response to ping.
    Pong,

    /// Streaming run started.
    RunStarted {
        /// Unique run identifier for polling progress.
        run_id: String,
        /// Total tests to run.
        total_tests: usize,
    },

    /// Progress update with any completed test results.
    RunProgress {
        /// Run identifier.
        run_id: String,
        /// Total tests in this run.
        total: usize,
        /// Tests completed so far.
        completed: usize,
        /// Tests currently running.
        running: usize,
        /// Whether the run is complete.
        done: bool,
        /// Newly completed test results since last poll.
        results: Vec<TestResultInfo>,
    },

    /// Error response.
    Error {
        /// Error category.
        code: ErrorCode,
        /// Human-readable error message.
        message: String,
    },

    // --- Phase 5: Flakiness Responses ---
    /// Flakiness report for tracked tests.
    FlakinessReport {
        /// Tests identified as flaky.
        flaky_tests: Vec<FlakinessInfo>,
        /// Tests with some failures but not flaky.
        unstable_tests: Vec<FlakinessInfo>,
        /// Count of stable tests.
        stable_count: usize,
        /// Total tests tracked.
        total_tracked: usize,
    },

    /// Flakiness info for a single test.
    TestFlakiness {
        /// Test node ID.
        node_id: String,
        /// Failure rate (0.0-1.0).
        failure_rate: f64,
        /// Whether test is considered flaky.
        is_flaky: bool,
        /// Number of outcome flips.
        flaky_streak: u32,
        /// Consecutive failures.
        consecutive_failures: u32,
        /// Consecutive passes.
        consecutive_passes: u32,
        /// Total runs.
        total_runs: u32,
        /// Recent outcomes.
        recent_outcomes: Vec<String>,
    },

    /// Rerun configuration.
    RerunConfig {
        /// Whether auto-rerun is enabled.
        enabled: bool,
        /// Maximum reruns per test.
        max_reruns: u32,
        /// Only rerun known flaky tests.
        only_flaky: bool,
        /// Delay between reruns in milliseconds.
        delay_ms: u32,
    },

    // --- Phase 5: Fixture Responses ---
    /// Fixture configuration.
    FixtureConfig {
        /// Whether fixture reuse is enabled.
        enabled: bool,
        /// Max fixture age in seconds.
        max_fixture_age_seconds: f64,
        /// Teardown on conftest change.
        teardown_on_conftest_change: bool,
        /// Teardown on test file change.
        teardown_on_test_file_change: bool,
        /// Scopes to reuse.
        scopes_to_reuse: Vec<String>,
    },

    /// Session status.
    SessionStatus {
        /// Session ID.
        session_id: String,
        /// Repo path.
        repo_path: String,
        /// Creation timestamp.
        created_at: f64,
        /// Last run timestamp.
        last_run_at: f64,
        /// Total runs.
        total_runs: u32,
        /// Whether enabled.
        enabled: bool,
    },

    // --- Phase 5: Sharding Responses ---
    /// Tests assigned to a shard.
    ShardedTests {
        /// Shard index.
        shard_index: u32,
        /// Total shards.
        total_shards: u32,
        /// Node IDs in this shard.
        node_ids: Vec<String>,
    },

    /// Sharding distribution info.
    ShardInfo {
        /// Strategy used.
        strategy: String,
        /// Total shards.
        total_shards: u32,
        /// Total tests.
        total_tests: usize,
        /// Test counts per shard.
        shard_test_counts: Vec<usize>,
        /// Duration estimates per shard.
        shard_durations_ms: Vec<u64>,
        /// Count imbalance percentage.
        count_imbalance_percent: f64,
        /// Duration imbalance percentage.
        duration_imbalance_percent: f64,
        /// Estimated wall time in milliseconds.
        estimated_wall_time_ms: u64,
    },

    /// Generic config acknowledgment.
    ConfigAck {
        /// Config type.
        config_type: String,
        /// The configuration.
        config: serde_json::Value,
    },
}
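
// Streaming-run flow (illustrative summary drawn from the doc comments above):
// the CLI sends `Request::RunStream`, the daemon answers with
// `Response::RunStarted { run_id, .. }`, and the CLI then polls with
// `Request::GetRunProgress { run_id, .. }`, draining the `results` field of
// each `RunProgress` until `done` is true.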

/// Individual test result info for streaming.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct TestResultInfo {
    /// Test node ID.
    pub node_id: String,
    /// Test outcome.
    pub outcome: String,
    /// Duration in milliseconds.
    pub duration_ms: u64,
    /// Optional failure message.
    pub message: Option<String>,
}

/// Flakiness info for a test.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct FlakinessInfo {
    /// Test node ID.
    pub node_id: String,
    /// Failure rate (0.0-1.0).
    pub failure_rate: f64,
    /// Number of outcome flips.
    pub flaky_streak: u32,
    /// Total runs.
    pub total_runs: u32,
    /// Consecutive failures.
    pub consecutive_failures: u32,
    /// Consecutive passes.
    pub consecutive_passes: u32,
}

/// Error codes for categorizing failures.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "snake_case")]
pub enum ErrorCode {
    /// Context not found or not initialized.
    ContextNotFound,
    /// Collection failed (syntax error, import error, etc.).
    CollectionFailed,
    /// Invalid request parameters.
    InvalidRequest,
    /// Internal daemon error.
    InternalError,
    /// Operation timed out.
    Timeout,
    /// Python interpreter not found or invalid.
    PythonNotFound,
    /// Protocol version mismatch between CLI and daemon.
    VersionMismatch,
}
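
// Note (illustrative): because of `rename_all = "snake_case"`, these unit
// variants serialize as plain strings; rendered as JSON, for example,
// `ErrorCode::ContextNotFound` becomes "context_not_found".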

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn request_roundtrip() {
        let requests = vec![
            Request::InitContext {
                protocol_version: PROTOCOL_VERSION,
                repo_path: "/path/to/repo".to_string(),
                python_path: Some("/usr/bin/python3".to_string()),
                execution_mode: Some("auto".to_string()),
            },
            Request::Collect {
                context_id: "ctx-123".to_string(),
                force: true,
            },
            Request::Run {
                context_id: "ctx-123".to_string(),
                node_ids: vec!["test_foo.py::test_bar".to_string()],
                workers: Some(4),
                maxfail: Some(1),
            },
            Request::List {
                context_id: "ctx-123".to_string(),
                keyword: Some("auth".to_string()),
                marker: None,
            },
            Request::GetInventory {
                context_id: "ctx-123".to_string(),
            },
            Request::Shutdown {
                context_id: Some("ctx-123".to_string()),
            },
            Request::Ping,
            Request::RunStream {
                context_id: "ctx-123".to_string(),
                node_ids: vec!["test_foo.py::test_bar".to_string()],
                workers: Some(4),
                maxfail: None,
            },
            Request::GetRunProgress {
                context_id: "ctx-123".to_string(),
                run_id: "run-123".to_string(),
            },
        ];

        for req in requests {
            let encoded = rmp_serde::to_vec(&req).unwrap();
            let decoded: Request = rmp_serde::from_slice(&encoded).unwrap();
            assert_eq!(req, decoded);
        }
    }

    #[test]
    fn response_roundtrip() {
        let responses = vec![
            Response::ContextReady {
                protocol_version: PROTOCOL_VERSION,
                context_id: "ctx-123".to_string(),
                inventory_hash: "abc123".to_string(),
            },
            Response::CollectionComplete {
                node_count: 42,
                duration_ms: 150,
            },
            Response::TestList {
                node_ids: vec!["test_a".to_string(), "test_b".to_string()],
            },
            Response::InventoryData {
                hash: "abc123".to_string(),
                collected_at: 1234567890,
                nodes: vec![TestNodeInfo {
                    node_id: "test.py::test_func".to_string(),
                    file_path: "test.py".to_string(),
                    lineno: Some(10),
                    name: "test_func".to_string(),
                    class_name: None,
                    markers: vec!["slow".to_string()],
                    skip: false,
                    xfail: false,
                }],
            },
            Response::RunComplete {
                total: 10,
                passed: 8,
                failed: 1,
                skipped: 1,
                errors: 0,
                duration_ms: 5000,
            },
            Response::ShutdownAck,
            Response::Pong,
            Response::RunStarted {
                run_id: "run-123".to_string(),
                total_tests: 10,
            },
            Response::RunProgress {
                run_id: "run-123".to_string(),
                total: 10,
                completed: 5,
                running: 2,
                done: false,
                results: vec![TestResultInfo {
                    node_id: "test.py::test_foo".to_string(),
                    outcome: "passed".to_string(),
                    duration_ms: 100,
                    message: None,
                }],
            },
            Response::Error {
                code: ErrorCode::ContextNotFound,
                message: "Context not found".to_string(),
            },
        ];

        for resp in responses {
            let encoded = rmp_serde::to_vec(&resp).unwrap();
            let decoded: Response = rmp_serde::from_slice(&encoded).unwrap();
            assert_eq!(resp, decoded);
        }
    }
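
    // Illustrative addition (not in the original tests): sanity-check the
    // `#[serde(tag = "type", rename_all = "snake_case")]` representation using
    // serde_json, which this module already depends on for `ConfigAck`.
    #[test]
    fn tagged_representation_shape() {
        // A unit variant carries only the tag field, renamed to snake_case.
        let ping = serde_json::to_value(&Request::Ping).unwrap();
        assert_eq!(ping, serde_json::json!({ "type": "ping" }));

        // A struct variant places its fields alongside the tag.
        let collect = serde_json::to_value(&Request::Collect {
            context_id: "ctx-123".to_string(),
            force: false,
        })
        .unwrap();
        assert_eq!(
            collect,
            serde_json::json!({ "type": "collect", "context_id": "ctx-123", "force": false })
        );

        // `ErrorCode` unit variants serialize as snake_case strings.
        let code = serde_json::to_value(&ErrorCode::ContextNotFound).unwrap();
        assert_eq!(code, serde_json::json!("context_not_found"));
    }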
}