d-engine-server 0.2.3

Production-ready Raft consensus engine server and runtime
Documentation
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
//! Unit tests for EmbeddedEngine leader election APIs

#[cfg(test)]
mod embedded_engine_tests {
    use std::sync::Arc;
    use std::time::Duration;

    use crate::api::EmbeddedEngine;
    use crate::storage::FileStateMachine;
    use crate::storage::FileStorageEngine;

    /// Builds a file-backed storage engine and state machine rooted in a fresh
    /// temp directory. The returned `TempDir` must be kept alive by the caller
    /// for as long as the engine runs — dropping it deletes the backing paths.
    async fn create_test_storage_and_sm() -> (
        Arc<FileStorageEngine>,
        Arc<FileStateMachine>,
        tempfile::TempDir,
    ) {
        // Make sure an externally-set CONFIG_PATH cannot leak into the test.
        unsafe {
            std::env::remove_var("CONFIG_PATH");
        }

        let dir = tempfile::tempdir().expect("Failed to create temp dir");
        let engine_dir = dir.path().join("storage");
        let machine_dir = dir.path().join("sm");

        std::fs::create_dir_all(&engine_dir).unwrap();
        std::fs::create_dir_all(&machine_dir).unwrap();

        let engine_store = FileStorageEngine::new(engine_dir).expect("Failed to create storage");
        let machine = FileStateMachine::new(machine_dir)
            .await
            .expect("Failed to create state machine");

        (Arc::new(engine_store), Arc::new(machine), dir)
    }

    /// A lone node must elect itself and report leader_id 1 with a positive term.
    #[tokio::test]
    async fn test_wait_ready_single_node_success() {
        let (storage, sm, _temp_dir) = create_test_storage_and_sm().await;

        // Boot a single-node embedded engine.
        let engine = EmbeddedEngine::start_custom(storage, sm, None)
            .await
            .expect("Failed to start engine");

        // A single node should win its own election well inside five seconds.
        let election = engine.wait_ready(Duration::from_secs(5)).await;
        assert!(
            election.is_ok(),
            "Leader election should succeed in single-node mode"
        );

        let info = election.unwrap();
        assert_eq!(
            info.leader_id, 1,
            "Single node should elect itself as leader"
        );
        assert!(info.term > 0, "Term should be positive");

        engine.stop().await.expect("Failed to stop engine");
    }

    /// Exercises the wait_ready timeout path with a 1 ns deadline.
    ///
    /// In single-node mode the election may still beat even this deadline, so
    /// either outcome is acceptable — the test only verifies the timeout code
    /// path runs without panicking. A quorum-less cluster would reliably expire.
    #[tokio::test]
    async fn test_wait_ready_timeout() {
        let (storage, sm, _temp_dir) = create_test_storage_and_sm().await;

        let engine = EmbeddedEngine::start_custom(storage, sm, None)
            .await
            .expect("Failed to start engine");

        // Result intentionally ignored: success and timeout are both valid here.
        let _ = engine.wait_ready(Duration::from_nanos(1)).await;

        engine.stop().await.expect("Failed to stop engine");
    }

    /// Verifies that `leader_change_notifier()` exposes the elected leader.
    ///
    /// Fix: a tokio `watch::Receiver` obtained via subscription treats the
    /// value present at subscription time as already seen, so awaiting
    /// `changed()` unconditionally can hang (and then panic on the timeout)
    /// whenever the election completes before we subscribe. We therefore
    /// inspect the current value first and only await `changed()` when no
    /// leader is visible yet.
    #[tokio::test]
    async fn test_leader_change_notifier_basic() {
        let (storage, sm, _temp_dir) = create_test_storage_and_sm().await;

        let engine = EmbeddedEngine::start_custom(storage, sm, None)
            .await
            .expect("Failed to start engine");

        // Subscribe to leader changes.
        let mut leader_rx = engine.leader_change_notifier();

        // Wait for leader election.
        engine
            .wait_ready(Duration::from_secs(5))
            .await
            .expect("Leader should be elected");

        // Avoid the race described above: block on changed() only when the
        // receiver has not yet observed a leader.
        if leader_rx.borrow_and_update().is_none() {
            tokio::time::timeout(Duration::from_secs(1), leader_rx.changed())
                .await
                .expect("Should receive leader notification within timeout")
                .expect("Should receive change event");
        }

        let current_leader = *leader_rx.borrow();
        assert!(current_leader.is_some(), "Leader should be elected");

        if let Some(info) = current_leader {
            assert_eq!(info.leader_id, 1);
            assert!(info.term > 0);
        }

        engine.stop().await.expect("Failed to stop engine");
    }

    /// wait_ready() must cover node init plus election, and do so quickly on
    /// a single node (< 2 s).
    #[tokio::test]
    async fn test_ready_and_wait_ready_sequence() {
        let (storage, sm, _temp_dir) = create_test_storage_and_sm().await;

        let engine = EmbeddedEngine::start_custom(storage, sm, None)
            .await
            .expect("Failed to start engine");

        // Time the combined init + election step.
        let began = std::time::Instant::now();
        let info = engine
            .wait_ready(Duration::from_secs(5))
            .await
            .expect("Leader should be elected");
        let took = began.elapsed();

        // Single-node elections should finish fast.
        assert!(
            took < Duration::from_secs(2),
            "Leader election should be fast in single-node"
        );

        // And report this node as the leader with a valid term.
        assert_eq!(info.leader_id, 1);
        assert!(info.term > 0);

        engine.stop().await.expect("Failed to stop engine");
    }

    /// Once a leader exists, the engine's client must accept writes.
    #[tokio::test]
    async fn test_client_available_after_wait_ready() {
        let (storage, sm, _temp_dir) = create_test_storage_and_sm().await;

        let engine = EmbeddedEngine::start_custom(storage, sm, None)
            .await
            .expect("Failed to start engine");

        engine
            .wait_ready(Duration::from_secs(5))
            .await
            .expect("Leader should be elected");

        // Issue a write through the client obtained post-election.
        let outcome = engine
            .client()
            .put(b"test_key".to_vec(), b"test_value".to_vec())
            .await;
        assert!(
            outcome.is_ok(),
            "Put operation should succeed after leader election"
        );

        engine.stop().await.expect("Failed to stop engine");
    }

    /// Every subscriber of `leader_change_notifier()` must observe the same
    /// elected leader.
    ///
    /// Fix: a tokio `watch::Receiver` treats the value present at subscription
    /// time as already seen, so blocking on `changed()` unconditionally can
    /// hang (then panic on the timeout) when the election finishes before the
    /// subscription. Each receiver checks its current value first and only
    /// awaits `changed()` when no leader is visible yet.
    #[tokio::test]
    async fn test_multiple_leader_change_notifier_subscribers() {
        let (storage, sm, _temp_dir) = create_test_storage_and_sm().await;

        let engine = EmbeddedEngine::start_custom(storage, sm, None)
            .await
            .expect("Failed to start engine");

        // Two independent subscriptions to the same notifier.
        let mut rx1 = engine.leader_change_notifier();
        let mut rx2 = engine.leader_change_notifier();

        // Wait for leader election.
        engine
            .wait_ready(Duration::from_secs(5))
            .await
            .expect("Leader should be elected");

        // Subscriber 1: wait for a change only if it has not seen a leader yet.
        if rx1.borrow_and_update().is_none() {
            tokio::time::timeout(Duration::from_secs(1), rx1.changed())
                .await
                .expect("Subscriber 1 should receive within timeout")
                .expect("Subscriber 1 should receive change");
        }

        // Subscriber 2: same protocol.
        if rx2.borrow_and_update().is_none() {
            tokio::time::timeout(Duration::from_secs(1), rx2.changed())
                .await
                .expect("Subscriber 2 should receive within timeout")
                .expect("Subscriber 2 should receive change");
        }

        // Both receivers must agree on (and have) the leader.
        let leader1 = *rx1.borrow();
        let leader2 = *rx2.borrow();
        assert_eq!(leader1, leader2, "Both subscribers should see same leader");
        assert!(leader1.is_some(), "A leader should have been observed");

        engine.stop().await.expect("Failed to stop engine");
    }

    /// Stopping a freshly started engine must complete without error.
    #[tokio::test]
    async fn test_engine_stop_cleans_up() {
        let (storage, sm, _temp_dir) = create_test_storage_and_sm().await;

        let engine = EmbeddedEngine::start_custom(storage, sm, None)
            .await
            .expect("Failed to start engine");

        // Shutdown is the behavior under test here.
        assert!(engine.stop().await.is_ok(), "Stop should succeed");
    }

    /// A second wait_ready() after the leader is already elected must return
    /// near-instantly and report the same leader/term as the first call.
    #[tokio::test]
    async fn test_wait_ready_race_condition_already_elected() {
        let (storage, sm, _temp_dir) = create_test_storage_and_sm().await;

        let engine = EmbeddedEngine::start_custom(storage, sm, None)
            .await
            .expect("Failed to start engine");

        // First call blocks until the election completes.
        let first_result = engine.wait_ready(Duration::from_secs(5)).await;
        assert!(first_result.is_ok(), "First wait_ready should succeed");
        let first_info = first_result.unwrap();

        // Second call races against an already-elected leader.
        let clock = std::time::Instant::now();
        let second_result = engine.wait_ready(Duration::from_secs(5)).await;
        let second_duration = clock.elapsed();

        assert!(second_result.is_ok(), "Second wait_ready should succeed");
        let second_info = second_result.unwrap();

        // It must short-circuit rather than re-wait (< 100 ms).
        assert!(
            second_duration < Duration::from_millis(100),
            "wait_ready should return immediately when leader already elected, took {second_duration:?}"
        );

        // Same leader, same term on both calls.
        assert_eq!(first_info.leader_id, second_info.leader_id);
        assert_eq!(first_info.term, second_info.term);

        engine.stop().await.expect("Failed to stop engine");
    }

    #[tokio::test]
    async fn test_wait_ready_multiple_calls_concurrent() {
        let (storage, sm, _temp_dir) = create_test_storage_and_sm().await;

        let engine = Arc::new(
            EmbeddedEngine::start_custom(storage, sm, None)
                .await
                .expect("Failed to start engine"),
        );

        // Wait for initial leader election
        engine
            .wait_ready(Duration::from_secs(5))
            .await
            .expect("Initial leader election should succeed");

        // Spawn multiple concurrent wait_ready calls
        let mut handles = vec![];
        for _ in 0..10 {
            let engine_clone = engine.clone();
            let handle = tokio::spawn(async move {
                let start = std::time::Instant::now();
                let result = engine_clone.wait_ready(Duration::from_secs(5)).await;
                let duration = start.elapsed();
                (result, duration)
            });
            handles.push(handle);
        }

        // All should complete successfully and quickly
        for handle in handles {
            let (result, duration) = handle.await.expect("Task should not panic");
            assert!(result.is_ok(), "wait_ready should succeed");
            assert!(
                duration < Duration::from_millis(100),
                "Should return immediately, took {duration:?}"
            );
        }

        Arc::try_unwrap(engine)
            .ok()
            .expect("Arc should have single owner")
            .stop()
            .await
            .expect("Failed to stop engine");
    }

    /// After election, wait_ready() must short-circuit on the notifier's
    /// current value instead of waiting for a fresh changed() event.
    #[tokio::test]
    async fn test_wait_ready_check_current_value_first() {
        let (storage, sm, _temp_dir) = create_test_storage_and_sm().await;

        let engine = EmbeddedEngine::start_custom(storage, sm, None)
            .await
            .expect("Failed to start engine");

        engine
            .wait_ready(Duration::from_secs(5))
            .await
            .expect("Leader should be elected");

        // The notifier's current value must already carry the leader.
        let leader_rx = engine.leader_change_notifier();
        assert!(leader_rx.borrow().is_some(), "Current leader should be set");

        // A repeat call should return almost immediately (< 50 ms).
        let begin = std::time::Instant::now();
        let result = engine.wait_ready(Duration::from_secs(5)).await;
        let duration = begin.elapsed();

        assert!(result.is_ok(), "Should succeed");
        assert!(
            duration < Duration::from_millis(50),
            "Should check current value first and return immediately, took {duration:?}"
        );

        engine.stop().await.expect("Failed to stop engine");
    }

    /// Tests for configuration validation (start/start_with with various configs)
    ///
    /// NOTE(review): this module is gated on the `rocksdb` feature even though
    /// the tests use `start()`/`start_with()` defaults and the File-based
    /// helper from the parent module — confirm the feature gate is intentional.
    ///
    /// Tests that touch `/tmp/db` or CONFIG_PATH are serialized via
    /// `serial_test` because they mutate shared process/filesystem state.
    #[cfg(feature = "rocksdb")]
    mod config_validation_tests {
        use serial_test::serial;

        use super::*;

        // Tests for start() method with CONFIG_PATH have been moved to embedded_env_test.rs
        // to run sequentially and avoid environment variable race conditions

        #[tokio::test]
        #[cfg(debug_assertions)]
        #[serial(tmp_db)]
        async fn test_start_without_config_path_env_allows_in_debug() {
            // No CONFIG_PATH env var - uses default config with /tmp/db
            unsafe {
                std::env::remove_var("CONFIG_PATH");
            }

            // Clean up /tmp/db before test to avoid corruption from previous runs
            let _ = std::fs::remove_dir_all("/tmp/db");

            let result = EmbeddedEngine::start().await;

            assert!(
                result.is_ok(),
                "start() should allow default /tmp/db in debug mode without CONFIG_PATH. Error: {:?}",
                result.as_ref().err()
            );

            // Best-effort shutdown; stop failures are not what this test checks.
            if let Ok(engine) = result {
                engine.stop().await.ok();
            }

            // Clean up after test
            let _ = std::fs::remove_dir_all("/tmp/db");
        }

        #[tokio::test]
        #[cfg(not(debug_assertions))]
        #[serial]
        async fn test_start_without_config_path_env_rejects_in_release() {
            // No CONFIG_PATH env var - should reject default /tmp/db in release
            unsafe {
                std::env::remove_var("CONFIG_PATH");
            }
            let result = EmbeddedEngine::start().await;

            assert!(
                result.is_err(),
                "start() should reject default /tmp/db in release mode without CONFIG_PATH"
            );

            // The rejection message should name the offending path or config key.
            if let Err(e) = result {
                let err_msg = format!("{:?}", e);
                assert!(err_msg.contains("/tmp/db") || err_msg.contains("db_root_dir"));
            }
        }

        // Tests for start_with() method (explicit config path)

        #[tokio::test]
        async fn test_start_with_valid_config() {
            let _temp_dir = tempfile::tempdir().expect("Failed to create temp dir");
            let config_path = _temp_dir.path().join("test_config.toml");
            let data_dir = _temp_dir.path().join("data");

            // Create valid config with custom db_root_dir
            // (TOML body is unindented on purpose: it is written verbatim to disk.)
            let config_content = format!(
                r#"
[cluster]
node_id = 1
db_root_dir = "{}"

[cluster.rpc]
listen_addr = "127.0.0.1:0"

[raft]
heartbeat_interval_ms = 500
election_timeout_min_ms = 1500
election_timeout_max_ms = 3000
"#,
                data_dir.display()
            );
            std::fs::write(&config_path, config_content).expect("Failed to write config");

            // Should succeed with valid config
            let result = EmbeddedEngine::start_with(config_path.to_str().unwrap()).await;
            assert!(
                result.is_ok(),
                "start_with() should succeed with valid config"
            );

            if let Ok(engine) = result {
                engine.stop().await.ok();
            }
            // _temp_dir stays alive until here
        }

        #[tokio::test]
        async fn test_start_with_nonexistent_config() {
            // A path that cannot exist must surface as an error, not a panic.
            let result = EmbeddedEngine::start_with("/nonexistent/config.toml").await;

            assert!(
                result.is_err(),
                "start_with() should fail with nonexistent config"
            );
        }

        #[tokio::test]
        #[cfg(debug_assertions)]
        #[serial(tmp_db)]
        async fn test_start_with_tmp_db_allows_in_debug() {
            let _temp_dir = tempfile::tempdir().expect("Failed to create temp dir");
            let config_path = _temp_dir.path().join("test_config.toml");

            // Clean up /tmp/db before test
            let _ = std::fs::remove_dir_all("/tmp/db");

            // Create config with /tmp/db
            let config_content = r#"
[cluster]
node_id = 1
db_root_dir = "/tmp/db"

[cluster.rpc]
listen_addr = "127.0.0.1:0"
"#;
            std::fs::write(&config_path, config_content).expect("Failed to write config");

            // In debug mode, should succeed with warning
            let result = EmbeddedEngine::start_with(config_path.to_str().unwrap()).await;
            assert!(
                result.is_ok(),
                "start_with() should allow /tmp/db in debug mode"
            );

            if let Ok(engine) = result {
                engine.stop().await.ok();
            }

            // Clean up after test
            let _ = std::fs::remove_dir_all("/tmp/db");
        }

        #[tokio::test]
        #[cfg(not(debug_assertions))]
        #[serial]
        async fn test_start_with_tmp_db_rejects_in_release() {
            let _temp_dir = tempfile::tempdir().expect("Failed to create temp dir");
            let config_path = _temp_dir.path().join("test_config.toml");

            // Create config with /tmp/db
            let config_content = r#"
[cluster]
node_id = 1
db_root_dir = "/tmp/db"

[cluster.rpc]
listen_addr = "127.0.0.1:0"
"#;
            std::fs::write(&config_path, config_content).expect("Failed to write config");

            // In release mode, should reject
            let result = EmbeddedEngine::start_with(config_path.to_str().unwrap()).await;
            assert!(
                result.is_err(),
                "start_with() should reject /tmp/db in release mode"
            );

            // The rejection message should name the offending path or config key.
            if let Err(e) = result {
                let err_msg = format!("{:?}", e);
                assert!(err_msg.contains("/tmp/db") || err_msg.contains("db_root_dir"));
            }
        }

        // NOTE(review): this test exercises the Drop path rather than config
        // validation — consider relocating it alongside the lifecycle tests.
        #[tokio::test]
        async fn test_drop_without_stop_warning() {
            let (storage, sm, _temp_dir) = create_test_storage_and_sm().await;

            let engine = EmbeddedEngine::start_custom(storage, sm, None)
                .await
                .expect("Failed to start engine");

            // Drop without calling stop() - should trigger warning in Drop impl
            // Note: We can't easily capture the warning log in test, but this
            // verifies the code path doesn't panic
            drop(engine);
        }
    }

    #[cfg(feature = "watch")]
    mod watch_tests {
        use serial_test::serial;

        use super::*;

        /// A watcher registered on a key must observe a subsequent PUT to it.
        #[tokio::test]
        #[serial]
        async fn test_watch_registers_successfully() {
            let (storage, sm, _temp_dir) = create_test_storage_and_sm().await;

            let engine = EmbeddedEngine::start_custom(storage, sm, None)
                .await
                .expect("Failed to start engine");

            engine
                .wait_ready(Duration::from_secs(5))
                .await
                .expect("Leader should be elected");

            // Register the watcher before mutating the key.
            let result = engine.client().watch(b"test_key");
            assert!(
                result.is_ok(),
                "watch() should succeed when feature is enabled"
            );

            if let Ok(mut handle) = result {
                // Allow the registration to settle.
                tokio::time::sleep(Duration::from_millis(100)).await;

                // Mutate the watched key.
                engine
                    .client()
                    .put(b"test_key".to_vec(), b"value1".to_vec())
                    .await
                    .expect("Put should succeed");

                // An event for that change should arrive promptly.
                let event =
                    tokio::time::timeout(Duration::from_secs(2), handle.receiver_mut().recv())
                        .await
                        .expect("Should receive event within timeout");
                assert!(event.is_some(), "Should receive watch event");
            }

            engine.stop().await.expect("Failed to stop engine");
        }
    }

    #[cfg(feature = "watch")]
    mod watch_tempdir_tests {
        use super::*;

        /// Test to verify engine behavior when underlying storage is removed at runtime.
        ///
        /// When TempDir is dropped, the file system paths become invalid. Raft requires
        /// HardState to be durably persisted before proceeding (protocol correctness).
        /// When File::create() fails on the deleted path, the error propagates as a
        /// fatal I/O error, causing node.run() to exit and closing all channels.
        ///
        /// Expected behavior:
        /// - Engine starts (storage initialized before TempDir drop)
        /// - wait_ready() may succeed or fail depending on timing
        /// - PUT fails with channel-closed error (engine crashed due to storage failure)
        /// - Watch receives no events
        #[tokio::test]
        async fn test_watch_with_tempdir_dropped() {
            // Build storage and state machine inside an inner scope so the
            // TempDir is dropped — deleting the backing paths — before the
            // engine begins operating on them.
            let (storage, sm) = {
                let temp_dir = tempfile::tempdir().expect("Failed to create temp dir");
                let storage_path = temp_dir.path().join("storage");
                let sm_path = temp_dir.path().join("sm");

                std::fs::create_dir_all(&storage_path).unwrap();
                std::fs::create_dir_all(&sm_path).unwrap();

                let storage = Arc::new(
                    FileStorageEngine::new(storage_path).expect("Failed to create storage"),
                );
                let sm = Arc::new(
                    FileStateMachine::new(sm_path).await.expect("Failed to create state machine"),
                );

                (storage, sm)
                // temp_dir dropped here — underlying paths are now invalid
            };

            let engine = EmbeddedEngine::start_custom(storage, sm, None)
                .await
                .expect("Failed to start engine");

            // wait_ready may fail due to storage I/O error — that is acceptable
            let _ = engine.wait_ready(Duration::from_secs(5)).await;

            // Register watcher before engine crashes
            let watch_result = engine.client().watch(b"test_key");

            // Give engine time to encounter the fatal I/O error and shut down
            // NOTE(review): 500 ms is a timing heuristic — may flake on slow CI.
            tokio::time::sleep(Duration::from_millis(500)).await;

            // PUT must fail — engine crashed due to storage path being removed,
            // which violates Raft's HardState durability requirement
            let put_result = engine.client().put(b"test_key".to_vec(), b"value1".to_vec()).await;
            assert!(
                put_result.is_err(),
                "PUT should fail after storage path is removed: engine must crash on HardState I/O failure"
            );

            // Watch must not receive any events — engine is down
            // (either the recv times out, or the channel is already closed).
            if let Ok(mut handle) = watch_result {
                let event_result =
                    tokio::time::timeout(Duration::from_secs(1), handle.receiver_mut().recv())
                        .await;
                assert!(
                    event_result.is_err() || event_result.unwrap().is_none(),
                    "Watch should not receive events after engine crash"
                );
            }

            // stop() may fail — acceptable, engine may already be down
            let _ = engine.stop().await;
        }

        /// Test to verify Watch works when TempDir is kept alive
        ///
        /// Control case for `test_watch_with_tempdir_dropped`: identical setup
        /// except the TempDir outlives the engine, so the watch event must arrive.
        #[tokio::test]
        async fn test_watch_with_tempdir_alive() {
            println!("\n=== Testing Watch with TempDir ALIVE ===");

            // Leading underscore keeps the binding alive to the end of the
            // test without an "unused variable" warning.
            let _temp_dir = tempfile::tempdir().expect("Failed to create temp dir");
            let storage_path = _temp_dir.path().join("storage");
            let sm_path = _temp_dir.path().join("sm");

            std::fs::create_dir_all(&storage_path).unwrap();
            std::fs::create_dir_all(&sm_path).unwrap();

            let storage =
                Arc::new(FileStorageEngine::new(storage_path).expect("Failed to create storage"));
            let sm = Arc::new(
                FileStateMachine::new(sm_path).await.expect("Failed to create state machine"),
            );

            println!("Created storage and SM, TempDir kept alive");

            let engine = EmbeddedEngine::start_custom(storage, sm, None)
                .await
                .expect("Failed to start engine");

            engine
                .wait_ready(Duration::from_secs(5))
                .await
                .expect("Leader should be elected");

            println!("Engine started and leader elected");

            // Register watcher
            let result = engine.client().watch(b"test_key");
            println!("Watch registration result: {:?}", result.is_ok());
            assert!(result.is_ok(), "watch() should succeed");

            if let Ok(mut handle) = result {
                // Allow the registration to settle before writing.
                tokio::time::sleep(Duration::from_millis(100)).await;

                // Trigger a change
                println!("Performing PUT operation...");
                let client = engine.client();
                client
                    .put(b"test_key".to_vec(), b"value1".to_vec())
                    .await
                    .expect("Put should succeed");

                println!("PUT succeeded, waiting for watch event...");

                // Try to receive watch event
                let event_result =
                    tokio::time::timeout(Duration::from_secs(2), handle.receiver_mut().recv())
                        .await;

                // Only Ok(Some(_)) is acceptable here — closed channel or
                // timeout both indicate a broken watch pipeline.
                match event_result {
                    Ok(Some(_)) => {
                        println!("✅ Watch event RECEIVED (expected!)");
                    }
                    Ok(None) => {
                        println!("❌ Watch channel closed (unexpected)");
                        panic!("Watch channel should not be closed");
                    }
                    Err(_) => {
                        println!("❌ Watch event TIMEOUT (unexpected)");
                        panic!("Watch event should have been received");
                    }
                }
            }

            engine.stop().await.expect("Failed to stop engine");
            // _temp_dir stays alive until here
        }
    }

    // ========================================
    // Cluster State API Tests (Ticket #234)
    // ========================================

    /// Single-node deployment must elect itself as leader.
    ///
    /// After `wait_ready()` completes on a lone node:
    /// - `is_leader()` reports true
    /// - `leader_info()` yields Some(LeaderInfo) with leader_id = 1
    ///
    /// Business scenario: the most basic embedded deployment (one node).
    #[tokio::test]
    async fn test_is_leader_single_node() {
        let (storage, sm, _temp_dir) = create_test_storage_and_sm().await;

        let engine = EmbeddedEngine::start_custom(storage, sm, None)
            .await
            .expect("Failed to start engine");

        engine
            .wait_ready(Duration::from_secs(5))
            .await
            .expect("Leader should be elected");

        // The lone node must claim leadership…
        assert!(
            engine.is_leader(),
            "Single node should be the leader after election"
        );

        // …and expose its identity via leader_info().
        let info = engine.leader_info();
        assert!(info.is_some(), "Leader info should be available");
        assert_eq!(info.unwrap().leader_id, 1, "Leader ID should be 1");

        engine.stop().await.expect("Failed to stop engine");
    }

    /// Cluster-state APIs must be safe to call before the election finishes.
    ///
    /// `is_leader()` and `leader_info()` are invoked right after start, before
    /// `wait_ready()`. They must not panic and must agree with each other
    /// (both "no leader" or both "leader"); after `wait_ready()` both must
    /// report leadership.
    ///
    /// Business scenario: an application probing cluster state during startup,
    /// before the cluster is ready.
    #[tokio::test]
    async fn test_leader_info_before_election() {
        let (storage, sm, _temp_dir) = create_test_storage_and_sm().await;

        let engine = EmbeddedEngine::start_custom(storage, sm, None)
            .await
            .expect("Failed to start engine");

        // Probe immediately. Single-node elections are very fast, so the
        // election may or may not have completed — the APIs just must not
        // panic, and their two views must be mutually consistent.
        let early_is_leader = engine.is_leader();
        let early_info = engine.leader_info();
        assert!(
            early_is_leader == early_info.is_some(),
            "is_leader() and leader_info() should be consistent"
        );

        // Wait for the election to settle.
        engine
            .wait_ready(Duration::from_secs(5))
            .await
            .expect("Leader should be elected");

        // Post-election, both APIs must report leadership.
        assert!(engine.is_leader(), "Should be leader after wait_ready");
        assert!(
            engine.leader_info().is_some(),
            "Leader info must be available after wait_ready"
        );

        engine.stop().await.expect("Failed to stop engine");
    }

    /// Test: Concurrent access to is_leader() and leader_info() is safe
    ///
    /// Setup:
    /// - Start single EmbeddedEngine
    /// - Spawn 10 tokio tasks
    /// - Each task calls is_leader() and leader_info() 100 times concurrently
    ///   (1,000 calls in total)
    ///
    /// Verification:
    /// - No panics or deadlocks occur
    /// - All tasks complete successfully and every call is recorded
    /// - Final state remains consistent (still leader)
    ///
    /// Business scenario: High-traffic application with concurrent health checks
    /// (e.g., HAProxy checking /primary endpoint from multiple load balancers)
    #[tokio::test]
    async fn test_is_leader_concurrent_access() {
        // Single source of truth for the workload size, so the doc comment,
        // the spawn loop, and the final count assertion cannot drift apart.
        const TASKS: usize = 10;
        const CALLS_PER_TASK: usize = 100;

        let (storage, sm, _temp_dir) = create_test_storage_and_sm().await;

        let engine = Arc::new(
            EmbeddedEngine::start_custom(storage, sm, None)
                .await
                .expect("Failed to start engine"),
        );

        // Wait for leader election
        engine
            .wait_ready(Duration::from_secs(5))
            .await
            .expect("Leader should be elected");

        // Collect results from concurrent calls
        let results = Arc::new(std::sync::Mutex::new(Vec::new()));

        // Spawn tasks that hammer is_leader()/leader_info() concurrently
        let mut handles = vec![];
        for _ in 0..TASKS {
            let engine_clone = Arc::clone(&engine);
            let results_clone = Arc::clone(&results);
            let handle = tokio::spawn(async move {
                for _ in 0..CALLS_PER_TASK {
                    let is_leader = engine_clone.is_leader();
                    let info = engine_clone.leader_info();

                    // Store result for verification
                    results_clone.lock().unwrap().push((is_leader, info));

                    // Verify is_leader() matches leader_info()
                    assert_eq!(
                        is_leader,
                        info.is_some(),
                        "is_leader() and leader_info() should be consistent"
                    );
                }
            });
            handles.push(handle);
        }

        // Wait for all tasks to complete
        for handle in handles {
            handle.await.expect("Task should not panic");
        }

        // Verify every call was recorded and all results are consistent
        {
            let results = results.lock().unwrap();
            // Guard the results[0] access below: prove no task silently
            // dropped any of its iterations.
            assert_eq!(
                results.len(),
                TASKS * CALLS_PER_TASK,
                "Every concurrent call should have been recorded"
            );
            let first_info = results[0].1;

            for (is_leader, info) in results.iter() {
                assert!(*is_leader, "All calls should see this node as leader");
                assert_eq!(
                    *info, first_info,
                    "All concurrent calls should return same LeaderInfo"
                );
            }
        } // Drop MutexGuard before await

        // All task handles were joined above, so this should be the last
        // strong reference; unwrap the Arc to regain ownership for stop().
        let engine = Arc::try_unwrap(engine).unwrap_or_else(|arc| {
            panic!(
                "Failed to unwrap Arc, remaining references: {}",
                Arc::strong_count(&arc)
            )
        });
        engine.stop().await.expect("Failed to stop engine");
    }

    /// Test: repeated leader_info() calls return identical snapshots.
    ///
    /// After the single-node election completes, leader_info() is polled
    /// three times in a row. Every snapshot must be equal — same
    /// leader_id (1) and a positive term — with no state drift between
    /// calls.
    ///
    /// Models a monitoring dashboard polling cluster state every second.
    #[tokio::test]
    async fn test_leader_info_consistency() {
        let (storage, sm, _temp_dir) = create_test_storage_and_sm().await;

        let engine = EmbeddedEngine::start_custom(storage, sm, None)
            .await
            .expect("Failed to start engine");

        engine
            .wait_ready(Duration::from_secs(5))
            .await
            .expect("Leader should be elected");

        // Take three back-to-back snapshots; the reported leader must
        // not change between them.
        let snapshots = [
            engine.leader_info(),
            engine.leader_info(),
            engine.leader_info(),
        ];
        assert_eq!(
            snapshots[0], snapshots[1],
            "leader_info() should be consistent"
        );
        assert_eq!(
            snapshots[1], snapshots[2],
            "leader_info() should be consistent"
        );

        // Every snapshot names node 1 with a positive term.
        match snapshots[0].as_ref() {
            Some(info) => {
                assert_eq!(info.leader_id, 1);
                assert!(info.term > 0);
            }
            None => panic!("Leader info should be available"),
        }

        engine.stop().await.expect("Failed to stop engine");
    }
}