//! Container management: configuration types, container specs and lifecycle
//! state tracking ([`ContainerManager`]), and a [`SystemService`] adapter
//! ([`ContainerService`]) that plugs container health into the kernel.

use std::collections::HashMap;
use std::time::Duration;

use async_trait::async_trait;
use dashmap::DashMap;
use serde::{Deserialize, Serialize};
use tracing::debug;

use crate::health::HealthStatus;
use crate::service::{ServiceType, SystemService};

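/// Configuration for the container manager.
///
/// Every field has a serde default, so a partial document deserializes cleanly.
/// A minimal sketch of that behavior (assumes `serde_json` is available as a
/// dependency):
///
/// ```ignore
/// let config: ContainerConfig = serde_json::from_str(r#"{"network_name": "demo"}"#).unwrap();
/// assert_eq!(config.network_name, "demo");
/// assert_eq!(config.docker_socket, "unix:///var/run/docker.sock");
/// assert_eq!(config.health_check_interval_secs, 30);
/// ```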
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ContainerConfig {
    /// Docker daemon endpoint, e.g. a unix socket path or TCP address.
    #[serde(default = "default_docker_socket")]
    pub docker_socket: String,

    /// Docker network that managed containers are attached to.
    #[serde(default = "default_network_name")]
    pub network_name: String,

    /// Restart policy applied when a container spec does not set its own.
    #[serde(default)]
    pub default_restart_policy: RestartPolicy,

    /// Interval between health checks, in seconds.
    #[serde(default = "default_health_check_interval")]
    pub health_check_interval_secs: u64,
}

fn default_docker_socket() -> String {
    "unix:///var/run/docker.sock".into()
}

fn default_network_name() -> String {
    "weftos".into()
}

fn default_health_check_interval() -> u64 {
    30
}

impl Default for ContainerConfig {
    fn default() -> Self {
        Self {
            docker_socket: default_docker_socket(),
            network_name: default_network_name(),
            default_restart_policy: RestartPolicy::default(),
            health_check_interval_secs: default_health_check_interval(),
        }
    }
}

impl ContainerConfig {
    pub fn health_check_interval(&self) -> Duration {
        Duration::from_secs(self.health_check_interval_secs)
    }
}

/// Lifecycle state of a managed container.
#[non_exhaustive]
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum ContainerState {
    /// The image is being pulled.
    Pulling,
    /// The container is being created.
    Creating,
    /// The container is running.
    Running,
    /// The container is shutting down.
    Stopping,
    /// The container is stopped.
    Stopped,
    /// The container failed; the payload carries the reason.
    Failed(String),
}

impl std::fmt::Display for ContainerState {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            ContainerState::Pulling => write!(f, "pulling"),
            ContainerState::Creating => write!(f, "creating"),
            ContainerState::Running => write!(f, "running"),
            ContainerState::Stopping => write!(f, "stopping"),
            ContainerState::Stopped => write!(f, "stopped"),
            ContainerState::Failed(reason) => write!(f, "failed: {reason}"),
        }
    }
}

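/// A host-to-container port mapping.
///
/// `protocol` defaults to `"tcp"` when omitted. A small sketch of the serde
/// behavior (assumes `serde_json` is available):
///
/// ```ignore
/// let pm: PortMapping =
///     serde_json::from_str(r#"{"host_port": 8080, "container_port": 80}"#).unwrap();
/// assert_eq!(pm.protocol, "tcp");
/// ```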
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PortMapping {
    /// Port exposed on the host.
    pub host_port: u16,
    /// Port inside the container.
    pub container_port: u16,
    /// Transport protocol, `"tcp"` by default.
    #[serde(default = "default_protocol")]
    pub protocol: String,
}

fn default_protocol() -> String {
    "tcp".into()
}

/// A bind mount from a host path into the container.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VolumeMount {
    /// Path on the host.
    pub host_path: String,
    /// Mount point inside the container.
    pub container_path: String,
    /// Mount read-only. Defaults to `false`.
    #[serde(default)]
    pub read_only: bool,
}

/// Restart policy for a managed container.
#[non_exhaustive]
#[derive(Debug, Clone, Default, Serialize, Deserialize, PartialEq, Eq)]
pub enum RestartPolicy {
    /// Never restart automatically (the default).
    #[default]
    Never,
    /// Restart on failure, up to `max_retries` attempts.
    OnFailure { max_retries: u32 },
    /// Always restart, regardless of exit status.
    Always,
}

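/// Specification and runtime bookkeeping for a single managed container.
///
/// Only `name` and `image` are required when deserializing; every other field
/// has a serde default. A minimal sketch (assumes `serde_json` is available):
///
/// ```ignore
/// let spec: ManagedContainer =
///     serde_json::from_str(r#"{"name": "redis", "image": "redis:7-alpine"}"#).unwrap();
/// assert_eq!(spec.state, ContainerState::Stopped);
/// assert!(spec.ports.is_empty());
/// ```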
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ManagedContainer {
    /// Unique name used as the lookup key in the manager.
    pub name: String,

    /// Image reference, e.g. `redis:7-alpine`.
    pub image: String,

    /// Runtime container id, once the container has been started.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub container_id: Option<String>,

    /// Current lifecycle state; defaults to [`ContainerState::Stopped`].
    #[serde(default = "default_container_state")]
    pub state: ContainerState,

    /// Host-to-container port mappings.
    #[serde(default)]
    pub ports: Vec<PortMapping>,

    /// Environment variables passed to the container.
    #[serde(default)]
    pub env: HashMap<String, String>,

    /// Volume mounts.
    #[serde(default)]
    pub volumes: Vec<VolumeMount>,

    /// Optional HTTP endpoint used for health probing.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub health_endpoint: Option<String>,

    /// Per-container restart policy; falls back to the manager default when unset.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub restart_policy: Option<RestartPolicy>,
}

fn default_container_state() -> ContainerState {
    ContainerState::Stopped
}

/// Point-in-time health report for a single container.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ContainerHealth {
    /// Runtime container id, or the container name if no id has been assigned.
    pub container_id: String,
    /// Lifecycle state at the time of the report.
    pub status: ContainerState,
    /// Whether the container is considered healthy.
    pub healthy: bool,
    /// Optional human-readable detail when unhealthy.
    pub message: Option<String>,
}

#[non_exhaustive]
#[derive(Debug, thiserror::Error)]
pub enum ContainerError {
    #[error("Docker not available: {0}")]
    DockerNotAvailable(String),

    #[error("image pull failed for '{image}': {reason}")]
    ImagePullFailed { image: String, reason: String },

    #[error("container creation failed for '{name}': {reason}")]
    CreateFailed { name: String, reason: String },

    #[error("container start failed for '{name}': {reason}")]
    StartFailed { name: String, reason: String },

    #[error("port conflict: host port {port} already in use")]
    PortConflict { port: u16 },

    #[error("container not found: '{name}'")]
    ContainerNotFound { name: String },

    #[error("health check failed for '{name}': {reason}")]
    HealthCheckFailed { name: String, reason: String },

    #[error("invalid config: {0}")]
    InvalidConfig(String),
}

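/// Tracks [`ManagedContainer`] specs and their lifecycle state in memory.
///
/// Lifecycle transitions are currently simulated rather than executed against a
/// Docker daemon. A minimal usage sketch, using only types from this module:
///
/// ```ignore
/// let manager = ContainerManager::new(ContainerConfig::default());
/// let name = manager.configure(ManagedContainer {
///     name: "redis".into(),
///     image: "redis:7-alpine".into(),
///     container_id: None,
///     state: ContainerState::Stopped,
///     ports: Vec::new(),
///     env: HashMap::new(),
///     volumes: Vec::new(),
///     health_endpoint: None,
///     restart_policy: None,
/// }).unwrap();
/// manager.start_container(&name).unwrap();
/// assert_eq!(manager.container_state(&name), Some(ContainerState::Running));
/// manager.stop_container(&name).unwrap();
/// ```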
pub struct ContainerManager {
    config: ContainerConfig,
    managed: DashMap<String, ManagedContainer>,
}

impl ContainerManager {
    /// Creates a manager with no registered containers.
    pub fn new(config: ContainerConfig) -> Self {
        Self {
            config,
            managed: DashMap::new(),
        }
    }

    /// Returns the manager configuration.
    pub fn config(&self) -> &ContainerConfig {
        &self.config
    }

    /// Validates a container spec and registers it, returning its name.
    pub fn configure(&self, spec: ManagedContainer) -> Result<String, ContainerError> {
        if spec.image.trim().is_empty() {
            return Err(ContainerError::InvalidConfig(
                "image name must not be empty".into(),
            ));
        }
        if spec.name.trim().is_empty() {
            return Err(ContainerError::InvalidConfig(
                "container name must not be empty".into(),
            ));
        }
        for pm in &spec.ports {
            if pm.host_port == 0 {
                return Err(ContainerError::InvalidConfig(
                    "host port must be > 0".into(),
                ));
            }
            if pm.container_port == 0 {
                return Err(ContainerError::InvalidConfig(
                    "container port must be > 0".into(),
                ));
            }
        }
        let name = spec.name.clone();
        debug!(name = %spec.name, image = %spec.image, "configuring container");
        self.managed.insert(spec.name.clone(), spec);
        Ok(name)
    }

    /// Registers a container spec without validation.
    pub fn register(&self, spec: ManagedContainer) {
        debug!(name = %spec.name, image = %spec.image, "registering container");
        self.managed.insert(spec.name.clone(), spec);
    }

    /// Starts a registered container (simulated), assigning a synthetic id if needed.
    pub fn start_container(&self, name: &str) -> Result<(), ContainerError> {
        let mut entry =
            self.managed
                .get_mut(name)
                .ok_or_else(|| ContainerError::ContainerNotFound {
                    name: name.to_owned(),
                })?;

        match &entry.state {
            ContainerState::Stopped | ContainerState::Creating | ContainerState::Failed(_) => {
                debug!(name, "starting container (simulated)");
                entry.state = ContainerState::Running;
                if entry.container_id.is_none() {
                    use std::collections::hash_map::DefaultHasher;
                    use std::hash::{Hash, Hasher};
                    let mut h = DefaultHasher::new();
                    name.hash(&mut h);
                    entry.container_id = Some(format!("sim-{:08x}", h.finish() as u32));
                }
                Ok(())
            }
            // Starting an already-running container is a no-op.
            ContainerState::Running => Ok(()),
            other => Err(ContainerError::StartFailed {
                name: name.to_owned(),
                reason: format!("cannot start from state: {other}"),
            }),
        }
    }

    /// Stops a registered container (simulated).
    pub fn stop_container(&self, name: &str) -> Result<(), ContainerError> {
        let mut entry =
            self.managed
                .get_mut(name)
                .ok_or_else(|| ContainerError::ContainerNotFound {
                    name: name.to_owned(),
                })?;

        debug!(name, "stopping container");
        entry.state = ContainerState::Stopped;
        Ok(())
    }

    /// Returns the current state of a container, if registered.
    pub fn container_state(&self, name: &str) -> Option<ContainerState> {
        self.managed.get(name).map(|e| e.state.clone())
    }

    /// Lists all registered containers and their states.
    pub fn list_containers(&self) -> Vec<(String, ContainerState)> {
        self.managed
            .iter()
            .map(|e| (e.key().clone(), e.value().state.clone()))
            .collect()
    }

    /// Maps a container's state to a [`HealthStatus`].
    pub fn health_check(&self, name: &str) -> Result<HealthStatus, ContainerError> {
        let entry = self
            .managed
            .get(name)
            .ok_or_else(|| ContainerError::ContainerNotFound {
                name: name.to_owned(),
            })?;

        match &entry.state {
            ContainerState::Running => Ok(HealthStatus::Healthy),
            ContainerState::Stopped => Ok(HealthStatus::Unhealthy("stopped".into())),
            ContainerState::Failed(reason) => {
                Ok(HealthStatus::Unhealthy(format!("failed: {reason}")))
            }
            other => Ok(HealthStatus::Degraded(format!("state: {other}"))),
        }
    }

    /// Builds a detailed [`ContainerHealth`] report for a container.
    pub fn container_health(&self, name: &str) -> Result<ContainerHealth, ContainerError> {
        let entry = self
            .managed
            .get(name)
            .ok_or_else(|| ContainerError::ContainerNotFound {
                name: name.to_owned(),
            })?;

        let (healthy, message) = match &entry.state {
            ContainerState::Running => (true, None),
            ContainerState::Stopped => (false, Some("container is stopped".into())),
            ContainerState::Failed(reason) => (false, Some(format!("failed: {reason}"))),
            other => (false, Some(format!("transitional state: {other}"))),
        };

        Ok(ContainerHealth {
            // Fall back to the container name when no runtime id has been assigned yet.
            container_id: entry.container_id.clone().unwrap_or_else(|| entry.name.clone()),
            status: entry.state.clone(),
            healthy,
            message,
        })
    }

    /// Stops every running container.
    pub fn stop_all(&self) {
        for mut entry in self.managed.iter_mut() {
            if matches!(entry.state, ContainerState::Running) {
                debug!(name = %entry.key(), "stopping container");
                entry.state = ContainerState::Stopped;
            }
        }
    }

    /// Number of registered containers.
    pub fn len(&self) -> usize {
        self.managed.len()
    }

    /// Returns `true` when no containers are registered.
    pub fn is_empty(&self) -> bool {
        self.managed.is_empty()
    }
}

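/// [`SystemService`] adapter that exposes the container manager to the kernel's
/// service registry and health system.
///
/// A registration sketch, assuming a `ServiceRegistry` like the one used in the
/// tests below:
///
/// ```ignore
/// let manager = std::sync::Arc::new(ContainerManager::new(ContainerConfig::default()));
/// let service = std::sync::Arc::new(ContainerService::new(manager.clone()));
/// registry.register(service).unwrap();
/// ```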
pub struct ContainerService {
    manager: std::sync::Arc<ContainerManager>,
}

impl ContainerService {
    /// Wraps a shared [`ContainerManager`] as a system service.
    pub fn new(manager: std::sync::Arc<ContainerManager>) -> Self {
        Self { manager }
    }

    /// Returns the underlying manager handle.
    pub fn manager(&self) -> &std::sync::Arc<ContainerManager> {
        &self.manager
    }
}

#[async_trait]
impl SystemService for ContainerService {
    fn name(&self) -> &str {
        "containers"
    }

    fn service_type(&self) -> ServiceType {
        ServiceType::Custom("containers".into())
    }

    async fn start(&self) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        debug!("container service starting ({} managed)", self.manager.len());
        Ok(())
    }

    async fn stop(&self) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        debug!("container service stopping — stopping all containers");
        self.manager.stop_all();
        Ok(())
    }

    async fn health_check(&self) -> HealthStatus {
        let containers = self.manager.list_containers();
        if containers.is_empty() {
            return HealthStatus::Healthy;
        }
        let mut unhealthy = Vec::new();
        for (name, state) in &containers {
            if !matches!(state, ContainerState::Running) {
                unhealthy.push(format!("{name}: {state}"));
            }
        }
        if unhealthy.is_empty() {
            HealthStatus::Healthy
        } else if unhealthy.len() == containers.len() {
            HealthStatus::Unhealthy(format!("all containers down: {}", unhealthy.join(", ")))
        } else {
            HealthStatus::Degraded(format!(
                "{}/{} unhealthy: {}",
                unhealthy.len(),
                containers.len(),
                unhealthy.join(", ")
            ))
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn default_config() {
        let config = ContainerConfig::default();
        assert!(config.docker_socket.contains("docker.sock"));
        assert_eq!(config.network_name, "weftos");
        assert_eq!(config.default_restart_policy, RestartPolicy::Never);
        assert_eq!(config.health_check_interval_secs, 30);
    }

    #[test]
    fn config_serde_roundtrip() {
        let config = ContainerConfig {
            docker_socket: "tcp://localhost:2375".into(),
            network_name: "custom-net".into(),
            default_restart_policy: RestartPolicy::Always,
            health_check_interval_secs: 10,
        };
        let json = serde_json::to_string(&config).unwrap();
        let restored: ContainerConfig = serde_json::from_str(&json).unwrap();
        assert_eq!(restored.network_name, "custom-net");
        assert_eq!(restored.default_restart_policy, RestartPolicy::Always);
    }

    #[test]
    fn health_check_interval_duration() {
        let config = ContainerConfig {
            health_check_interval_secs: 15,
            ..Default::default()
        };
        assert_eq!(config.health_check_interval(), Duration::from_secs(15));
    }

    #[test]
    fn container_state_display() {
        assert_eq!(ContainerState::Pulling.to_string(), "pulling");
        assert_eq!(ContainerState::Running.to_string(), "running");
        assert_eq!(ContainerState::Stopped.to_string(), "stopped");
        assert_eq!(
            ContainerState::Failed("oom".into()).to_string(),
            "failed: oom"
        );
    }

    #[test]
    fn register_and_list() {
        let manager = ContainerManager::new(ContainerConfig::default());
        manager.register(ManagedContainer {
            name: "redis".into(),
            image: "redis:7-alpine".into(),
            container_id: None,
            state: ContainerState::Stopped,
            ports: vec![PortMapping {
                host_port: 6379,
                container_port: 6379,
                protocol: "tcp".into(),
            }],
            env: HashMap::new(),
            volumes: Vec::new(),
            health_endpoint: None,
            restart_policy: None,
        });

        let containers = manager.list_containers();
        assert_eq!(containers.len(), 1);
        assert_eq!(containers[0].0, "redis");
    }

    #[test]
    fn stop_container() {
        let manager = ContainerManager::new(ContainerConfig::default());
        manager.register(ManagedContainer {
            name: "redis".into(),
            image: "redis:7-alpine".into(),
            container_id: None,
            state: ContainerState::Running,
            ports: Vec::new(),
            env: HashMap::new(),
            volumes: Vec::new(),
            health_endpoint: None,
            restart_policy: None,
        });

        manager.stop_container("redis").unwrap();
        assert_eq!(
            manager.container_state("redis"),
            Some(ContainerState::Stopped)
        );
    }

    #[test]
    fn stop_nonexistent_fails() {
        let manager = ContainerManager::new(ContainerConfig::default());
        let result = manager.stop_container("nonexistent");
        assert!(matches!(
            result,
            Err(ContainerError::ContainerNotFound { .. })
        ));
    }

    #[test]
    fn health_check_running() {
        let manager = ContainerManager::new(ContainerConfig::default());
        manager.register(ManagedContainer {
            name: "redis".into(),
            image: "redis:7-alpine".into(),
            container_id: None,
            state: ContainerState::Running,
            ports: Vec::new(),
            env: HashMap::new(),
            volumes: Vec::new(),
            health_endpoint: None,
            restart_policy: None,
        });

        let health = manager.health_check("redis").unwrap();
        assert!(matches!(health, HealthStatus::Healthy));
    }

    #[test]
    fn health_check_stopped() {
        let manager = ContainerManager::new(ContainerConfig::default());
        manager.register(ManagedContainer {
            name: "redis".into(),
            image: "redis:7-alpine".into(),
            container_id: None,
            state: ContainerState::Stopped,
            ports: Vec::new(),
            env: HashMap::new(),
            volumes: Vec::new(),
            health_endpoint: None,
            restart_policy: None,
        });

        let health = manager.health_check("redis").unwrap();
        assert!(matches!(health, HealthStatus::Unhealthy(_)));
    }

    #[test]
    fn health_check_nonexistent() {
        let manager = ContainerManager::new(ContainerConfig::default());
        assert!(manager.health_check("nope").is_err());
    }

    #[test]
    fn stop_all() {
        let manager = ContainerManager::new(ContainerConfig::default());
        for name in &["redis", "postgres", "memcached"] {
            manager.register(ManagedContainer {
                name: (*name).into(),
                image: format!("{name}:latest"),
                container_id: None,
                state: ContainerState::Running,
                ports: Vec::new(),
                env: HashMap::new(),
                volumes: Vec::new(),
                health_endpoint: None,
                restart_policy: None,
            });
        }

        manager.stop_all();

        for (_, state) in manager.list_containers() {
            assert_eq!(state, ContainerState::Stopped);
        }
    }

    #[test]
    fn start_container_transitions_to_running() {
        let manager = ContainerManager::new(ContainerConfig::default());
        manager.register(ManagedContainer {
            name: "redis".into(),
            image: "redis:7-alpine".into(),
            container_id: None,
            state: ContainerState::Stopped,
            ports: Vec::new(),
            env: HashMap::new(),
            volumes: Vec::new(),
            health_endpoint: None,
            restart_policy: None,
        });

        manager.start_container("redis").unwrap();
        assert_eq!(
            manager.container_state("redis"),
            Some(ContainerState::Running)
        );
    }

    #[test]
    fn start_container_assigns_id() {
        let manager = ContainerManager::new(ContainerConfig::default());
        manager.register(ManagedContainer {
            name: "pg".into(),
            image: "postgres:16".into(),
            container_id: None,
            state: ContainerState::Stopped,
            ports: Vec::new(),
            env: HashMap::new(),
            volumes: Vec::new(),
            health_endpoint: None,
            restart_policy: None,
        });

        manager.start_container("pg").unwrap();
        let entry = manager.managed.get("pg").unwrap();
        assert!(entry.container_id.is_some());
        assert!(entry.container_id.as_ref().unwrap().starts_with("sim-"));
    }

    #[test]
    fn start_already_running_is_idempotent() {
        let manager = ContainerManager::new(ContainerConfig::default());
        manager.register(ManagedContainer {
            name: "redis".into(),
            image: "redis:7-alpine".into(),
            container_id: None,
            state: ContainerState::Running,
            ports: Vec::new(),
            env: HashMap::new(),
            volumes: Vec::new(),
            health_endpoint: None,
            restart_policy: None,
        });

        manager.start_container("redis").unwrap();
        assert_eq!(
            manager.container_state("redis"),
            Some(ContainerState::Running)
        );
    }

    #[test]
    fn managed_container_serde_roundtrip() {
        let container = ManagedContainer {
            name: "redis".into(),
            image: "redis:7-alpine".into(),
            container_id: Some("abc123".into()),
            state: ContainerState::Running,
            ports: vec![PortMapping {
                host_port: 6379,
                container_port: 6379,
                protocol: "tcp".into(),
            }],
            env: HashMap::from([("REDIS_PASSWORD".into(), "secret".into())]),
            volumes: vec![VolumeMount {
                host_path: "/data".into(),
                container_path: "/var/lib/redis".into(),
                read_only: false,
            }],
            health_endpoint: Some("http://localhost:6379/ping".into()),
            restart_policy: Some(RestartPolicy::OnFailure { max_retries: 3 }),
        };

        let json = serde_json::to_string(&container).unwrap();
        let restored: ManagedContainer = serde_json::from_str(&json).unwrap();
        assert_eq!(restored.name, "redis");
        assert_eq!(restored.ports.len(), 1);
        assert_eq!(restored.volumes.len(), 1);
        assert!(!restored.volumes[0].read_only);
    }

    #[test]
    fn container_error_display() {
        let err = ContainerError::DockerNotAvailable("not installed".into());
        assert!(err.to_string().contains("Docker"));

        let err = ContainerError::ContainerNotFound {
            name: "redis".into(),
        };
        assert!(err.to_string().contains("redis"));

        let err = ContainerError::PortConflict { port: 8080 };
        assert!(err.to_string().contains("8080"));
    }

    #[test]
    fn restart_policy_serde() {
        let policies = vec![
            RestartPolicy::Never,
            RestartPolicy::OnFailure { max_retries: 5 },
            RestartPolicy::Always,
        ];
        for policy in policies {
            let json = serde_json::to_string(&policy).unwrap();
            let restored: RestartPolicy = serde_json::from_str(&json).unwrap();
            assert_eq!(restored, policy);
        }
    }

    #[test]
    fn container_service_implements_system_service() {
        let mgr = std::sync::Arc::new(ContainerManager::new(ContainerConfig::default()));
        let svc = ContainerService::new(mgr);
        assert_eq!(svc.name(), "containers");
        assert_eq!(svc.service_type(), ServiceType::Custom("containers".into()));
    }

    #[tokio::test]
    async fn container_service_health_empty_is_healthy() {
        let mgr = std::sync::Arc::new(ContainerManager::new(ContainerConfig::default()));
        let svc = ContainerService::new(mgr);
        let health = svc.health_check().await;
        assert!(matches!(health, HealthStatus::Healthy));
    }

    #[tokio::test]
    async fn container_service_health_propagates() {
        let mgr = std::sync::Arc::new(ContainerManager::new(ContainerConfig::default()));
        mgr.register(ManagedContainer {
            name: "redis".into(),
            image: "redis:7-alpine".into(),
            container_id: None,
            state: ContainerState::Running,
            ports: Vec::new(),
            env: HashMap::new(),
            volumes: Vec::new(),
            health_endpoint: None,
            restart_policy: None,
        });
        mgr.register(ManagedContainer {
            name: "pg".into(),
            image: "postgres:16".into(),
            container_id: None,
            state: ContainerState::Stopped,
            ports: Vec::new(),
            env: HashMap::new(),
            volumes: Vec::new(),
            health_endpoint: None,
            restart_policy: None,
        });
        let svc = ContainerService::new(mgr);
        let health = svc.health_check().await;
        assert!(matches!(health, HealthStatus::Degraded(_)));
    }

    #[tokio::test]
    async fn container_service_stop_halts_all() {
        let mgr = std::sync::Arc::new(ContainerManager::new(ContainerConfig::default()));
        mgr.register(ManagedContainer {
            name: "redis".into(),
            image: "redis:7-alpine".into(),
            container_id: None,
            state: ContainerState::Running,
            ports: Vec::new(),
            env: HashMap::new(),
            volumes: Vec::new(),
            health_endpoint: None,
            restart_policy: None,
        });
        let svc = ContainerService::new(mgr.clone());
        svc.stop().await.unwrap();
        assert_eq!(
            mgr.container_state("redis"),
            Some(ContainerState::Stopped)
        );
    }

    #[test]
    fn container_config_validates() {
        let manager = ContainerManager::new(ContainerConfig::default());
        let spec = ManagedContainer {
            name: "alpine-test".into(),
            image: "alpine:latest".into(),
            container_id: None,
            state: ContainerState::Stopped,
            ports: vec![PortMapping {
                host_port: 8080,
                container_port: 80,
                protocol: "tcp".into(),
            }],
            env: HashMap::new(),
            volumes: Vec::new(),
            health_endpoint: None,
            restart_policy: None,
        };
        let id = manager.configure(spec).unwrap();
        assert_eq!(id, "alpine-test");
        assert_eq!(
            manager.container_state("alpine-test"),
            Some(ContainerState::Stopped)
        );
    }

    #[test]
    fn container_invalid_config_empty_image_rejected() {
        let manager = ContainerManager::new(ContainerConfig::default());
        let spec = ManagedContainer {
            name: "bad".into(),
            image: "".into(),
            container_id: None,
            state: ContainerState::Stopped,
            ports: Vec::new(),
            env: HashMap::new(),
            volumes: Vec::new(),
            health_endpoint: None,
            restart_policy: None,
        };
        let result = manager.configure(spec);
        assert!(matches!(result, Err(ContainerError::InvalidConfig(_))));
    }

    #[test]
    fn container_invalid_config_empty_name_rejected() {
        let manager = ContainerManager::new(ContainerConfig::default());
        let spec = ManagedContainer {
            name: "".into(),
            image: "alpine:latest".into(),
            container_id: None,
            state: ContainerState::Stopped,
            ports: Vec::new(),
            env: HashMap::new(),
            volumes: Vec::new(),
            health_endpoint: None,
            restart_policy: None,
        };
        let result = manager.configure(spec);
        assert!(matches!(result, Err(ContainerError::InvalidConfig(_))));
    }

    #[test]
    fn container_invalid_config_zero_port_rejected() {
        let manager = ContainerManager::new(ContainerConfig::default());
        let spec = ManagedContainer {
            name: "bad-port".into(),
            image: "alpine:latest".into(),
            container_id: None,
            state: ContainerState::Stopped,
            ports: vec![PortMapping {
                host_port: 0,
                container_port: 80,
                protocol: "tcp".into(),
            }],
            env: HashMap::new(),
            volumes: Vec::new(),
            health_endpoint: None,
            restart_policy: None,
        };
        let result = manager.configure(spec);
        assert!(matches!(result, Err(ContainerError::InvalidConfig(_))));
    }

    #[test]
    fn container_lifecycle_configure_start_stop() {
        let manager = ContainerManager::new(ContainerConfig::default());

        let spec = ManagedContainer {
            name: "lifecycle-test".into(),
            image: "redis:7-alpine".into(),
            container_id: None,
            state: ContainerState::Stopped,
            ports: Vec::new(),
            env: HashMap::new(),
            volumes: Vec::new(),
            health_endpoint: None,
            restart_policy: None,
        };
        let name = manager.configure(spec).unwrap();

        manager.start_container(&name).unwrap();
        assert_eq!(
            manager.container_state(&name),
            Some(ContainerState::Running)
        );

        let health = manager.health_check(&name).unwrap();
        assert_eq!(health, HealthStatus::Healthy);

        manager.stop_container(&name).unwrap();
        assert_eq!(
            manager.container_state(&name),
            Some(ContainerState::Stopped)
        );

        let health = manager.health_check(&name).unwrap();
        assert!(matches!(health, HealthStatus::Unhealthy(_)));
    }

    #[test]
    fn container_health_report_detail() {
        let manager = ContainerManager::new(ContainerConfig::default());
        manager.register(ManagedContainer {
            name: "detail".into(),
            image: "alpine:latest".into(),
            container_id: None,
            state: ContainerState::Running,
            ports: Vec::new(),
            env: HashMap::new(),
            volumes: Vec::new(),
            health_endpoint: None,
            restart_policy: None,
        });

        let report = manager.container_health("detail").unwrap();
        assert!(report.healthy);
        assert_eq!(report.status, ContainerState::Running);
        assert!(report.message.is_none());

        manager.stop_container("detail").unwrap();
        let report = manager.container_health("detail").unwrap();
        assert!(!report.healthy);
        assert_eq!(report.status, ContainerState::Stopped);
        assert!(report.message.is_some());
    }

    #[tokio::test]
    async fn container_health_propagates_to_kernel_health_system() {
        use crate::health::HealthSystem;
        use crate::service::ServiceRegistry;

        let mgr = std::sync::Arc::new(ContainerManager::new(ContainerConfig::default()));

        let spec = ManagedContainer {
            name: "redis".into(),
            image: "redis:7-alpine".into(),
            container_id: None,
            state: ContainerState::Stopped,
            ports: Vec::new(),
            env: HashMap::new(),
            volumes: Vec::new(),
            health_endpoint: None,
            restart_policy: None,
        };
        mgr.configure(spec).unwrap();
        mgr.start_container("redis").unwrap();

        let svc = std::sync::Arc::new(ContainerService::new(mgr.clone()));
        let registry = std::sync::Arc::new(ServiceRegistry::new());
        registry.register(svc).unwrap();

        let hs = HealthSystem::new(30);
        let (overall, results) = hs.aggregate(&registry).await;
        assert!(
            matches!(overall, crate::health::OverallHealth::Healthy),
            "expected Healthy, got {overall:?}"
        );
        assert_eq!(results.len(), 1);
        assert_eq!(results[0].0, "containers");
        assert_eq!(results[0].1, HealthStatus::Healthy);

        mgr.stop_container("redis").unwrap();
        let (overall, _) = hs.aggregate(&registry).await;
        assert!(
            matches!(overall, crate::health::OverallHealth::Down),
            "expected Down after stopping all containers, got {overall:?}"
        );
    }

    #[test]
    fn container_state_serde_roundtrip_all_variants() {
        let variants = vec![
            ContainerState::Pulling,
            ContainerState::Creating,
            ContainerState::Running,
            ContainerState::Stopping,
            ContainerState::Stopped,
            ContainerState::Failed("oom killed".into()),
        ];
        for state in variants {
            let json = serde_json::to_string(&state).unwrap();
            let restored: ContainerState = serde_json::from_str(&json).unwrap();
            assert_eq!(restored, state);
        }
    }

    #[test]
    fn port_mapping_serde_roundtrip() {
        let pm = PortMapping {
            host_port: 8080,
            container_port: 80,
            protocol: "tcp".into(),
        };
        let json = serde_json::to_string(&pm).unwrap();
        let restored: PortMapping = serde_json::from_str(&json).unwrap();
        assert_eq!(restored.host_port, 8080);
        assert_eq!(restored.container_port, 80);
        assert_eq!(restored.protocol, "tcp");
    }

    #[test]
    fn port_mapping_default_protocol() {
        let json = r#"{"host_port": 3000, "container_port": 3000}"#;
        let pm: PortMapping = serde_json::from_str(json).unwrap();
        assert_eq!(pm.protocol, "tcp");
    }

    #[test]
    fn volume_mount_serde_roundtrip() {
        let vm = VolumeMount {
            host_path: "/data".into(),
            container_path: "/var/data".into(),
            read_only: true,
        };
        let json = serde_json::to_string(&vm).unwrap();
        let restored: VolumeMount = serde_json::from_str(&json).unwrap();
        assert_eq!(restored.host_path, "/data");
        assert_eq!(restored.container_path, "/var/data");
        assert!(restored.read_only);
    }

    #[test]
    fn volume_mount_default_read_only() {
        let json = r#"{"host_path": "/a", "container_path": "/b"}"#;
        let vm: VolumeMount = serde_json::from_str(json).unwrap();
        assert!(!vm.read_only);
    }

    #[test]
    fn restart_policy_serde_roundtrip_all_variants() {
        let variants = vec![
            RestartPolicy::Never,
            RestartPolicy::OnFailure { max_retries: 5 },
            RestartPolicy::Always,
        ];
        for policy in variants {
            let json = serde_json::to_string(&policy).unwrap();
            let restored: RestartPolicy = serde_json::from_str(&json).unwrap();
            assert_eq!(restored, policy);
        }
    }

    #[test]
    fn restart_policy_default_is_never() {
        assert_eq!(RestartPolicy::default(), RestartPolicy::Never);
    }

    #[test]
    fn container_health_serde_roundtrip() {
        let health = ContainerHealth {
            container_id: "redis-1".into(),
            status: ContainerState::Running,
            healthy: true,
            message: None,
        };
        let json = serde_json::to_string(&health).unwrap();
        let restored: ContainerHealth = serde_json::from_str(&json).unwrap();
        assert_eq!(restored.container_id, "redis-1");
        assert!(restored.healthy);
        assert!(restored.message.is_none());
    }

    #[test]
    fn container_health_with_message_roundtrip() {
        let health = ContainerHealth {
            container_id: "pg-1".into(),
            status: ContainerState::Failed("timeout".into()),
            healthy: false,
            message: Some("health check failed after 30s".into()),
        };
        let json = serde_json::to_string(&health).unwrap();
        let restored: ContainerHealth = serde_json::from_str(&json).unwrap();
        assert!(!restored.healthy);
        assert_eq!(restored.message.unwrap(), "health check failed after 30s");
    }

    #[test]
    fn container_state_display_all_variants() {
        assert_eq!(ContainerState::Pulling.to_string(), "pulling");
        assert_eq!(ContainerState::Creating.to_string(), "creating");
        assert_eq!(ContainerState::Running.to_string(), "running");
        assert_eq!(ContainerState::Stopping.to_string(), "stopping");
        assert_eq!(ContainerState::Stopped.to_string(), "stopped");
        assert_eq!(
            ContainerState::Failed("oom".into()).to_string(),
            "failed: oom"
        );
    }

    #[test]
    fn container_config_health_check_interval() {
        let cfg = ContainerConfig {
            health_check_interval_secs: 10,
            ..Default::default()
        };
        assert_eq!(cfg.health_check_interval(), Duration::from_secs(10));
    }

    #[test]
    fn container_config_defaults_populated() {
        let cfg = ContainerConfig::default();
        assert_eq!(cfg.docker_socket, "unix:///var/run/docker.sock");
        assert_eq!(cfg.network_name, "weftos");
        assert_eq!(cfg.default_restart_policy, RestartPolicy::Never);
        assert_eq!(cfg.health_check_interval_secs, 30);
    }

    #[test]
    fn managed_container_with_env_and_volumes_roundtrip() {
        let mut env = HashMap::new();
        env.insert("REDIS_URL".into(), "redis://localhost".into());
        env.insert("LOG_LEVEL".into(), "debug".into());

        let mc = ManagedContainer {
            name: "full-spec".into(),
            image: "redis:7".into(),
            container_id: Some("abc123".into()),
            state: ContainerState::Running,
            ports: vec![PortMapping {
                host_port: 6379,
                container_port: 6379,
                protocol: "tcp".into(),
            }],
            env,
            volumes: vec![VolumeMount {
                host_path: "/data/redis".into(),
                container_path: "/data".into(),
                read_only: false,
            }],
            health_endpoint: Some("http://localhost:6379/ping".into()),
            restart_policy: Some(RestartPolicy::Always),
        };

        let json = serde_json::to_string(&mc).unwrap();
        let restored: ManagedContainer = serde_json::from_str(&json).unwrap();
        assert_eq!(restored.name, "full-spec");
        assert_eq!(restored.container_id, Some("abc123".into()));
        assert_eq!(restored.ports.len(), 1);
        assert_eq!(restored.env.len(), 2);
        assert_eq!(restored.volumes.len(), 1);
        assert_eq!(restored.restart_policy, Some(RestartPolicy::Always));
    }

    #[test]
    fn configure_multiple_containers_succeeds() {
        let manager = ContainerManager::new(ContainerConfig::default());
        let spec1 = ManagedContainer {
            name: "svc-a".into(),
            image: "alpine:latest".into(),
            container_id: None,
            state: ContainerState::Stopped,
            ports: vec![PortMapping {
                host_port: 8080,
                container_port: 80,
                protocol: "tcp".into(),
            }],
            env: HashMap::new(),
            volumes: Vec::new(),
            health_endpoint: None,
            restart_policy: None,
        };
        manager.configure(spec1).unwrap();

        let spec2 = ManagedContainer {
            name: "svc-b".into(),
            image: "nginx:latest".into(),
            container_id: None,
            state: ContainerState::Stopped,
            ports: vec![PortMapping {
                host_port: 9090,
                container_port: 80,
                protocol: "tcp".into(),
            }],
            env: HashMap::new(),
            volumes: Vec::new(),
            health_endpoint: None,
            restart_policy: None,
        };
        manager.configure(spec2).unwrap();

        assert_eq!(manager.container_state("svc-a"), Some(ContainerState::Stopped));
        assert_eq!(manager.container_state("svc-b"), Some(ContainerState::Stopped));
    }
}