docker_pyo3/
compose.rs

1//! Docker Compose file parsing and orchestration support.
2//!
3//! This module provides types and functions for parsing Docker Compose files
4//! (docker-compose.yml / compose.yaml) and orchestrating multi-container deployments.
5
6use docker_api::opts::{
7    ContainerCreateOpts, ContainerListOpts, ContainerRestartOpts, ContainerStopOpts,
8    ExecCreateOpts, ExecStartOpts, ImageBuildOpts, ImagePushOpts, LogsOpts, NetworkCreateOpts,
9    PublishPort, PullOpts, RegistryAuth, VolumeCreateOpts,
10};
11use docker_api::{Containers, Images, Networks, Volumes};
12use pyo3::exceptions::{PyRuntimeError, PyValueError};
13use pyo3::prelude::*;
14use pythonize::pythonize;
15use serde::{Deserialize, Serialize};
16use std::collections::HashMap;
17use std::fs;
18use std::path::Path;
19
20use crate::Pyo3Docker;
21
/// Register the `compose` Python submodule.
///
/// Exposes the `ComposeFile` and `ComposeProject` classes plus the two
/// parsing entry points (`parse_compose_string`, `parse_compose_file`).
#[pymodule]
pub fn compose(_py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> {
    m.add_class::<Pyo3ComposeFile>()?;
    m.add_class::<Pyo3ComposeProject>()?;
    m.add_function(wrap_pyfunction!(parse_compose_string, m)?)?;
    m.add_function(wrap_pyfunction!(parse_compose_file, m)?)?;
    Ok(())
}
30
/// Represents a parsed Docker Compose file.
///
/// Use `parse_compose_file()` or `parse_compose_string()` to create instances.
///
/// Every section is optional: each field falls back to `None` / an empty map
/// via `#[serde(default)]`, and unknown top-level keys are ignored (serde's
/// default behavior; `deny_unknown_fields` is not set).
#[derive(Debug, Clone, Serialize, Deserialize)]
#[pyclass(name = "ComposeFile")]
pub struct Pyo3ComposeFile {
    /// Compose file format version (e.g., "3.8", "3", "2.1")
    #[serde(default)]
    pub version: Option<String>,

    /// Service definitions
    #[serde(default)]
    pub services: HashMap<String, ComposeService>,

    /// Network definitions (a bare key with no body deserializes as `None`)
    #[serde(default)]
    pub networks: HashMap<String, Option<ComposeNetwork>>,

    /// Volume definitions (a bare key with no body deserializes as `None`)
    #[serde(default)]
    pub volumes: HashMap<String, Option<ComposeVolume>>,

    /// Config definitions (Swarm mode)
    #[serde(default)]
    pub configs: HashMap<String, Option<ComposeConfig>>,

    /// Secret definitions (Swarm mode)
    #[serde(default)]
    pub secrets: HashMap<String, Option<ComposeSecret>>,

    /// Top-level name for the project
    #[serde(default)]
    pub name: Option<String>,
}
65
/// Service definition in a Compose file.
///
/// All fields are optional so any subset of the Compose service schema parses;
/// validation (e.g. requiring `image` or `build`) is left to callers.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct ComposeService {
    /// Image to use for this service
    #[serde(default)]
    pub image: Option<String>,

    /// Build configuration
    #[serde(default)]
    pub build: Option<ComposeBuild>,

    /// Container name override
    #[serde(default)]
    pub container_name: Option<String>,

    /// Command to run
    #[serde(default)]
    pub command: Option<StringOrList>,

    /// Entrypoint override
    #[serde(default)]
    pub entrypoint: Option<StringOrList>,

    /// Environment variables
    #[serde(default)]
    pub environment: Option<EnvironmentVars>,

    /// Environment file(s)
    #[serde(default)]
    pub env_file: Option<StringOrList>,

    /// Port mappings
    #[serde(default)]
    pub ports: Option<Vec<PortMapping>>,

    /// Volume mounts
    #[serde(default)]
    // NOTE(review): "volume" (singular) is not a standard Compose key; the
    // alias is kept for lenient parsing of hand-written files.
    #[serde(alias = "volume")]
    pub volumes: Option<Vec<VolumeMount>>,

    /// Network connections
    #[serde(default)]
    pub networks: Option<ServiceNetworks>,

    /// Service dependencies
    #[serde(default)]
    pub depends_on: Option<DependsOn>,

    /// Restart policy
    #[serde(default)]
    pub restart: Option<String>,

    /// Working directory
    #[serde(default)]
    pub working_dir: Option<String>,

    /// User to run as
    #[serde(default)]
    pub user: Option<String>,

    /// Labels
    #[serde(default)]
    pub labels: Option<Labels>,

    /// Extra hosts
    #[serde(default)]
    pub extra_hosts: Option<Vec<String>>,

    /// DNS servers
    #[serde(default)]
    pub dns: Option<StringOrList>,

    /// Hostname
    #[serde(default)]
    pub hostname: Option<String>,

    /// Domain name
    #[serde(default)]
    pub domainname: Option<String>,

    /// Privileged mode
    #[serde(default)]
    pub privileged: Option<bool>,

    /// Read-only root filesystem
    #[serde(default)]
    pub read_only: Option<bool>,

    /// Stdin open
    #[serde(default)]
    pub stdin_open: Option<bool>,

    /// TTY allocation
    #[serde(default)]
    pub tty: Option<bool>,

    /// Stop signal
    #[serde(default)]
    pub stop_signal: Option<String>,

    /// Stop grace period (duration string, e.g. "10s")
    #[serde(default)]
    pub stop_grace_period: Option<String>,

    /// Health check configuration
    #[serde(default)]
    pub healthcheck: Option<HealthCheck>,

    /// Logging configuration
    #[serde(default)]
    pub logging: Option<LoggingConfig>,

    /// Deploy configuration (Swarm mode)
    #[serde(default)]
    pub deploy: Option<DeployConfig>,

    /// Secrets to expose
    #[serde(default)]
    pub secrets: Option<Vec<ServiceSecret>>,

    /// Configs to expose
    #[serde(default)]
    pub configs: Option<Vec<ServiceConfig>>,

    /// Capabilities to add
    #[serde(default)]
    pub cap_add: Option<Vec<String>>,

    /// Capabilities to drop
    #[serde(default)]
    pub cap_drop: Option<Vec<String>>,

    /// Devices to map
    #[serde(default)]
    pub devices: Option<Vec<String>>,

    /// Expose ports (not published)
    #[serde(default)]
    pub expose: Option<Vec<StringOrInt>>,

    /// Links (legacy)
    #[serde(default)]
    pub links: Option<Vec<String>>,

    /// Network mode
    #[serde(default)]
    pub network_mode: Option<String>,

    /// PID mode
    #[serde(default)]
    pub pid: Option<String>,

    /// IPC mode
    #[serde(default)]
    pub ipc: Option<String>,

    /// Security options
    #[serde(default)]
    pub security_opt: Option<Vec<String>>,

    /// Sysctls
    #[serde(default)]
    pub sysctls: Option<HashMap<String, StringOrInt>>,

    /// Ulimits
    #[serde(default)]
    pub ulimits: Option<HashMap<String, Ulimit>>,

    /// tmpfs mounts
    #[serde(default)]
    pub tmpfs: Option<StringOrList>,

    /// Init process
    #[serde(default)]
    pub init: Option<bool>,

    /// Profiles this service belongs to
    #[serde(default)]
    pub profiles: Option<Vec<String>>,

    /// Platform specification
    #[serde(default)]
    pub platform: Option<String>,

    /// Pull policy
    #[serde(default)]
    pub pull_policy: Option<String>,

    /// Scale (number of replicas)
    #[serde(default)]
    pub scale: Option<i32>,

    /// Memory limit
    #[serde(default)]
    pub mem_limit: Option<StringOrInt>,

    /// Memory reservation
    #[serde(default)]
    pub mem_reservation: Option<StringOrInt>,

    /// CPU count
    #[serde(default)]
    pub cpus: Option<f64>,

    /// Shared memory size
    #[serde(default)]
    pub shm_size: Option<StringOrInt>,
}
274
/// Build configuration for a service.
///
/// Untagged: serde tries variants in declaration order, so a plain YAML
/// string parses as `Simple` and a mapping parses as `Full`.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(untagged)]
pub enum ComposeBuild {
    /// Simple context path
    Simple(String),
    /// Full build configuration
    Full(BuildConfig),
}

/// Full build configuration.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct BuildConfig {
    /// Build context path
    #[serde(default)]
    pub context: Option<String>,

    /// Dockerfile path
    #[serde(default)]
    pub dockerfile: Option<String>,

    /// Build arguments (a `null` value means "inherit from environment")
    #[serde(default)]
    pub args: Option<HashMap<String, Option<String>>>,

    /// Target stage
    #[serde(default)]
    pub target: Option<String>,

    /// Cache from images
    #[serde(default)]
    pub cache_from: Option<Vec<String>>,

    /// Extra hosts
    #[serde(default)]
    pub extra_hosts: Option<Vec<String>>,

    /// Network mode during build
    #[serde(default)]
    pub network: Option<String>,

    /// SSH authentication
    #[serde(default)]
    pub ssh: Option<Vec<String>>,

    /// Labels
    #[serde(default)]
    pub labels: Option<Labels>,

    /// Platform
    #[serde(default)]
    pub platform: Option<String>,
}
328
/// Network definition.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct ComposeNetwork {
    /// Network driver
    #[serde(default)]
    pub driver: Option<String>,

    /// Driver options
    #[serde(default)]
    pub driver_opts: Option<HashMap<String, String>>,

    /// External network reference
    #[serde(default)]
    pub external: Option<ExternalRef>,

    /// Enable IPv6
    #[serde(default)]
    pub enable_ipv6: Option<bool>,

    /// IPAM configuration
    #[serde(default)]
    pub ipam: Option<IpamConfig>,

    /// Internal network (no external access)
    #[serde(default)]
    pub internal: Option<bool>,

    /// Attachable
    #[serde(default)]
    pub attachable: Option<bool>,

    /// Labels
    #[serde(default)]
    pub labels: Option<Labels>,

    /// Network name
    #[serde(default)]
    pub name: Option<String>,
}

/// Volume definition.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct ComposeVolume {
    /// Volume driver
    #[serde(default)]
    pub driver: Option<String>,

    /// Driver options
    #[serde(default)]
    pub driver_opts: Option<HashMap<String, String>>,

    /// External volume reference
    #[serde(default)]
    pub external: Option<ExternalRef>,

    /// Labels
    #[serde(default)]
    pub labels: Option<Labels>,

    /// Volume name
    #[serde(default)]
    pub name: Option<String>,
}
392
/// Config definition (Swarm mode).
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct ComposeConfig {
    /// File path
    #[serde(default)]
    pub file: Option<String>,

    /// External config reference
    #[serde(default)]
    pub external: Option<ExternalRef>,

    /// Config name
    #[serde(default)]
    pub name: Option<String>,

    /// Template driver
    #[serde(default)]
    pub template_driver: Option<String>,
}

/// Secret definition (Swarm mode).
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct ComposeSecret {
    /// File path
    #[serde(default)]
    pub file: Option<String>,

    /// Environment variable
    #[serde(default)]
    pub environment: Option<String>,

    /// External secret reference
    #[serde(default)]
    pub external: Option<ExternalRef>,

    /// Secret name
    #[serde(default)]
    pub name: Option<String>,

    /// Template driver
    #[serde(default)]
    pub template_driver: Option<String>,
}

/// External resource reference.
///
/// Untagged: a YAML boolean parses as `Bool`, a mapping as `Named`.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(untagged)]
pub enum ExternalRef {
    /// Simple boolean
    Bool(bool),
    /// Named external resource
    Named { name: Option<String> },
}
446
/// Service networks configuration.
///
/// Untagged: a YAML sequence parses as `List`, a mapping as `Map`.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(untagged)]
pub enum ServiceNetworks {
    /// Simple list of network names
    List(Vec<String>),
    /// Map with network-specific options
    Map(HashMap<String, Option<ServiceNetworkConfig>>),
}

/// Service-specific network configuration.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct ServiceNetworkConfig {
    /// Aliases for this service on the network
    #[serde(default)]
    pub aliases: Option<Vec<String>>,

    /// IPv4 address
    #[serde(default)]
    pub ipv4_address: Option<String>,

    /// IPv6 address
    #[serde(default)]
    pub ipv6_address: Option<String>,

    /// Priority
    #[serde(default)]
    pub priority: Option<i32>,
}

/// Depends on configuration.
///
/// Untagged: a YAML sequence parses as `List`, a mapping as `Map`.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(untagged)]
pub enum DependsOn {
    /// Simple list of service names
    List(Vec<String>),
    /// Map with conditions
    Map(HashMap<String, DependsOnCondition>),
}

/// Depends on condition.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct DependsOnCondition {
    /// Condition to wait for (e.g. "service_started", "service_healthy")
    #[serde(default)]
    pub condition: Option<String>,
}
494
/// Health check configuration.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct HealthCheck {
    /// Test command
    #[serde(default)]
    pub test: Option<StringOrList>,

    /// Interval between checks (duration string)
    #[serde(default)]
    pub interval: Option<String>,

    /// Timeout for check (duration string)
    #[serde(default)]
    pub timeout: Option<String>,

    /// Number of retries
    #[serde(default)]
    pub retries: Option<i32>,

    /// Start period (duration string)
    #[serde(default)]
    pub start_period: Option<String>,

    /// Disable health check
    #[serde(default)]
    pub disable: Option<bool>,
}

/// Logging configuration.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct LoggingConfig {
    /// Logging driver
    #[serde(default)]
    pub driver: Option<String>,

    /// Driver options
    #[serde(default)]
    pub options: Option<HashMap<String, String>>,
}
534
/// Deploy configuration (Swarm mode).
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct DeployConfig {
    /// Deployment mode
    #[serde(default)]
    pub mode: Option<String>,

    /// Number of replicas
    #[serde(default)]
    pub replicas: Option<i32>,

    /// Endpoint mode
    #[serde(default)]
    pub endpoint_mode: Option<String>,

    /// Placement constraints
    #[serde(default)]
    pub placement: Option<PlacementConfig>,

    /// Resource limits and reservations
    #[serde(default)]
    pub resources: Option<ResourceConfig>,

    /// Restart policy
    #[serde(default)]
    pub restart_policy: Option<RestartPolicyConfig>,

    /// Rollback configuration
    #[serde(default)]
    pub rollback_config: Option<UpdateConfig>,

    /// Update configuration
    #[serde(default)]
    pub update_config: Option<UpdateConfig>,

    /// Labels
    #[serde(default)]
    pub labels: Option<Labels>,
}

/// Placement configuration.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct PlacementConfig {
    /// Constraints
    #[serde(default)]
    pub constraints: Option<Vec<String>>,

    /// Preferences
    #[serde(default)]
    pub preferences: Option<Vec<HashMap<String, String>>>,

    /// Max replicas per node
    #[serde(default)]
    pub max_replicas_per_node: Option<i32>,
}

/// Resource configuration.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct ResourceConfig {
    /// Resource limits
    #[serde(default)]
    pub limits: Option<ResourceSpec>,

    /// Resource reservations
    #[serde(default)]
    pub reservations: Option<ResourceSpec>,
}

/// Resource specification.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct ResourceSpec {
    /// CPU limit/reservation
    #[serde(default)]
    pub cpus: Option<String>,

    /// Memory limit/reservation
    #[serde(default)]
    pub memory: Option<String>,

    /// Devices
    #[serde(default)]
    pub devices: Option<Vec<DeviceSpec>>,
}

/// Device specification.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct DeviceSpec {
    /// Device capabilities
    #[serde(default)]
    pub capabilities: Option<Vec<String>>,

    /// Driver
    #[serde(default)]
    pub driver: Option<String>,

    /// Device count
    #[serde(default)]
    pub count: Option<StringOrInt>,

    /// Device IDs
    #[serde(default)]
    pub device_ids: Option<Vec<String>>,

    /// Options
    #[serde(default)]
    pub options: Option<HashMap<String, String>>,
}

/// Restart policy configuration.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct RestartPolicyConfig {
    /// Condition for restart
    #[serde(default)]
    pub condition: Option<String>,

    /// Delay between restarts (duration string)
    #[serde(default)]
    pub delay: Option<String>,

    /// Max attempts
    #[serde(default)]
    pub max_attempts: Option<i32>,

    /// Window for restart decisions (duration string)
    #[serde(default)]
    pub window: Option<String>,
}

/// Update/rollback configuration.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct UpdateConfig {
    /// Parallelism
    #[serde(default)]
    pub parallelism: Option<i32>,

    /// Delay between updates (duration string)
    #[serde(default)]
    pub delay: Option<String>,

    /// Failure action
    #[serde(default)]
    pub failure_action: Option<String>,

    /// Monitor duration (duration string)
    #[serde(default)]
    pub monitor: Option<String>,

    /// Max failure ratio
    #[serde(default)]
    pub max_failure_ratio: Option<f64>,

    /// Order of operations
    #[serde(default)]
    pub order: Option<String>,
}
690
/// Service secret reference.
///
/// Untagged: a YAML string parses as `Simple`, a mapping as `Full`.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(untagged)]
pub enum ServiceSecret {
    /// Simple secret name
    Simple(String),
    /// Full secret configuration
    Full(ServiceSecretConfig),
}

/// Service secret configuration.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct ServiceSecretConfig {
    /// Source secret name
    #[serde(default)]
    pub source: Option<String>,

    /// Target path in container
    #[serde(default)]
    pub target: Option<String>,

    /// UID
    #[serde(default)]
    pub uid: Option<String>,

    /// GID
    #[serde(default)]
    pub gid: Option<String>,

    /// File mode (octal permissions as an integer)
    #[serde(default)]
    pub mode: Option<i32>,
}

/// Service config reference.
///
/// Untagged: a YAML string parses as `Simple`, a mapping as `Full`.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(untagged)]
pub enum ServiceConfig {
    /// Simple config name
    Simple(String),
    /// Full config configuration
    Full(ServiceConfigConfig),
}

/// Service config configuration.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct ServiceConfigConfig {
    /// Source config name
    #[serde(default)]
    pub source: Option<String>,

    /// Target path in container
    #[serde(default)]
    pub target: Option<String>,

    /// UID
    #[serde(default)]
    pub uid: Option<String>,

    /// GID
    #[serde(default)]
    pub gid: Option<String>,

    /// File mode (octal permissions as an integer)
    #[serde(default)]
    pub mode: Option<i32>,
}
758
/// IPAM configuration.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct IpamConfig {
    /// IPAM driver
    #[serde(default)]
    pub driver: Option<String>,

    /// Subnet configurations
    #[serde(default)]
    pub config: Option<Vec<IpamPoolConfig>>,

    /// Driver options
    #[serde(default)]
    pub options: Option<HashMap<String, String>>,
}

/// IPAM pool configuration.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct IpamPoolConfig {
    /// Subnet CIDR
    #[serde(default)]
    pub subnet: Option<String>,

    /// Gateway address
    #[serde(default)]
    pub gateway: Option<String>,

    /// IP range
    #[serde(default)]
    pub ip_range: Option<String>,

    /// Auxiliary addresses
    #[serde(default)]
    pub aux_addresses: Option<HashMap<String, String>>,
}

/// Ulimit configuration.
///
/// Untagged: a YAML integer parses as `Simple`, a mapping as `Full`.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(untagged)]
pub enum Ulimit {
    /// Simple integer value (both soft and hard)
    Simple(i64),
    /// Separate soft and hard values
    Full {
        soft: Option<i64>,
        hard: Option<i64>,
    },
}
807
/// String or list of strings.
///
/// Untagged: serde tries variants in declaration order.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(untagged)]
pub enum StringOrList {
    String(String),
    List(Vec<String>),
}

/// String or integer.
///
/// Untagged: a quoted YAML scalar parses as `String`, a bare integer as
/// `Int` (tried after `String`).
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(untagged)]
pub enum StringOrInt {
    String(String),
    Int(i64),
}

/// Environment variables - can be list or map.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(untagged)]
pub enum EnvironmentVars {
    /// List format (KEY=value)
    List(Vec<String>),
    /// Map format (a `null` value means the key is set with no value)
    Map(HashMap<String, Option<StringOrInt>>),
}

/// Labels - can be list or map.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(untagged)]
pub enum Labels {
    /// List format
    List(Vec<String>),
    /// Map format
    Map(HashMap<String, String>),
}
843
/// Port mapping.
///
/// Untagged: serde tries variants in declaration order — a YAML string
/// parses as `Simple`, a bare integer as `Int`, a mapping as `Full`.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(untagged)]
pub enum PortMapping {
    /// Simple string format (e.g., "8080:80")
    Simple(String),
    /// Integer format
    Int(i64),
    /// Full port configuration
    Full(PortConfig),
}

/// Full port configuration.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct PortConfig {
    /// Target port (container)
    #[serde(default)]
    pub target: Option<i32>,

    /// Published port (host); may be a range string like "8000-8010"
    #[serde(default)]
    pub published: Option<StringOrInt>,

    /// Protocol (tcp/udp)
    #[serde(default)]
    pub protocol: Option<String>,

    /// Mode (host/ingress)
    #[serde(default)]
    pub mode: Option<String>,

    /// Host IP
    #[serde(default)]
    pub host_ip: Option<String>,
}
879
/// Volume mount.
///
/// Untagged: a YAML string parses as `Simple`, a mapping as `Full`.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(untagged)]
pub enum VolumeMount {
    /// Simple string format (e.g., "./data:/app/data")
    Simple(String),
    /// Full volume configuration
    Full(VolumeMountConfig),
}

/// Full volume mount configuration.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct VolumeMountConfig {
    /// Mount type (volume, bind, tmpfs, npipe); serialized as "type"
    #[serde(default, rename = "type")]
    pub mount_type: Option<String>,

    /// Source path or volume name
    #[serde(default)]
    pub source: Option<String>,

    /// Target path in container
    #[serde(default)]
    pub target: Option<String>,

    /// Read-only
    #[serde(default)]
    pub read_only: Option<bool>,

    /// Bind mount options
    #[serde(default)]
    pub bind: Option<BindOptions>,

    /// Volume options
    #[serde(default)]
    pub volume: Option<VolumeOptions>,

    /// tmpfs options
    #[serde(default)]
    pub tmpfs: Option<TmpfsOptions>,

    /// Consistency mode
    #[serde(default)]
    pub consistency: Option<String>,
}

/// Bind mount options.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct BindOptions {
    /// Bind propagation
    #[serde(default)]
    pub propagation: Option<String>,

    /// Create host path if missing
    #[serde(default)]
    pub create_host_path: Option<bool>,

    /// SELinux label
    #[serde(default)]
    pub selinux: Option<String>,
}

/// Volume options.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct VolumeOptions {
    /// Disable copy-up
    #[serde(default)]
    pub nocopy: Option<bool>,
}

/// tmpfs options.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct TmpfsOptions {
    /// tmpfs size
    #[serde(default)]
    pub size: Option<StringOrInt>,

    /// File mode (octal permissions as an integer)
    #[serde(default)]
    pub mode: Option<i32>,
}
961
962// Python methods implementation
963
964#[pymethods]
965impl Pyo3ComposeFile {
966    /// Get the compose file version.
967    ///
968    /// Returns:
969    ///     str | None: Version string if specified
970    #[getter]
971    pub fn get_version(&self) -> Option<String> {
972        self.version.clone()
973    }
974
975    /// Get the project name.
976    ///
977    /// Returns:
978    ///     str | None: Project name if specified
979    #[getter]
980    pub fn get_name(&self) -> Option<String> {
981        self.name.clone()
982    }
983
984    /// Get list of service names.
985    ///
986    /// Returns:
987    ///     list[str]: List of service names
988    pub fn service_names(&self) -> Vec<String> {
989        self.services.keys().cloned().collect()
990    }
991
992    /// Get list of network names.
993    ///
994    /// Returns:
995    ///     list[str]: List of network names
996    pub fn network_names(&self) -> Vec<String> {
997        self.networks.keys().cloned().collect()
998    }
999
1000    /// Get list of volume names.
1001    ///
1002    /// Returns:
1003    ///     list[str]: List of volume names
1004    pub fn volume_names(&self) -> Vec<String> {
1005        self.volumes.keys().cloned().collect()
1006    }
1007
1008    /// Get list of config names (Swarm mode).
1009    ///
1010    /// Returns:
1011    ///     list[str]: List of config names
1012    pub fn config_names(&self) -> Vec<String> {
1013        self.configs.keys().cloned().collect()
1014    }
1015
1016    /// Get list of secret names (Swarm mode).
1017    ///
1018    /// Returns:
1019    ///     list[str]: List of secret names
1020    pub fn secret_names(&self) -> Vec<String> {
1021        self.secrets.keys().cloned().collect()
1022    }
1023
1024    /// Get the full compose file as a dictionary.
1025    ///
1026    /// Returns:
1027    ///     dict: Complete compose file structure as nested dict
1028    pub fn to_dict(&self, py: Python<'_>) -> PyResult<Py<PyAny>> {
1029        pythonize(py, self)
1030            .map(|bound| bound.unbind())
1031            .map_err(|e| PyValueError::new_err(format!("Serialization error: {}", e)))
1032    }
1033
1034    /// Get a specific service configuration.
1035    ///
1036    /// Args:
1037    ///     name: Service name
1038    ///
1039    /// Returns:
1040    ///     dict | None: Service configuration if found
1041    pub fn get_service(&self, py: Python<'_>, name: &str) -> PyResult<Option<Py<PyAny>>> {
1042        if let Some(service) = self.services.get(name) {
1043            let result = pythonize(py, service)
1044                .map(|bound| bound.unbind())
1045                .map_err(|e| PyValueError::new_err(format!("Serialization error: {}", e)))?;
1046            Ok(Some(result))
1047        } else {
1048            Ok(None)
1049        }
1050    }
1051
1052    /// Convert back to YAML string.
1053    ///
1054    /// Returns:
1055    ///     str: YAML representation of the compose file
1056    ///
1057    /// Raises:
1058    ///     ValueError: If serialization fails
1059    pub fn to_yaml(&self) -> PyResult<String> {
1060        serde_yaml::to_string(self)
1061            .map_err(|e| PyValueError::new_err(format!("YAML serialization error: {}", e)))
1062    }
1063}
1064
/// Parse a Docker Compose file from a YAML string.
///
/// Also used internally by `parse_compose_file` after reading from disk.
///
/// Args:
///     content: YAML content as string
///
/// Returns:
///     ComposeFile: Parsed compose file object
///
/// Raises:
///     ValueError: If parsing fails
///
/// Example:
///     >>> content = '''
///     ... version: "3.8"
///     ... services:
///     ...   web:
///     ...     image: nginx
///     ... '''
///     >>> compose = parse_compose_string(content)
///     >>> compose.service_names()
///     ['web']
#[pyfunction]
pub fn parse_compose_string(content: &str) -> PyResult<Pyo3ComposeFile> {
    serde_yaml::from_str(content)
        .map_err(|e| PyValueError::new_err(format!("Failed to parse compose file: {}", e)))
}
1091
1092/// Parse a Docker Compose file from a file path.
1093///
1094/// Args:
1095///     path: Path to the compose file (docker-compose.yml or compose.yaml)
1096///
1097/// Returns:
1098///     ComposeFile: Parsed compose file object
1099///
1100/// Raises:
1101///     ValueError: If the file cannot be read or parsing fails
1102///
1103/// Example:
1104///     >>> compose = parse_compose_file("docker-compose.yml")
1105///     >>> compose.service_names()
1106///     ['web', 'db']
1107#[pyfunction]
1108pub fn parse_compose_file(path: &str) -> PyResult<Pyo3ComposeFile> {
1109    let path = Path::new(path);
1110    let content = fs::read_to_string(path)
1111        .map_err(|e| PyValueError::new_err(format!("Failed to read file: {}", e)))?;
1112
1113    parse_compose_string(&content)
1114}
1115
/// Manages a Docker Compose project for orchestrating multi-container deployments.
///
/// A ComposeProject represents a running or potential deployment of a Compose file.
/// It provides methods to bring services up (create and start) or down (stop and remove).
///
/// Example:
///     >>> docker = Docker()
///     >>> compose = parse_compose_file("docker-compose.yml")
///     >>> project = ComposeProject(docker, compose, "myproject")
///     >>> project.up()  # Create networks, volumes, and start containers
///     >>> project.down()  # Stop and remove containers
#[derive(Debug)]
#[pyclass(name = "ComposeProject")]
pub struct Pyo3ComposeProject {
    // Underlying docker-api client handle (extracted from the Python wrapper).
    docker: docker_api::Docker,
    // The parsed compose file this project deploys.
    compose: Pyo3ComposeFile,
    // Prefix applied to created resources (networks, volumes, containers).
    project_name: String,
}
1134
/// Result of a compose up operation.
///
/// Serialize-only: converted to a Python dict via `pythonize` in `up()`.
#[derive(Debug, Clone, Serialize)]
pub struct ComposeUpResult {
    /// IDs of created networks
    pub networks: Vec<String>,
    /// Names of created volumes
    pub volumes: Vec<String>,
    /// IDs of created/started containers
    pub containers: Vec<String>,
}

/// Result of a compose down operation.
///
/// Serialize-only: converted to a Python dict via `pythonize` in `down()`.
#[derive(Debug, Clone, Serialize)]
pub struct ComposeDownResult {
    /// IDs of stopped containers
    pub stopped_containers: Vec<String>,
    /// IDs of removed containers
    pub removed_containers: Vec<String>,
    /// IDs of removed networks
    pub removed_networks: Vec<String>,
    /// Names of removed volumes
    pub removed_volumes: Vec<String>,
}
1158
#[pymethods]
impl Pyo3ComposeProject {
    /// Create a new ComposeProject.
    ///
    /// Args:
    ///     docker: Docker client instance
    ///     compose: Parsed ComposeFile instance
    ///     project_name: Name for this project (used as prefix for resources)
    ///
    /// Returns:
    ///     ComposeProject: Project instance ready for up/down operations
    #[new]
    pub fn new(docker: Pyo3Docker, compose: Pyo3ComposeFile, project_name: &str) -> Self {
        Pyo3ComposeProject {
            // Unwrap the raw docker-api client from the Python-facing wrapper.
            docker: docker.0,
            compose,
            project_name: project_name.to_string(),
        }
    }

    /// Get the project name.
    ///
    /// Returns:
    ///     str: Project name
    #[getter]
    pub fn get_project_name(&self) -> String {
        self.project_name.clone()
    }

    /// Bring up the compose project.
    ///
    /// Creates networks, volumes, and containers defined in the compose file,
    /// then starts the containers.
    ///
    /// Args:
    ///     detach: Run containers in the background (default: True).
    ///         NOTE: this flag is currently accepted for API parity but ignored —
    ///         containers are always started in the background.
    ///
    /// Returns:
    ///     dict: Results including created network IDs, volume names, and container IDs
    ///
    /// Raises:
    ///     RuntimeError: If any operation fails
    #[pyo3(signature = (detach=None))]
    pub fn up(&self, py: Python<'_>, detach: Option<bool>) -> PyResult<Py<PyAny>> {
        // `detach` is not wired through to __compose_up yet; kept so the Python
        // signature matches docker-compose conventions.
        let _detach = detach.unwrap_or(true);
        let result = __compose_up(&self.docker, &self.compose, &self.project_name)?;
        // Serialize the Rust result struct into a Python dict.
        pythonize(py, &result)
            .map(|bound| bound.unbind())
            .map_err(|e| PyValueError::new_err(format!("Serialization error: {}", e)))
    }

    /// Bring down the compose project.
    ///
    /// Stops and removes containers, and optionally removes networks and volumes.
    ///
    /// Args:
    ///     remove_volumes: Also remove named volumes (default: False)
    ///     remove_networks: Also remove networks (default: True)
    ///     timeout: Timeout in seconds for stopping containers (default: 10)
    ///
    /// Returns:
    ///     dict: Results including stopped/removed container IDs, network IDs, volume names
    ///
    /// Raises:
    ///     RuntimeError: If any operation fails
    #[pyo3(signature = (remove_volumes=None, remove_networks=None, timeout=None))]
    pub fn down(
        &self,
        py: Python<'_>,
        remove_volumes: Option<bool>,
        remove_networks: Option<bool>,
        timeout: Option<u64>,
    ) -> PyResult<Py<PyAny>> {
        let remove_volumes = remove_volumes.unwrap_or(false);
        let remove_networks = remove_networks.unwrap_or(true);
        let timeout = timeout.unwrap_or(10);

        let result = __compose_down(
            &self.docker,
            &self.compose,
            &self.project_name,
            remove_volumes,
            remove_networks,
            timeout,
        )?;
        pythonize(py, &result)
            .map(|bound| bound.unbind())
            .map_err(|e| PyValueError::new_err(format!("Serialization error: {}", e)))
    }

    /// List containers for this project.
    ///
    /// Returns:
    ///     list[str]: List of container IDs belonging to this project
    pub fn ps(&self) -> PyResult<Vec<String>> {
        __compose_ps(&self.docker, &self.project_name)
    }

    /// Start all stopped containers in the project.
    ///
    /// Starts containers that were previously stopped without recreating them.
    ///
    /// Returns:
    ///     list[str]: List of container IDs that were started
    ///
    /// Raises:
    ///     RuntimeError: If any container fails to start
    pub fn start(&self) -> PyResult<Vec<String>> {
        __compose_start(&self.docker, &self.project_name)
    }

    /// Stop all running containers in the project.
    ///
    /// Stops containers without removing them.
    ///
    /// Args:
    ///     timeout: Timeout in seconds to wait for containers to stop (default: 10)
    ///
    /// Returns:
    ///     list[str]: List of container IDs that were stopped
    ///
    /// Raises:
    ///     RuntimeError: If any container fails to stop
    #[pyo3(signature = (timeout=None))]
    pub fn stop(&self, timeout: Option<u64>) -> PyResult<Vec<String>> {
        let timeout = timeout.unwrap_or(10);
        __compose_stop(&self.docker, &self.project_name, timeout)
    }

    /// Restart all containers in the project.
    ///
    /// Restarts all containers without recreating them.
    ///
    /// Args:
    ///     timeout: Timeout in seconds to wait for containers to stop before restart (default: 10)
    ///
    /// Returns:
    ///     list[str]: List of container IDs that were restarted
    ///
    /// Raises:
    ///     RuntimeError: If any container fails to restart
    #[pyo3(signature = (timeout=None))]
    pub fn restart(&self, timeout: Option<u64>) -> PyResult<Vec<String>> {
        let timeout = timeout.unwrap_or(10);
        __compose_restart(&self.docker, &self.project_name, timeout)
    }

    /// Pause all running containers in the project.
    ///
    /// Pauses all processes within the containers.
    ///
    /// Returns:
    ///     list[str]: List of container IDs that were paused
    ///
    /// Raises:
    ///     RuntimeError: If any container fails to pause
    pub fn pause(&self) -> PyResult<Vec<String>> {
        __compose_pause(&self.docker, &self.project_name)
    }

    /// Unpause all paused containers in the project.
    ///
    /// Resumes all processes within the containers.
    ///
    /// Returns:
    ///     list[str]: List of container IDs that were unpaused
    ///
    /// Raises:
    ///     RuntimeError: If any container fails to unpause
    pub fn unpause(&self) -> PyResult<Vec<String>> {
        __compose_unpause(&self.docker, &self.project_name)
    }

    /// Pull images for all services in the project.
    ///
    /// Pulls the images specified in the compose file for all services that
    /// have an `image` field defined.
    ///
    /// Returns:
    ///     list[str]: List of images that were pulled
    ///
    /// Raises:
    ///     RuntimeError: If any image fails to pull
    pub fn pull(&self) -> PyResult<Vec<String>> {
        __compose_pull(&self.docker, &self.compose)
    }

    /// Build images for all services in the project that have a build config.
    ///
    /// Builds images for services that have a `build` field defined in the compose file.
    /// Services with only an `image` field are skipped.
    ///
    /// Args:
    ///     no_cache: Do not use cache when building (default: False)
    ///     pull: Always pull newer versions of base images (default: False)
    ///
    /// Returns:
    ///     list[str]: List of services that were built
    ///
    /// Raises:
    ///     RuntimeError: If any build fails
    #[pyo3(signature = (no_cache=None, pull=None))]
    pub fn build(&self, no_cache: Option<bool>, pull: Option<bool>) -> PyResult<Vec<String>> {
        let no_cache = no_cache.unwrap_or(false);
        let pull = pull.unwrap_or(false);
        __compose_build(
            &self.docker,
            &self.compose,
            &self.project_name,
            no_cache,
            pull,
        )
    }

    /// Push images for all services in the project.
    ///
    /// Pushes images for services that have an `image` field defined to their registry.
    ///
    /// Returns:
    ///     list[str]: List of images that were pushed
    ///
    /// Raises:
    ///     RuntimeError: If any image fails to push
    pub fn push(&self) -> PyResult<Vec<String>> {
        __compose_push(&self.docker, &self.compose)
    }

    /// Get detailed information about containers in the project.
    ///
    /// Returns detailed information about each container including ID, name, state,
    /// service name, and image.
    ///
    /// Returns:
    ///     list[dict]: List of container info dicts with keys:
    ///         - id: Container ID
    ///         - name: Container name
    ///         - service: Service name from compose file
    ///         - state: Container state (running, stopped, etc.)
    ///         - status: Container status message
    ///         - image: Image used by the container
    ///
    /// Raises:
    ///     RuntimeError: If container information cannot be retrieved
    pub fn ps_detailed(&self, py: Python<'_>) -> PyResult<Py<PyAny>> {
        let result = __compose_ps_detailed(&self.docker, &self.project_name)?;
        pythonize(py, &result)
            .map(|bound| bound.unbind())
            .map_err(|e| PyValueError::new_err(format!("Serialization error: {}", e)))
    }

    /// Get logs from all containers in the project.
    ///
    /// Collects logs from all containers belonging to this project.
    ///
    /// Args:
    ///     service: Only get logs from this service (optional)
    ///     tail: Number of lines to show from the end of logs (optional)
    ///     timestamps: Include timestamps in output (default: False)
    ///
    /// Returns:
    ///     dict[str, str]: Mapping of container ID to its logs
    ///
    /// Raises:
    ///     RuntimeError: If logs cannot be retrieved
    #[pyo3(signature = (service=None, tail=None, timestamps=None))]
    pub fn logs(
        &self,
        py: Python<'_>,
        service: Option<&str>,
        tail: Option<usize>,
        timestamps: Option<bool>,
    ) -> PyResult<Py<PyAny>> {
        let timestamps = timestamps.unwrap_or(false);
        let result = __compose_logs(&self.docker, &self.project_name, service, tail, timestamps)?;
        pythonize(py, &result)
            .map(|bound| bound.unbind())
            .map_err(|e| PyValueError::new_err(format!("Serialization error: {}", e)))
    }

    /// Get the compose configuration as a dictionary.
    ///
    /// Returns the parsed compose file configuration, useful for inspecting
    /// the services, networks, and volumes defined in the project.
    ///
    /// Returns:
    ///     dict: The compose file configuration
    pub fn config(&self, py: Python<'_>) -> PyResult<Py<PyAny>> {
        pythonize(py, &self.compose)
            .map(|bound| bound.unbind())
            .map_err(|e| PyValueError::new_err(format!("Serialization error: {}", e)))
    }

    /// Get running processes from all containers in the project.
    ///
    /// Returns process information from all running containers in the project.
    ///
    /// Args:
    ///     ps_args: Arguments to pass to ps command (e.g., "aux")
    ///
    /// Returns:
    ///     dict[str, dict]: Mapping of container ID to its process info
    ///
    /// Raises:
    ///     RuntimeError: If process information cannot be retrieved
    #[pyo3(signature = (ps_args=None))]
    pub fn top(&self, py: Python<'_>, ps_args: Option<&str>) -> PyResult<Py<PyAny>> {
        let result = __compose_top(&self.docker, &self.project_name, ps_args)?;
        pythonize(py, &result)
            .map(|bound| bound.unbind())
            .map_err(|e| PyValueError::new_err(format!("Serialization error: {}", e)))
    }

    /// Execute a command in a running service container.
    ///
    /// Runs a command in the first running container of the specified service,
    /// similar to `docker-compose exec`.
    ///
    /// Args:
    ///     service: Name of the service to execute the command in
    ///     command: Command to execute as a list (e.g., ["ls", "-la"])
    ///     user: User to run the command as (optional)
    ///     workdir: Working directory inside the container (optional)
    ///     env: Environment variables as a list (e.g., ["VAR=value"]) (optional)
    ///     privileged: Give extended privileges to the command (default: False)
    ///     tty: Allocate a pseudo-TTY (default: False)
    ///
    /// Returns:
    ///     str: Output from the executed command
    ///
    /// Raises:
    ///     RuntimeError: If no running container is found for the service
    ///     RuntimeError: If command execution fails
    #[pyo3(signature = (service, command, user=None, workdir=None, env=None, privileged=None, tty=None))]
    pub fn exec(
        &self,
        service: &str,
        command: Vec<String>,
        user: Option<&str>,
        workdir: Option<&str>,
        env: Option<Vec<String>>,
        privileged: Option<bool>,
        tty: Option<bool>,
    ) -> PyResult<String> {
        let privileged = privileged.unwrap_or(false);
        let tty = tty.unwrap_or(false);
        __compose_exec(
            &self.docker,
            &self.project_name,
            service,
            command,
            user,
            workdir,
            env,
            privileged,
            tty,
        )
    }

    /// Run a one-off command in a new container for a service.
    ///
    /// Creates a new container based on the service configuration, runs the
    /// specified command, and optionally removes the container afterward.
    /// Similar to `docker-compose run`.
    ///
    /// Args:
    ///     service: Name of the service to run
    ///     command: Command to execute as a list (e.g., ["python", "script.py"]).
    ///              If not provided, uses the service's default command.
    ///     user: User to run the command as (optional)
    ///     workdir: Working directory inside the container (optional)
    ///     env: Additional environment variables as a list (e.g., ["VAR=value"])
    ///     rm: Remove the container after exit (default: True)
    ///     detach: Run container in the background (default: False)
    ///
    /// Returns:
    ///     dict: Result containing container_id and output (if not detached)
    ///
    /// Raises:
    ///     RuntimeError: If the service is not found in the compose file
    ///     RuntimeError: If container creation or execution fails
    #[pyo3(signature = (service, command=None, user=None, workdir=None, env=None, rm=None, detach=None))]
    pub fn run(
        &self,
        py: Python<'_>,
        service: &str,
        command: Option<Vec<String>>,
        user: Option<&str>,
        workdir: Option<&str>,
        env: Option<Vec<String>>,
        rm: Option<bool>,
        detach: Option<bool>,
    ) -> PyResult<Py<PyAny>> {
        let rm = rm.unwrap_or(true);
        let detach = detach.unwrap_or(false);
        let result = __compose_run(
            &self.docker,
            &self.compose,
            &self.project_name,
            service,
            command,
            user,
            workdir,
            env,
            rm,
            detach,
        )?;
        pythonize(py, &result)
            .map(|bound| bound.unbind())
            .map_err(|e| PyValueError::new_err(format!("Serialization error: {}", e)))
    }
}
1570
// Build a project-scoped resource name of the form "<project>_<resource>".
fn resource_name(project_name: &str, resource: &str) -> String {
    [project_name, resource].join("_")
}
1575
1576// Helper to convert environment from compose format to Docker format
1577fn env_to_vec(env: &Option<EnvironmentVars>) -> Vec<String> {
1578    match env {
1579        Some(EnvironmentVars::List(list)) => list.clone(),
1580        Some(EnvironmentVars::Map(map)) => map
1581            .iter()
1582            .map(|(k, v)| match v {
1583                Some(StringOrInt::String(s)) => format!("{}={}", k, s),
1584                Some(StringOrInt::Int(i)) => format!("{}={}", k, i),
1585                None => k.clone(),
1586            })
1587            .collect(),
1588        None => vec![],
1589    }
1590}
1591
1592// Helper to convert command from compose format
1593fn command_to_vec(cmd: &Option<StringOrList>) -> Option<Vec<String>> {
1594    match cmd {
1595        Some(StringOrList::String(s)) => {
1596            Some(vec!["/bin/sh".to_string(), "-c".to_string(), s.clone()])
1597        }
1598        Some(StringOrList::List(list)) => Some(list.clone()),
1599        None => None,
1600    }
1601}
1602
1603// Helper to convert labels from compose format
1604fn labels_to_map(
1605    labels: &Option<Labels>,
1606    project_name: &str,
1607    service_name: &str,
1608) -> HashMap<String, String> {
1609    let mut result = HashMap::new();
1610    // Add compose project labels for tracking
1611    result.insert(
1612        "com.docker.compose.project".to_string(),
1613        project_name.to_string(),
1614    );
1615    result.insert(
1616        "com.docker.compose.service".to_string(),
1617        service_name.to_string(),
1618    );
1619
1620    match labels {
1621        Some(Labels::List(list)) => {
1622            for item in list {
1623                if let Some((k, v)) = item.split_once('=') {
1624                    result.insert(k.to_string(), v.to_string());
1625                }
1626            }
1627        }
1628        Some(Labels::Map(map)) => {
1629            for (k, v) in map {
1630                result.insert(k.clone(), v.clone());
1631            }
1632        }
1633        None => {}
1634    }
1635    result
1636}
1637
1638#[tokio::main]
1639async fn __compose_up(
1640    docker: &docker_api::Docker,
1641    compose: &Pyo3ComposeFile,
1642    project_name: &str,
1643) -> PyResult<ComposeUpResult> {
1644    let mut result = ComposeUpResult {
1645        networks: vec![],
1646        volumes: vec![],
1647        containers: vec![],
1648    };
1649
1650    // 1. Create networks
1651    let networks = Networks::new(docker.clone());
1652    for (name, network_config) in &compose.networks {
1653        let network_name = resource_name(project_name, name);
1654
1655        // Check if network already exists
1656        let existing: Vec<docker_api::models::Network> =
1657            networks
1658                .list(&Default::default())
1659                .await
1660                .map_err(|e| PyRuntimeError::new_err(format!("Failed to list networks: {}", e)))?;
1661
1662        if existing
1663            .iter()
1664            .any(|n| n.name.as_ref() == Some(&network_name))
1665        {
1666            result.networks.push(network_name);
1667            continue;
1668        }
1669
1670        let mut opts = NetworkCreateOpts::builder(&network_name);
1671
1672        if let Some(Some(config)) = network_config.as_ref().map(|c| Some(c)) {
1673            if let Some(driver) = &config.driver {
1674                opts = opts.driver(driver.as_str());
1675            }
1676            if let Some(internal) = config.internal {
1677                opts = opts.internal(internal);
1678            }
1679            if let Some(attachable) = config.attachable {
1680                opts = opts.attachable(attachable);
1681            }
1682        }
1683
1684        // Add project label
1685        opts = opts.labels([("com.docker.compose.project", project_name)]);
1686
1687        let network = networks.create(&opts.build()).await.map_err(|e| {
1688            PyRuntimeError::new_err(format!("Failed to create network {}: {}", network_name, e))
1689        })?;
1690
1691        result.networks.push(network.id().to_string());
1692    }
1693
1694    // If no networks defined, create default network
1695    if compose.networks.is_empty() {
1696        let default_network_name = format!("{}_default", project_name);
1697        let existing: Vec<docker_api::models::Network> =
1698            networks
1699                .list(&Default::default())
1700                .await
1701                .map_err(|e| PyRuntimeError::new_err(format!("Failed to list networks: {}", e)))?;
1702
1703        if !existing
1704            .iter()
1705            .any(|n| n.name.as_ref() == Some(&default_network_name))
1706        {
1707            let opts = NetworkCreateOpts::builder(&default_network_name)
1708                .labels([("com.docker.compose.project", project_name)])
1709                .build();
1710
1711            let network = networks.create(&opts).await.map_err(|e| {
1712                PyRuntimeError::new_err(format!("Failed to create default network: {}", e))
1713            })?;
1714
1715            result.networks.push(network.id().to_string());
1716        }
1717    }
1718
1719    // 2. Create volumes
1720    let volumes = Volumes::new(docker.clone());
1721    for (name, volume_config) in &compose.volumes {
1722        let volume_name = resource_name(project_name, name);
1723
1724        // Check if volume already exists
1725        let existing = volumes
1726            .list(&Default::default())
1727            .await
1728            .map_err(|e| PyRuntimeError::new_err(format!("Failed to list volumes: {}", e)))?;
1729
1730        if let Some(vols) = existing.volumes {
1731            if vols.iter().any(|v| v.name == volume_name) {
1732                result.volumes.push(volume_name);
1733                continue;
1734            }
1735        }
1736
1737        let mut opts = VolumeCreateOpts::builder();
1738        opts = opts.name(&volume_name);
1739
1740        if let Some(Some(config)) = volume_config.as_ref().map(|c| Some(c)) {
1741            if let Some(driver) = &config.driver {
1742                opts = opts.driver(driver.as_str());
1743            }
1744        }
1745
1746        // Add project label
1747        opts = opts.labels([("com.docker.compose.project", project_name)]);
1748
1749        volumes.create(&opts.build()).await.map_err(|e| {
1750            PyRuntimeError::new_err(format!("Failed to create volume {}: {}", volume_name, e))
1751        })?;
1752
1753        result.volumes.push(volume_name);
1754    }
1755
1756    // 3. Create and start containers (in order, respecting depends_on)
1757    let containers = Containers::new(docker.clone());
1758    let service_order = get_service_order(compose);
1759
1760    for service_name in &service_order {
1761        if let Some(service) = compose.services.get(service_name) {
1762            // Skip if no image specified and no build config
1763            let image = match &service.image {
1764                Some(img) => img.clone(),
1765                None => {
1766                    // If build is specified, we'd need to build - for now, skip
1767                    if service.build.is_some() {
1768                        continue; // Skip services that need building
1769                    }
1770                    continue;
1771                }
1772            };
1773
1774            let container_name = service
1775                .container_name
1776                .clone()
1777                .unwrap_or_else(|| resource_name(project_name, service_name));
1778
1779            // Check if container already exists
1780            let existing: Vec<docker_api::models::ContainerSummary> = containers
1781                .list(&ContainerListOpts::builder().all(true).build())
1782                .await
1783                .map_err(|e| {
1784                    PyRuntimeError::new_err(format!("Failed to list containers: {}", e))
1785                })?;
1786
1787            let existing_container = existing.iter().find(|c| {
1788                c.names.as_ref().map_or(false, |names| {
1789                    names
1790                        .iter()
1791                        .any(|n| n.trim_start_matches('/') == container_name)
1792                })
1793            });
1794
1795            if let Some(existing) = existing_container {
1796                // Container exists, just start it if not running
1797                if existing.state.as_ref() != Some(&"running".to_string()) {
1798                    let container = containers.get(existing.id.as_ref().unwrap());
1799                    container.start().await.map_err(|e| {
1800                        PyRuntimeError::new_err(format!(
1801                            "Failed to start container {}: {}",
1802                            container_name, e
1803                        ))
1804                    })?;
1805                }
1806                result
1807                    .containers
1808                    .push(existing.id.clone().unwrap_or_default());
1809                continue;
1810            }
1811
1812            // Build container create options
1813            let mut opts = ContainerCreateOpts::builder()
1814                .image(&image)
1815                .name(&container_name);
1816
1817            // Set labels for compose project tracking
1818            let labels = labels_to_map(&service.labels, project_name, service_name);
1819            let labels_ref: HashMap<&str, &str> = labels
1820                .iter()
1821                .map(|(k, v)| (k.as_str(), v.as_str()))
1822                .collect();
1823            opts = opts.labels(labels_ref);
1824
1825            // Set environment
1826            let env = env_to_vec(&service.environment);
1827            if !env.is_empty() {
1828                let env_refs: Vec<&str> = env.iter().map(|s| s.as_str()).collect();
1829                opts = opts.env(env_refs);
1830            }
1831
1832            // Set command
1833            if let Some(cmd) = command_to_vec(&service.command) {
1834                let cmd_refs: Vec<&str> = cmd.iter().map(|s| s.as_str()).collect();
1835                opts = opts.command(cmd_refs);
1836            }
1837
1838            // Set working directory
1839            if let Some(wd) = &service.working_dir {
1840                opts = opts.working_dir(wd.as_str());
1841            }
1842
1843            // Set user
1844            if let Some(user) = &service.user {
1845                opts = opts.user(user.as_str());
1846            }
1847
1848            // Set tty
1849            if let Some(tty) = service.tty {
1850                opts = opts.tty(tty);
1851            }
1852
1853            // Set stdin_open
1854            if let Some(stdin) = service.stdin_open {
1855                opts = opts.attach_stdin(stdin);
1856            }
1857
1858            // Set privileged
1859            if let Some(priv_mode) = service.privileged {
1860                opts = opts.privileged(priv_mode);
1861            }
1862
1863            // Set hostname
1864            if let Some(hostname) = &service.hostname {
1865                // Note: hostname is not directly supported by ContainerCreateOpts in the same way
1866                // We'll skip this for now
1867                let _ = hostname;
1868            }
1869
1870            // Set network mode - use project default network
1871            let default_network = format!("{}_default", project_name);
1872            opts = opts.network_mode(&default_network);
1873
1874            // Handle port mappings
1875            if let Some(ports) = &service.ports {
1876                for port_mapping in ports {
1877                    match port_mapping {
1878                        PortMapping::Simple(s) => {
1879                            // Parse "host:container" or "container"
1880                            if let Some((host_port, container_port)) = s.split_once(':') {
1881                                let container_port =
1882                                    container_port.split('/').next().unwrap_or(container_port);
1883                                if let (Ok(hp), Ok(cp)) =
1884                                    (host_port.parse::<u32>(), container_port.parse::<u32>())
1885                                {
1886                                    opts = opts.expose(PublishPort::tcp(cp), hp);
1887                                }
1888                            }
1889                        }
1890                        PortMapping::Int(p) => {
1891                            opts = opts.expose(PublishPort::tcp(*p as u32), *p as u32);
1892                        }
1893                        PortMapping::Full(config) => {
1894                            if let (Some(target), Some(published)) =
1895                                (&config.target, &config.published)
1896                            {
1897                                let host_port = match published {
1898                                    StringOrInt::String(s) => {
1899                                        s.parse::<u32>().unwrap_or(*target as u32)
1900                                    }
1901                                    StringOrInt::Int(i) => *i as u32,
1902                                };
1903                                let protocol = config.protocol.as_deref().unwrap_or("tcp");
1904                                let publish_port = match protocol {
1905                                    "udp" => PublishPort::udp(*target as u32),
1906                                    _ => PublishPort::tcp(*target as u32),
1907                                };
1908                                opts = opts.expose(publish_port, host_port);
1909                            }
1910                        }
1911                    }
1912                }
1913            }
1914
1915            // Handle volume mounts
1916            if let Some(vol_mounts) = &service.volumes {
1917                let mut volume_bindings: Vec<String> = vec![];
1918                for mount in vol_mounts {
1919                    match mount {
1920                        VolumeMount::Simple(s) => {
1921                            // Check if it's a named volume (starts with volume name from compose)
1922                            let parts: Vec<&str> = s.split(':').collect();
1923                            if parts.len() >= 2 {
1924                                let source = parts[0];
1925                                let target = parts[1];
1926                                let mode = parts.get(2).unwrap_or(&"rw");
1927
1928                                // Check if source is a named volume
1929                                if compose.volumes.contains_key(source) {
1930                                    let vol_name = resource_name(project_name, source);
1931                                    volume_bindings
1932                                        .push(format!("{}:{}:{}", vol_name, target, mode));
1933                                } else {
1934                                    // It's a bind mount
1935                                    volume_bindings.push(s.clone());
1936                                }
1937                            }
1938                        }
1939                        VolumeMount::Full(config) => {
1940                            if let (Some(source), Some(target)) = (&config.source, &config.target) {
1941                                let mode = if config.read_only.unwrap_or(false) {
1942                                    "ro"
1943                                } else {
1944                                    "rw"
1945                                };
1946
1947                                // Check if source is a named volume
1948                                if compose.volumes.contains_key(source) {
1949                                    let vol_name = resource_name(project_name, source);
1950                                    volume_bindings
1951                                        .push(format!("{}:{}:{}", vol_name, target, mode));
1952                                } else {
1953                                    volume_bindings.push(format!("{}:{}:{}", source, target, mode));
1954                                }
1955                            }
1956                        }
1957                    }
1958                }
1959                if !volume_bindings.is_empty() {
1960                    let vol_refs: Vec<&str> = volume_bindings.iter().map(|s| s.as_str()).collect();
1961                    opts = opts.volumes(vol_refs);
1962                }
1963            }
1964
1965            // Set restart policy
1966            if let Some(restart) = &service.restart {
1967                let (policy, retries) = match restart.as_str() {
1968                    "always" => ("always", 0u64),
1969                    "unless-stopped" => ("unless-stopped", 0u64),
1970                    "on-failure" => ("on-failure", 3u64),
1971                    _ => ("no", 0u64),
1972                };
1973                opts = opts.restart_policy(policy, retries);
1974            }
1975
1976            // Create the container
1977            let container = containers.create(&opts.build()).await.map_err(|e| {
1978                PyRuntimeError::new_err(format!(
1979                    "Failed to create container {}: {}",
1980                    container_name, e
1981                ))
1982            })?;
1983
1984            // Start the container
1985            container.start().await.map_err(|e| {
1986                PyRuntimeError::new_err(format!(
1987                    "Failed to start container {}: {}",
1988                    container_name, e
1989                ))
1990            })?;
1991
1992            result.containers.push(container.id().to_string());
1993        }
1994    }
1995
1996    Ok(result)
1997}
1998
1999#[tokio::main]
2000async fn __compose_down(
2001    docker: &docker_api::Docker,
2002    compose: &Pyo3ComposeFile,
2003    project_name: &str,
2004    remove_volumes: bool,
2005    remove_networks: bool,
2006    timeout: u64,
2007) -> PyResult<ComposeDownResult> {
2008    let mut result = ComposeDownResult {
2009        stopped_containers: vec![],
2010        removed_containers: vec![],
2011        removed_networks: vec![],
2012        removed_volumes: vec![],
2013    };
2014
2015    // 1. Stop and remove containers
2016    let containers = Containers::new(docker.clone());
2017    let container_list: Vec<docker_api::models::ContainerSummary> = containers
2018        .list(&ContainerListOpts::builder().all(true).build())
2019        .await
2020        .map_err(|e| PyRuntimeError::new_err(format!("Failed to list containers: {}", e)))?;
2021
2022    // Find containers belonging to this project
2023    let project_containers: Vec<_> = container_list
2024        .iter()
2025        .filter(|c| {
2026            c.labels.as_ref().map_or(false, |labels| {
2027                labels.get("com.docker.compose.project") == Some(&project_name.to_string())
2028            })
2029        })
2030        .collect();
2031
2032    for container_info in project_containers {
2033        if let Some(id) = &container_info.id {
2034            let container = containers.get(id);
2035
2036            // Stop if running
2037            if container_info.state.as_ref() == Some(&"running".to_string()) {
2038                let stop_opts = ContainerStopOpts::builder()
2039                    .wait(std::time::Duration::from_secs(timeout))
2040                    .build();
2041
2042                if container.stop(&stop_opts).await.is_ok() {
2043                    result.stopped_containers.push(id.clone());
2044                }
2045            }
2046
2047            // Remove container
2048            if container.delete().await.is_ok() {
2049                result.removed_containers.push(id.clone());
2050            }
2051        }
2052    }
2053
2054    // 2. Remove networks (if requested)
2055    if remove_networks {
2056        let networks = Networks::new(docker.clone());
2057        let network_list: Vec<docker_api::models::Network> = networks
2058            .list(&Default::default())
2059            .await
2060            .map_err(|e| PyRuntimeError::new_err(format!("Failed to list networks: {}", e)))?;
2061
2062        // Find networks belonging to this project
2063        let project_networks: Vec<_> = network_list
2064            .iter()
2065            .filter(|n| {
2066                n.labels.as_ref().map_or(false, |labels| {
2067                    labels.get("com.docker.compose.project") == Some(&project_name.to_string())
2068                })
2069            })
2070            .collect();
2071
2072        for network_info in project_networks {
2073            if let Some(id) = &network_info.id {
2074                let network = networks.get(id);
2075                if network.delete().await.is_ok() {
2076                    result.removed_networks.push(id.clone());
2077                }
2078            }
2079        }
2080    }
2081
2082    // 3. Remove volumes (if requested)
2083    if remove_volumes {
2084        let volumes = Volumes::new(docker.clone());
2085
2086        for name in compose.volumes.keys() {
2087            let volume_name = resource_name(project_name, name);
2088            let volume = volumes.get(&volume_name);
2089            if volume.delete().await.is_ok() {
2090                result.removed_volumes.push(volume_name);
2091            }
2092        }
2093    }
2094
2095    Ok(result)
2096}
2097
2098#[tokio::main]
2099async fn __compose_ps(docker: &docker_api::Docker, project_name: &str) -> PyResult<Vec<String>> {
2100    let containers = Containers::new(docker.clone());
2101    let container_list: Vec<docker_api::models::ContainerSummary> = containers
2102        .list(&ContainerListOpts::builder().all(true).build())
2103        .await
2104        .map_err(|e| PyRuntimeError::new_err(format!("Failed to list containers: {}", e)))?;
2105
2106    let project_containers: Vec<String> = container_list
2107        .iter()
2108        .filter(|c| {
2109            c.labels.as_ref().map_or(false, |labels| {
2110                labels.get("com.docker.compose.project") == Some(&project_name.to_string())
2111            })
2112        })
2113        .filter_map(|c| c.id.clone())
2114        .collect();
2115
2116    Ok(project_containers)
2117}
2118
2119#[tokio::main]
2120async fn __compose_start(docker: &docker_api::Docker, project_name: &str) -> PyResult<Vec<String>> {
2121    let containers = Containers::new(docker.clone());
2122    let container_list: Vec<docker_api::models::ContainerSummary> = containers
2123        .list(&ContainerListOpts::builder().all(true).build())
2124        .await
2125        .map_err(|e| PyRuntimeError::new_err(format!("Failed to list containers: {}", e)))?;
2126
2127    // Find containers belonging to this project that are stopped
2128    let project_containers: Vec<_> = container_list
2129        .iter()
2130        .filter(|c| {
2131            c.labels.as_ref().map_or(false, |labels| {
2132                labels.get("com.docker.compose.project") == Some(&project_name.to_string())
2133            })
2134        })
2135        .filter(|c| c.state.as_ref() != Some(&"running".to_string()))
2136        .collect();
2137
2138    let mut started = Vec::new();
2139
2140    for container_info in project_containers {
2141        if let Some(id) = &container_info.id {
2142            let container = containers.get(id);
2143            if container.start().await.is_ok() {
2144                started.push(id.clone());
2145            }
2146        }
2147    }
2148
2149    Ok(started)
2150}
2151
2152#[tokio::main]
2153async fn __compose_stop(
2154    docker: &docker_api::Docker,
2155    project_name: &str,
2156    timeout: u64,
2157) -> PyResult<Vec<String>> {
2158    let containers = Containers::new(docker.clone());
2159    let container_list: Vec<docker_api::models::ContainerSummary> = containers
2160        .list(&ContainerListOpts::builder().all(true).build())
2161        .await
2162        .map_err(|e| PyRuntimeError::new_err(format!("Failed to list containers: {}", e)))?;
2163
2164    // Find running containers belonging to this project
2165    let project_containers: Vec<_> = container_list
2166        .iter()
2167        .filter(|c| {
2168            c.labels.as_ref().map_or(false, |labels| {
2169                labels.get("com.docker.compose.project") == Some(&project_name.to_string())
2170            })
2171        })
2172        .filter(|c| c.state.as_ref() == Some(&"running".to_string()))
2173        .collect();
2174
2175    let mut stopped = Vec::new();
2176
2177    for container_info in project_containers {
2178        if let Some(id) = &container_info.id {
2179            let container = containers.get(id);
2180            let stop_opts = ContainerStopOpts::builder()
2181                .wait(std::time::Duration::from_secs(timeout))
2182                .build();
2183
2184            if container.stop(&stop_opts).await.is_ok() {
2185                stopped.push(id.clone());
2186            }
2187        }
2188    }
2189
2190    Ok(stopped)
2191}
2192
2193#[tokio::main]
2194async fn __compose_restart(
2195    docker: &docker_api::Docker,
2196    project_name: &str,
2197    timeout: u64,
2198) -> PyResult<Vec<String>> {
2199    let containers = Containers::new(docker.clone());
2200    let container_list: Vec<docker_api::models::ContainerSummary> = containers
2201        .list(&ContainerListOpts::builder().all(true).build())
2202        .await
2203        .map_err(|e| PyRuntimeError::new_err(format!("Failed to list containers: {}", e)))?;
2204
2205    // Find containers belonging to this project
2206    let project_containers: Vec<_> = container_list
2207        .iter()
2208        .filter(|c| {
2209            c.labels.as_ref().map_or(false, |labels| {
2210                labels.get("com.docker.compose.project") == Some(&project_name.to_string())
2211            })
2212        })
2213        .collect();
2214
2215    let mut restarted = Vec::new();
2216
2217    for container_info in project_containers {
2218        if let Some(id) = &container_info.id {
2219            let container = containers.get(id);
2220            let restart_opts = ContainerRestartOpts::builder()
2221                .wait(std::time::Duration::from_secs(timeout))
2222                .build();
2223
2224            if container.restart(&restart_opts).await.is_ok() {
2225                restarted.push(id.clone());
2226            }
2227        }
2228    }
2229
2230    Ok(restarted)
2231}
2232
2233#[tokio::main]
2234async fn __compose_pause(docker: &docker_api::Docker, project_name: &str) -> PyResult<Vec<String>> {
2235    let containers = Containers::new(docker.clone());
2236    let container_list: Vec<docker_api::models::ContainerSummary> = containers
2237        .list(&ContainerListOpts::builder().all(true).build())
2238        .await
2239        .map_err(|e| PyRuntimeError::new_err(format!("Failed to list containers: {}", e)))?;
2240
2241    // Find running containers belonging to this project
2242    let project_containers: Vec<_> = container_list
2243        .iter()
2244        .filter(|c| {
2245            c.labels.as_ref().map_or(false, |labels| {
2246                labels.get("com.docker.compose.project") == Some(&project_name.to_string())
2247            })
2248        })
2249        .filter(|c| c.state.as_ref() == Some(&"running".to_string()))
2250        .collect();
2251
2252    let mut paused = Vec::new();
2253
2254    for container_info in project_containers {
2255        if let Some(id) = &container_info.id {
2256            let container = containers.get(id);
2257            if container.pause().await.is_ok() {
2258                paused.push(id.clone());
2259            }
2260        }
2261    }
2262
2263    Ok(paused)
2264}
2265
2266#[tokio::main]
2267async fn __compose_unpause(
2268    docker: &docker_api::Docker,
2269    project_name: &str,
2270) -> PyResult<Vec<String>> {
2271    let containers = Containers::new(docker.clone());
2272    let container_list: Vec<docker_api::models::ContainerSummary> = containers
2273        .list(&ContainerListOpts::builder().all(true).build())
2274        .await
2275        .map_err(|e| PyRuntimeError::new_err(format!("Failed to list containers: {}", e)))?;
2276
2277    // Find paused containers belonging to this project
2278    let project_containers: Vec<_> = container_list
2279        .iter()
2280        .filter(|c| {
2281            c.labels.as_ref().map_or(false, |labels| {
2282                labels.get("com.docker.compose.project") == Some(&project_name.to_string())
2283            })
2284        })
2285        .filter(|c| c.state.as_ref() == Some(&"paused".to_string()))
2286        .collect();
2287
2288    let mut unpaused = Vec::new();
2289
2290    for container_info in project_containers {
2291        if let Some(id) = &container_info.id {
2292            let container = containers.get(id);
2293            if container.unpause().await.is_ok() {
2294                unpaused.push(id.clone());
2295            }
2296        }
2297    }
2298
2299    Ok(unpaused)
2300}
2301
2302#[tokio::main]
2303async fn __compose_pull(
2304    docker: &docker_api::Docker,
2305    compose: &Pyo3ComposeFile,
2306) -> PyResult<Vec<String>> {
2307    use futures_util::StreamExt;
2308
2309    let images = Images::new(docker.clone());
2310    let mut pulled = Vec::new();
2311
2312    for (_service_name, service) in &compose.services {
2313        // Only pull if there's an image field (not just build)
2314        if let Some(image) = &service.image {
2315            let pull_opts = PullOpts::builder()
2316                .image(image.as_str())
2317                .auth(RegistryAuth::builder().build())
2318                .build();
2319
2320            let mut stream = images.pull(&pull_opts);
2321            let mut success = true;
2322
2323            while let Some(result) = stream.next().await {
2324                if result.is_err() {
2325                    success = false;
2326                    break;
2327                }
2328            }
2329
2330            if success {
2331                pulled.push(image.clone());
2332            }
2333        }
2334    }
2335
2336    Ok(pulled)
2337}
2338
2339#[tokio::main]
2340async fn __compose_build(
2341    docker: &docker_api::Docker,
2342    compose: &Pyo3ComposeFile,
2343    project_name: &str,
2344    no_cache: bool,
2345    pull: bool,
2346) -> PyResult<Vec<String>> {
2347    use futures_util::StreamExt;
2348
2349    let images = Images::new(docker.clone());
2350    let mut built = Vec::new();
2351
2352    for (service_name, service) in &compose.services {
2353        // Only build if there's a build field
2354        if let Some(build_config) = &service.build {
2355            let build_context = match build_config {
2356                ComposeBuild::Simple(path) => path.clone(),
2357                ComposeBuild::Full(config) => {
2358                    config.context.clone().unwrap_or_else(|| ".".to_string())
2359                }
2360            };
2361
2362            let dockerfile = match build_config {
2363                ComposeBuild::Simple(_) => None,
2364                ComposeBuild::Full(config) => config.dockerfile.clone(),
2365            };
2366
2367            // Determine the tag for the built image
2368            let tag = service
2369                .image
2370                .clone()
2371                .unwrap_or_else(|| format!("{}_{}", project_name, service_name));
2372
2373            let mut build_opts = ImageBuildOpts::builder(&build_context);
2374            build_opts = build_opts.tag(&tag);
2375
2376            if let Some(df) = &dockerfile {
2377                build_opts = build_opts.dockerfile(df);
2378            }
2379
2380            if no_cache {
2381                build_opts = build_opts.nocahe(true);
2382            }
2383
2384            if pull {
2385                build_opts = build_opts.pull("true");
2386            }
2387
2388            let mut stream = images.build(&build_opts.build());
2389            let mut success = true;
2390
2391            while let Some(result) = stream.next().await {
2392                if result.is_err() {
2393                    success = false;
2394                    break;
2395                }
2396            }
2397
2398            if success {
2399                built.push(service_name.clone());
2400            }
2401        }
2402    }
2403
2404    Ok(built)
2405}
2406
2407#[tokio::main]
2408async fn __compose_push(
2409    docker: &docker_api::Docker,
2410    compose: &Pyo3ComposeFile,
2411) -> PyResult<Vec<String>> {
2412    let images = Images::new(docker.clone());
2413    let mut pushed = Vec::new();
2414
2415    for (_service_name, service) in &compose.services {
2416        // Only push if there's an image field
2417        if let Some(image_name) = &service.image {
2418            let image = images.get(image_name);
2419            let push_opts = ImagePushOpts::builder()
2420                .auth(RegistryAuth::builder().build())
2421                .build();
2422
2423            if image.push(&push_opts).await.is_ok() {
2424                pushed.push(image_name.clone());
2425            }
2426        }
2427    }
2428
2429    Ok(pushed)
2430}
2431
/// Container info for ps_detailed
///
/// Serializable snapshot of one compose-managed container, assembled from
/// the Docker container-list response by `__compose_ps_detailed`. Every
/// field defaults to an empty string when Docker omits the source value.
#[derive(Debug, Clone, Serialize, Deserialize)]
struct ContainerInfo {
    // Full container ID.
    id: String,
    // First container name, with the leading '/' stripped.
    name: String,
    // Value of the "com.docker.compose.service" label.
    service: String,
    // Docker state string (e.g. "running").
    state: String,
    // Human-readable status line from Docker.
    status: String,
    // Image reference the container was created from.
    image: String,
}
2442
2443#[tokio::main]
2444async fn __compose_ps_detailed(
2445    docker: &docker_api::Docker,
2446    project_name: &str,
2447) -> PyResult<Vec<ContainerInfo>> {
2448    let containers = Containers::new(docker.clone());
2449    let container_list: Vec<docker_api::models::ContainerSummary> = containers
2450        .list(&ContainerListOpts::builder().all(true).build())
2451        .await
2452        .map_err(|e| PyRuntimeError::new_err(format!("Failed to list containers: {}", e)))?;
2453
2454    let project_containers: Vec<ContainerInfo> = container_list
2455        .iter()
2456        .filter(|c| {
2457            c.labels.as_ref().map_or(false, |labels| {
2458                labels.get("com.docker.compose.project") == Some(&project_name.to_string())
2459            })
2460        })
2461        .map(|c| {
2462            let service = c
2463                .labels
2464                .as_ref()
2465                .and_then(|l| l.get("com.docker.compose.service"))
2466                .cloned()
2467                .unwrap_or_default();
2468
2469            let name = c
2470                .names
2471                .as_ref()
2472                .and_then(|n| n.first())
2473                .map(|n| n.trim_start_matches('/').to_string())
2474                .unwrap_or_default();
2475
2476            ContainerInfo {
2477                id: c.id.clone().unwrap_or_default(),
2478                name,
2479                service,
2480                state: c.state.clone().unwrap_or_default(),
2481                status: c.status.clone().unwrap_or_default(),
2482                image: c.image.clone().unwrap_or_default(),
2483            }
2484        })
2485        .collect();
2486
2487    Ok(project_containers)
2488}
2489
2490#[tokio::main]
2491async fn __compose_logs(
2492    docker: &docker_api::Docker,
2493    project_name: &str,
2494    service_filter: Option<&str>,
2495    tail: Option<usize>,
2496    timestamps: bool,
2497) -> PyResult<HashMap<String, String>> {
2498    use futures_util::StreamExt;
2499
2500    let containers = Containers::new(docker.clone());
2501    let container_list: Vec<docker_api::models::ContainerSummary> = containers
2502        .list(&ContainerListOpts::builder().all(true).build())
2503        .await
2504        .map_err(|e| PyRuntimeError::new_err(format!("Failed to list containers: {}", e)))?;
2505
2506    let project_containers: Vec<_> = container_list
2507        .iter()
2508        .filter(|c| {
2509            c.labels.as_ref().map_or(false, |labels| {
2510                let matches_project =
2511                    labels.get("com.docker.compose.project") == Some(&project_name.to_string());
2512                let matches_service = service_filter.map_or(true, |svc| {
2513                    labels.get("com.docker.compose.service") == Some(&svc.to_string())
2514                });
2515                matches_project && matches_service
2516            })
2517        })
2518        .collect();
2519
2520    let mut logs_map: HashMap<String, String> = HashMap::new();
2521
2522    for container_info in project_containers {
2523        if let Some(id) = &container_info.id {
2524            let container = containers.get(id);
2525
2526            let mut log_opts = LogsOpts::builder();
2527            log_opts = log_opts.stdout(true);
2528            log_opts = log_opts.stderr(true);
2529
2530            if let Some(n) = tail {
2531                log_opts = log_opts.n_lines(n);
2532            }
2533
2534            if timestamps {
2535                log_opts = log_opts.timestamps(true);
2536            }
2537
2538            let log_stream = container.logs(&log_opts.build());
2539            let log_chunks: Vec<Vec<u8>> = log_stream
2540                .map(|chunk| match chunk {
2541                    Ok(chunk) => chunk.to_vec(),
2542                    Err(_) => vec![],
2543                })
2544                .collect()
2545                .await;
2546
2547            let log_bytes: Vec<u8> = log_chunks.into_iter().flatten().collect();
2548            let log_str = String::from_utf8_lossy(&log_bytes).to_string();
2549
2550            let name = container_info
2551                .names
2552                .as_ref()
2553                .and_then(|n| n.first())
2554                .map(|n| n.trim_start_matches('/').to_string())
2555                .unwrap_or_else(|| id.clone());
2556
2557            logs_map.insert(name, log_str);
2558        }
2559    }
2560
2561    Ok(logs_map)
2562}
2563
2564#[tokio::main]
2565async fn __compose_top(
2566    docker: &docker_api::Docker,
2567    project_name: &str,
2568    ps_args: Option<&str>,
2569) -> PyResult<HashMap<String, serde_json::Value>> {
2570    let containers = Containers::new(docker.clone());
2571    let container_list: Vec<docker_api::models::ContainerSummary> = containers
2572        .list(&ContainerListOpts::builder().all(true).build())
2573        .await
2574        .map_err(|e| PyRuntimeError::new_err(format!("Failed to list containers: {}", e)))?;
2575
2576    // Find running containers belonging to this project
2577    let project_containers: Vec<_> = container_list
2578        .iter()
2579        .filter(|c| {
2580            c.labels.as_ref().map_or(false, |labels| {
2581                labels.get("com.docker.compose.project") == Some(&project_name.to_string())
2582            })
2583        })
2584        .filter(|c| c.state.as_ref() == Some(&"running".to_string()))
2585        .collect();
2586
2587    let mut top_map: HashMap<String, serde_json::Value> = HashMap::new();
2588
2589    for container_info in project_containers {
2590        if let Some(id) = &container_info.id {
2591            let container = containers.get(id);
2592
2593            if let Ok(top_result) = container.top(ps_args).await {
2594                let name = container_info
2595                    .names
2596                    .as_ref()
2597                    .and_then(|n| n.first())
2598                    .map(|n| n.trim_start_matches('/').to_string())
2599                    .unwrap_or_else(|| id.clone());
2600
2601                // Convert ContainerTop200Response to serde_json::Value
2602                let value = serde_json::json!({
2603                    "titles": top_result.titles,
2604                    "processes": top_result.processes
2605                });
2606
2607                top_map.insert(name, value);
2608            }
2609        }
2610    }
2611
2612    Ok(top_map)
2613}
2614
/// Result of a compose run operation
///
/// Returned by `__compose_run`: identifies the one-off container and, when
/// the run was not detached, carries its collected log output and exit code.
#[derive(Debug, Clone, Serialize)]
pub struct ComposeRunResult {
    /// ID of the created container
    pub container_id: String,
    /// Output from the command (if not detached)
    pub output: Option<String>,
    /// Exit code (if waited for)
    pub exit_code: Option<i64>,
}
2625
2626#[tokio::main]
2627async fn __compose_exec(
2628    docker: &docker_api::Docker,
2629    project_name: &str,
2630    service: &str,
2631    command: Vec<String>,
2632    user: Option<&str>,
2633    workdir: Option<&str>,
2634    env: Option<Vec<String>>,
2635    privileged: bool,
2636    tty: bool,
2637) -> PyResult<String> {
2638    use futures_util::StreamExt;
2639
2640    let containers = Containers::new(docker.clone());
2641    let container_list: Vec<docker_api::models::ContainerSummary> = containers
2642        .list(&ContainerListOpts::builder().all(true).build())
2643        .await
2644        .map_err(|e| PyRuntimeError::new_err(format!("Failed to list containers: {}", e)))?;
2645
2646    // Find a running container for the specified service
2647    let service_container = container_list.iter().find(|c| {
2648        c.labels.as_ref().map_or(false, |labels| {
2649            labels.get("com.docker.compose.project") == Some(&project_name.to_string())
2650                && labels.get("com.docker.compose.service") == Some(&service.to_string())
2651        }) && c.state.as_ref() == Some(&"running".to_string())
2652    });
2653
2654    let container_info = service_container.ok_or_else(|| {
2655        PyRuntimeError::new_err(format!(
2656            "No running container found for service '{}' in project '{}'",
2657            service, project_name
2658        ))
2659    })?;
2660
2661    let container_id = container_info
2662        .id
2663        .as_ref()
2664        .ok_or_else(|| PyRuntimeError::new_err("Container ID not found".to_string()))?;
2665
2666    let container = containers.get(container_id);
2667
2668    // Build exec options
2669    let cmd_refs: Vec<&str> = command.iter().map(|s| s.as_str()).collect();
2670    let mut exec_opts = ExecCreateOpts::builder()
2671        .command(cmd_refs)
2672        .attach_stdout(true)
2673        .attach_stderr(true)
2674        .privileged(privileged)
2675        .tty(tty);
2676
2677    if let Some(u) = user {
2678        exec_opts = exec_opts.user(u);
2679    }
2680
2681    if let Some(wd) = workdir {
2682        exec_opts = exec_opts.working_dir(wd);
2683    }
2684
2685    if let Some(env_vars) = &env {
2686        let env_refs: Vec<&str> = env_vars.iter().map(|s| s.as_str()).collect();
2687        exec_opts = exec_opts.env(env_refs);
2688    }
2689
2690    // Create and start the exec instance
2691    let start_opts = ExecStartOpts::builder().build();
2692    let mut multiplexer = container
2693        .exec(&exec_opts.build(), &start_opts)
2694        .await
2695        .map_err(|e| PyRuntimeError::new_err(format!("Failed to exec command: {}", e)))?;
2696
2697    // Collect output
2698    let mut output = Vec::new();
2699    while let Some(chunk_result) = multiplexer.next().await {
2700        match chunk_result {
2701            Ok(chunk) => {
2702                output.extend_from_slice(&chunk.to_vec());
2703            }
2704            Err(_) => break,
2705        }
2706    }
2707
2708    Ok(String::from_utf8_lossy(&output).to_string())
2709}
2710
2711#[tokio::main]
2712async fn __compose_run(
2713    docker: &docker_api::Docker,
2714    compose: &Pyo3ComposeFile,
2715    project_name: &str,
2716    service: &str,
2717    command: Option<Vec<String>>,
2718    user: Option<&str>,
2719    workdir: Option<&str>,
2720    env: Option<Vec<String>>,
2721    rm: bool,
2722    detach: bool,
2723) -> PyResult<ComposeRunResult> {
2724    use futures_util::StreamExt;
2725
2726    // Get the service configuration
2727    let service_config = compose.services.get(service).ok_or_else(|| {
2728        PyRuntimeError::new_err(format!("Service '{}' not found in compose file", service))
2729    })?;
2730
2731    // Determine the image to use
2732    let image = service_config.image.as_ref().ok_or_else(|| {
2733        PyRuntimeError::new_err(format!(
2734            "Service '{}' does not have an image specified",
2735            service
2736        ))
2737    })?;
2738
2739    // Generate a unique container name for the run
2740    let container_name = format!(
2741        "{}_{}_run_{}",
2742        project_name,
2743        service,
2744        std::time::SystemTime::now()
2745            .duration_since(std::time::UNIX_EPOCH)
2746            .unwrap()
2747            .as_millis()
2748    );
2749
2750    let containers = Containers::new(docker.clone());
2751
2752    // Build container create options
2753    let mut opts = ContainerCreateOpts::builder()
2754        .image(image)
2755        .name(&container_name)
2756        .auto_remove(rm);
2757
2758    // Set labels for compose project tracking
2759    let mut labels = HashMap::new();
2760    labels.insert(
2761        "com.docker.compose.project".to_string(),
2762        project_name.to_string(),
2763    );
2764    labels.insert(
2765        "com.docker.compose.service".to_string(),
2766        service.to_string(),
2767    );
2768    labels.insert("com.docker.compose.oneoff".to_string(), "True".to_string());
2769    let labels_ref: HashMap<&str, &str> = labels
2770        .iter()
2771        .map(|(k, v)| (k.as_str(), v.as_str()))
2772        .collect();
2773    opts = opts.labels(labels_ref);
2774
2775    // Set command - use provided command or service default
2776    let cmd = command.or_else(|| command_to_vec(&service_config.command));
2777    if let Some(c) = &cmd {
2778        let cmd_refs: Vec<&str> = c.iter().map(|s| s.as_str()).collect();
2779        opts = opts.command(cmd_refs);
2780    }
2781
2782    // Set environment - combine service env with provided env
2783    let mut all_env = env_to_vec(&service_config.environment);
2784    if let Some(additional_env) = &env {
2785        all_env.extend(additional_env.iter().cloned());
2786    }
2787    if !all_env.is_empty() {
2788        let env_refs: Vec<&str> = all_env.iter().map(|s| s.as_str()).collect();
2789        opts = opts.env(env_refs);
2790    }
2791
2792    // Set user
2793    if let Some(u) = user {
2794        opts = opts.user(u);
2795    } else if let Some(u) = &service_config.user {
2796        opts = opts.user(u.as_str());
2797    }
2798
2799    // Set working directory
2800    if let Some(wd) = workdir {
2801        opts = opts.working_dir(wd);
2802    } else if let Some(wd) = &service_config.working_dir {
2803        opts = opts.working_dir(wd.as_str());
2804    }
2805
2806    // Set tty if configured
2807    if let Some(tty) = service_config.tty {
2808        opts = opts.tty(tty);
2809    }
2810
2811    // Set stdin_open if configured
2812    if let Some(stdin) = service_config.stdin_open {
2813        opts = opts.attach_stdin(stdin);
2814    }
2815
2816    // Set network mode - use project default network
2817    let default_network = format!("{}_default", project_name);
2818    opts = opts.network_mode(&default_network);
2819
2820    // Create the container
2821    let container = containers.create(&opts.build()).await.map_err(|e| {
2822        PyRuntimeError::new_err(format!("Failed to create container for run: {}", e))
2823    })?;
2824
2825    let container_id = container.id().to_string();
2826
2827    // Start the container
2828    container
2829        .start()
2830        .await
2831        .map_err(|e| PyRuntimeError::new_err(format!("Failed to start run container: {}", e)))?;
2832
2833    if detach {
2834        // Return immediately for detached mode
2835        return Ok(ComposeRunResult {
2836            container_id,
2837            output: None,
2838            exit_code: None,
2839        });
2840    }
2841
2842    // Wait for the container and collect output
2843    let log_opts = LogsOpts::builder()
2844        .stdout(true)
2845        .stderr(true)
2846        .follow(true)
2847        .build();
2848
2849    let log_stream = container.logs(&log_opts);
2850    let log_chunks: Vec<Vec<u8>> = log_stream
2851        .map(|chunk| match chunk {
2852            Ok(chunk) => chunk.to_vec(),
2853            Err(_) => vec![],
2854        })
2855        .collect()
2856        .await;
2857
2858    let log_bytes: Vec<u8> = log_chunks.into_iter().flatten().collect();
2859    let output = String::from_utf8_lossy(&log_bytes).to_string();
2860
2861    // Wait for container to finish
2862    let wait_result = container.wait().await.ok();
2863    let exit_code = wait_result.map(|r| r.status_code);
2864
2865    // Container is auto-removed if rm=true, otherwise leave it
2866    // auto_remove handles cleanup automatically
2867
2868    Ok(ComposeRunResult {
2869        container_id,
2870        output: Some(output),
2871        exit_code,
2872    })
2873}
2874
2875/// Get services in dependency order (topological sort)
2876fn get_service_order(compose: &Pyo3ComposeFile) -> Vec<String> {
2877    let mut result = Vec::new();
2878    let mut visited = std::collections::HashSet::new();
2879
2880    fn visit(
2881        name: &str,
2882        compose: &Pyo3ComposeFile,
2883        visited: &mut std::collections::HashSet<String>,
2884        result: &mut Vec<String>,
2885    ) {
2886        if visited.contains(name) {
2887            return;
2888        }
2889        visited.insert(name.to_string());
2890
2891        // Visit dependencies first
2892        if let Some(service) = compose.services.get(name) {
2893            if let Some(depends) = &service.depends_on {
2894                match depends {
2895                    DependsOn::List(deps) => {
2896                        for dep in deps {
2897                            visit(dep, compose, visited, result);
2898                        }
2899                    }
2900                    DependsOn::Map(deps) => {
2901                        for dep in deps.keys() {
2902                            visit(dep, compose, visited, result);
2903                        }
2904                    }
2905                }
2906            }
2907        }
2908
2909        result.push(name.to_string());
2910    }
2911
2912    for name in compose.services.keys() {
2913        visit(name, compose, &mut visited, &mut result);
2914    }
2915
2916    result
2917}