freenet_test_network/
docker.rs

1//! Docker-based NAT simulation backend for testing Freenet in isolated networks.
2//!
3//! This module provides infrastructure to run Freenet peers in Docker containers
4//! behind simulated NAT routers, allowing detection of bugs that only manifest
5//! when peers are on different networks.
6
7use crate::{logs::LogEntry, process::PeerProcess, Error, Result};
8use bollard::{
9    container::{
10        Config, CreateContainerOptions, LogOutput, LogsOptions, RemoveContainerOptions,
11        StartContainerOptions, StopContainerOptions, UploadToContainerOptions,
12    },
13    exec::{CreateExecOptions, StartExecResults},
14    image::BuildImageOptions,
15    network::CreateNetworkOptions,
16    secret::{ContainerStateStatusEnum, HostConfig, Ipam, IpamConfig, PortBinding},
17    Docker,
18};
19use futures::StreamExt;
20use ipnetwork::Ipv4Network;
21use rand::Rng;
22use std::{
23    collections::HashMap,
24    net::Ipv4Addr,
25    path::{Path, PathBuf},
26    time::Duration,
27};
28
/// Configuration for Docker NAT simulation
///
/// Controls how simulated networks are laid out (subnets, topology) and how
/// the Docker resources created for them are named and cleaned up.
#[derive(Debug, Clone)]
pub struct DockerNatConfig {
    /// NAT topology configuration (how peers are assigned to NAT networks)
    pub topology: NatTopology,
    /// Base subnet for public network (gateway network)
    pub public_subnet: Ipv4Network,
    /// Base for private network subnets (each NAT gets its own /24 derived
    /// from this address; see `create_nat_network`)
    pub private_subnet_base: Ipv4Addr,
    /// Whether to remove containers on drop
    pub cleanup_on_drop: bool,
    /// Prefix for container and network names. The `Default` impl embeds a
    /// timestamp here, which stale-resource cleanup later parses to age out
    /// leftovers from previous runs.
    pub name_prefix: String,
}
43
44impl Default for DockerNatConfig {
45    fn default() -> Self {
46        // Generate timestamp-based prefix for easier identification of stale resources
47        let timestamp = chrono::Utc::now().format("%Y%m%d-%H%M%S").to_string();
48        let random_id = rand::thread_rng().gen::<u16>();
49        let name_prefix = format!("freenet-nat-{}-{}", timestamp, random_id);
50
51        // Randomize the second octet (16-31) to avoid subnet overlap when running
52        // multiple tests sequentially. Docker cannot create networks with overlapping
53        // subnets, so each test run needs a unique subnet range.
54        // Using 172.16.0.0/12 private range: 172.16-31.x.x
55        // Use /16 subnet to allow peers in different /24s (different ring locations)
56        let second_octet = rand::thread_rng().gen_range(16..=31);
57        let public_subnet = format!("172.{}.0.0/16", second_octet).parse().unwrap();
58
59        // Also randomize the private subnet base to avoid conflicts
60        // Using 10.x.0.0 range with random first octet portion
61        let private_first_octet = rand::thread_rng().gen_range(1..=250);
62
63        Self {
64            topology: NatTopology::OnePerNat,
65            public_subnet,
66            private_subnet_base: Ipv4Addr::new(10, private_first_octet, 0, 0),
67            cleanup_on_drop: true,
68            name_prefix,
69        }
70    }
71}
72
/// How peers are distributed across NAT networks
#[derive(Debug, Clone)]
pub enum NatTopology {
    /// Each peer (except gateways) gets its own NAT network
    OnePerNat,
    /// Specific assignment of peers to NAT networks, one entry per network
    Custom(Vec<NatNetwork>),
}
81
/// A NAT network containing one or more peers
#[derive(Debug, Clone)]
pub struct NatNetwork {
    /// Human-readable name for this NAT network
    pub name: String,
    /// Indices of the peers placed behind this NAT
    pub peer_indices: Vec<usize>,
    /// The NAT behavior this network's router simulates
    pub nat_type: NatType,
}
89
/// Type of NAT simulation
#[derive(Debug, Clone, Default)]
pub enum NatType {
    /// Outbound MASQUERADE only - most common residential NAT
    #[default]
    RestrictedCone,
    /// MASQUERADE + port forwarding for specified ports.
    // NOTE(review): the meaning of `None` (no ports vs. a default set) is not
    // established in this file — confirm against the code that consumes it.
    FullCone { forwarded_ports: Option<Vec<u16>> },
}
99
/// Manages Docker resources for NAT simulation
///
/// Records every network and container it creates so they can be
/// removed later.
pub struct DockerNatBackend {
    /// Connection to the local Docker daemon
    docker: Docker,
    /// Active configuration (subnets, name prefix, topology)
    config: DockerNatConfig,
    /// Network IDs created by this backend
    networks: Vec<String>,
    /// Container IDs created by this backend (NAT routers + peers)
    containers: Vec<String>,
    /// Mapping from peer index to container info
    peer_containers: HashMap<usize, DockerPeerInfo>,
    /// ID of the public network
    public_network_id: Option<String>,
}
113
/// Information about a peer running in a Docker container
#[derive(Debug, Clone)]
pub struct DockerPeerInfo {
    /// ID of the container running this peer
    pub container_id: String,
    /// Human-readable name of the container
    pub container_name: String,
    /// IP address on private network (behind NAT)
    pub private_ip: Ipv4Addr,
    /// IP address on public network (for gateways) or NAT router's public IP (for peers)
    pub public_ip: Ipv4Addr,
    /// Port mapped to host for WebSocket API access
    pub host_ws_port: u16,
    /// Network port inside container
    pub network_port: u16,
    /// Whether this is a gateway (not behind NAT)
    pub is_gateway: bool,
    /// NAT router container ID (None for gateways)
    pub nat_router_id: Option<String>,
}
132
/// A peer process running in a Docker container
pub struct DockerProcess {
    /// Connection to the local Docker daemon
    docker: Docker,
    /// ID of the container running this peer
    container_id: String,
    // NOTE(review): not read anywhere in this file — presumably kept for
    // diagnostics or used by code outside this view; confirm before removing.
    container_name: String,
    /// Local file where `read_logs` caches the container's output
    local_log_cache: PathBuf,
}
140
141impl PeerProcess for DockerProcess {
142    fn is_running(&self) -> bool {
143        // Use blocking runtime to check container status
144        let docker = self.docker.clone();
145        let id = self.container_id.clone();
146
147        tokio::task::block_in_place(|| {
148            tokio::runtime::Handle::current().block_on(async {
149                match docker.inspect_container(&id, None).await {
150                    Ok(info) => info
151                        .state
152                        .and_then(|s| s.status)
153                        .map(|s| s == ContainerStateStatusEnum::RUNNING)
154                        .unwrap_or(false),
155                    Err(_) => false,
156                }
157            })
158        })
159    }
160
161    fn kill(&mut self) -> Result<()> {
162        let docker = self.docker.clone();
163        let id = self.container_id.clone();
164
165        tokio::task::block_in_place(|| {
166            tokio::runtime::Handle::current().block_on(async {
167                // Stop container with timeout
168                let _ = docker
169                    .stop_container(&id, Some(StopContainerOptions { t: 5 }))
170                    .await;
171                Ok(())
172            })
173        })
174    }
175
176    fn log_path(&self) -> PathBuf {
177        self.local_log_cache.clone()
178    }
179
180    fn read_logs(&self) -> Result<Vec<LogEntry>> {
181        let docker = self.docker.clone();
182        let id = self.container_id.clone();
183        let cache_path = self.local_log_cache.clone();
184
185        tokio::task::block_in_place(|| {
186            tokio::runtime::Handle::current().block_on(async {
187                // Fetch logs from container
188                let options = LogsOptions::<String> {
189                    stdout: true,
190                    stderr: true,
191                    timestamps: true,
192                    ..Default::default()
193                };
194
195                let mut logs = docker.logs(&id, Some(options));
196                let mut log_content = String::new();
197
198                while let Some(log_result) = logs.next().await {
199                    match log_result {
200                        Ok(LogOutput::StdOut { message }) | Ok(LogOutput::StdErr { message }) => {
201                            log_content.push_str(&String::from_utf8_lossy(&message));
202                        }
203                        _ => {}
204                    }
205                }
206
207                // Write to cache file
208                if let Some(parent) = cache_path.parent() {
209                    std::fs::create_dir_all(parent)?;
210                }
211                std::fs::write(&cache_path, &log_content)?;
212
213                // Parse logs
214                crate::logs::read_log_file(&cache_path)
215            })
216        })
217    }
218}
219
220impl Drop for DockerProcess {
221    fn drop(&mut self) {
222        let _ = self.kill();
223    }
224}
225
226impl DockerNatBackend {
227    /// Create a new Docker NAT backend
228    pub async fn new(config: DockerNatConfig) -> Result<Self> {
229        let docker = Docker::connect_with_local_defaults()
230            .map_err(|e| Error::Other(anyhow::anyhow!("Failed to connect to Docker: {}", e)))?;
231
232        // Verify Docker is accessible
233        docker
234            .ping()
235            .await
236            .map_err(|e| Error::Other(anyhow::anyhow!("Docker ping failed: {}", e)))?;
237
238        // Clean up stale resources before creating new ones.
239        // Use a short max_age (10 seconds) to remove resources from previous test runs
240        // while preserving any resources created in the current session. This prevents
241        // "Pool overlaps with other one on this address space" errors when tests
242        // run sequentially in the same process.
243        Self::cleanup_stale_resources(&docker, Duration::from_secs(10)).await?;
244
245        Ok(Self {
246            docker,
247            config,
248            networks: Vec::new(),
249            containers: Vec::new(),
250            peer_containers: HashMap::new(),
251            public_network_id: None,
252        })
253    }
254
255    /// Clean up stale Docker resources older than the specified duration
256    ///
257    /// This removes containers and networks matching the "freenet-nat-" prefix
258    /// that are older than `max_age`. Pass `Duration::ZERO` to clean up ALL
259    /// matching resources regardless of age.
260    async fn cleanup_stale_resources(docker: &Docker, max_age: Duration) -> Result<()> {
261        use bollard::container::ListContainersOptions;
262        use bollard::network::ListNetworksOptions;
263
264        let now = std::time::SystemTime::now();
265        let now_secs = now.duration_since(std::time::UNIX_EPOCH).unwrap().as_secs() as i64;
266        // If max_age is zero, set cutoff to future to match everything
267        let cutoff = if max_age.is_zero() {
268            i64::MAX // Match everything
269        } else {
270            now_secs - max_age.as_secs() as i64
271        };
272
273        if max_age.is_zero() {
274            tracing::debug!("Cleaning up ALL freenet-nat resources");
275        } else {
276            tracing::debug!(
277                "Cleaning up freenet-nat resources older than {} seconds",
278                max_age.as_secs()
279            );
280        }
281
282        // Clean up stale containers
283        let mut filters = HashMap::new();
284        filters.insert("name".to_string(), vec!["freenet-nat-".to_string()]);
285
286        let options = ListContainersOptions {
287            all: true,
288            filters,
289            ..Default::default()
290        };
291
292        match docker.list_containers(Some(options)).await {
293            Ok(containers) => {
294                let mut removed_count = 0;
295                for container in containers {
296                    // Parse timestamp from container name
297                    if let Some(name) = container.names.and_then(|n| n.first().cloned()) {
298                        if let Some(created) = container.created {
299                            if created < cutoff {
300                                if let Some(id) = container.id {
301                                    tracing::info!(
302                                        "Removing stale container: {} (age: {}s)",
303                                        name,
304                                        now.duration_since(std::time::UNIX_EPOCH).unwrap().as_secs()
305                                            as i64
306                                            - created
307                                    );
308                                    let _ = docker
309                                        .stop_container(&id, Some(StopContainerOptions { t: 2 }))
310                                        .await;
311                                    let _ = docker
312                                        .remove_container(
313                                            &id,
314                                            Some(RemoveContainerOptions {
315                                                force: true,
316                                                ..Default::default()
317                                            }),
318                                        )
319                                        .await;
320                                    removed_count += 1;
321                                }
322                            }
323                        }
324                    }
325                }
326                if removed_count > 0 {
327                    tracing::info!("Removed {} stale container(s)", removed_count);
328                }
329            }
330            Err(e) => {
331                tracing::warn!("Failed to list containers for cleanup: {}", e);
332            }
333        }
334
335        // Clean up stale networks
336        let mut filters = HashMap::new();
337        filters.insert("name".to_string(), vec!["freenet-nat-".to_string()]);
338
339        let options = ListNetworksOptions { filters };
340
341        match docker.list_networks(Some(options)).await {
342            Ok(networks) => {
343                let mut removed_count = 0;
344                for network in networks {
345                    if let Some(name) = &network.name {
346                        if name.starts_with("freenet-nat-") {
347                            // Parse timestamp from network name (format: freenet-nat-YYYYMMDD-HHMMSS-xxxxx)
348                            if let Some(timestamp_str) = name.strip_prefix("freenet-nat-") {
349                                // Extract YYYYMMDD-HHMMSS part
350                                let parts: Vec<&str> = timestamp_str.split('-').collect();
351                                if parts.len() >= 2 {
352                                    let date_time = format!("{}-{}", parts[0], parts[1]);
353                                    if let Ok(created_time) = chrono::NaiveDateTime::parse_from_str(
354                                        &date_time,
355                                        "%Y%m%d-%H%M%S",
356                                    ) {
357                                        let created_timestamp = created_time.and_utc().timestamp();
358                                        if created_timestamp < cutoff {
359                                            if let Some(id) = &network.id {
360                                                tracing::info!(
361                                                    "Removing stale network: {} (age: {}s)",
362                                                    name,
363                                                    now.duration_since(std::time::UNIX_EPOCH)
364                                                        .unwrap()
365                                                        .as_secs()
366                                                        as i64
367                                                        - created_timestamp
368                                                );
369                                                let _ = docker.remove_network(id).await;
370                                                removed_count += 1;
371                                            }
372                                        }
373                                    }
374                                }
375                            }
376                        }
377                    }
378                }
379                if removed_count > 0 {
380                    tracing::info!("Removed {} stale network(s)", removed_count);
381                }
382            }
383            Err(e) => {
384                tracing::warn!("Failed to list networks for cleanup: {}", e);
385            }
386        }
387
388        Ok(())
389    }
390
391    /// Create the public network where gateways live
392    ///
393    /// If the initially chosen subnet conflicts with an existing Docker network,
394    /// this will retry with a different random subnet up to MAX_SUBNET_RETRIES times.
395    pub async fn create_public_network(&mut self) -> Result<String> {
396        const MAX_SUBNET_RETRIES: usize = 10;
397
398        for attempt in 0..MAX_SUBNET_RETRIES {
399            let network_name = format!("{}-public", self.config.name_prefix);
400
401            let options = CreateNetworkOptions {
402                name: network_name.clone(),
403                driver: "bridge".to_string(),
404                ipam: Ipam {
405                    config: Some(vec![IpamConfig {
406                        subnet: Some(self.config.public_subnet.to_string()),
407                        ..Default::default()
408                    }]),
409                    ..Default::default()
410                },
411                ..Default::default()
412            };
413
414            match self.docker.create_network(options).await {
415                Ok(response) => {
416                    let network_id = response.id;
417                    self.networks.push(network_id.clone());
418                    self.public_network_id = Some(network_id.clone());
419                    tracing::info!(
420                        "Created public network: {} ({}) with subnet {}",
421                        network_name,
422                        network_id,
423                        self.config.public_subnet
424                    );
425                    return Ok(network_id);
426                }
427                Err(e) => {
428                    let error_msg = e.to_string();
429                    if error_msg.contains("Pool overlaps") {
430                        // Subnet conflict - pick a new random subnet and retry
431                        let old_subnet = self.config.public_subnet;
432                        let new_second_octet = rand::thread_rng().gen_range(16..=31);
433                        self.config.public_subnet =
434                            format!("172.{}.0.0/16", new_second_octet).parse().unwrap();
435                        tracing::warn!(
436                            "Subnet {} conflicts with existing network, retrying with {} (attempt {}/{})",
437                            old_subnet,
438                            self.config.public_subnet,
439                            attempt + 1,
440                            MAX_SUBNET_RETRIES
441                        );
442                        continue;
443                    }
444                    return Err(Error::Other(anyhow::anyhow!(
445                        "Failed to create public network: {}",
446                        e
447                    )));
448                }
449            }
450        }
451
452        Err(Error::Other(anyhow::anyhow!(
453            "Failed to create public network after {} attempts due to subnet conflicts. \
454             This may indicate stale Docker networks. Try running: \
455             docker network ls | grep freenet-nat | awk '{{print $1}}' | xargs -r docker network rm",
456            MAX_SUBNET_RETRIES
457        )))
458    }
459
    /// Create a private network behind NAT for a peer
    ///
    /// Creates an internal bridge network for the peer, then an Alpine-based
    /// router container attached to both the public network and the new
    /// private network, which installs iptables NAT rules at startup.
    ///
    /// Returns `(network_id, router_container_id, router_public_ip)`.
    /// Requires `create_public_network` to have been called first.
    pub async fn create_nat_network(
        &mut self,
        peer_index: usize,
    ) -> Result<(String, String, Ipv4Addr)> {
        // Create private network using randomized base to avoid subnet conflicts
        // between concurrent test runs. Each peer gets its own /24 subnet.
        // NOTE(review): wrapping_add on the second octet wraps past 255 and
        // could collide for very large peer counts — confirm test sizes stay
        // well below that.
        let network_name = format!("{}-nat-{}", self.config.name_prefix, peer_index);
        let base = self.config.private_subnet_base.octets();
        let subnet = Ipv4Network::new(
            Ipv4Addr::new(base[0], base[1].wrapping_add(peer_index as u8), 0, 0),
            24,
        )
        .map_err(|e| Error::Other(anyhow::anyhow!("Invalid subnet: {}", e)))?;

        let options = CreateNetworkOptions {
            name: network_name.clone(),
            driver: "bridge".to_string(),
            internal: true, // No direct external access
            ipam: Ipam {
                config: Some(vec![IpamConfig {
                    subnet: Some(subnet.to_string()),
                    ..Default::default()
                }]),
                ..Default::default()
            },
            ..Default::default()
        };

        let response =
            self.docker.create_network(options).await.map_err(|e| {
                Error::Other(anyhow::anyhow!("Failed to create NAT network: {}", e))
            })?;

        let network_id = response.id;
        self.networks.push(network_id.clone());

        // Create NAT router container
        let router_name = format!("{}-router-{}", self.config.name_prefix, peer_index);
        let public_network_id = self
            .public_network_id
            .as_ref()
            .ok_or_else(|| Error::Other(anyhow::anyhow!("Public network not created yet")))?;

        // NAT router IP addresses
        // Each peer gets an IP in a different /24 subnet to ensure different ring locations
        // E.g., peer 0 -> 172.X.0.100, peer 1 -> 172.X.1.100, peer 2 -> 172.X.2.100
        // This way, Location::from_address (which masks last byte) gives each peer a different location
        let router_public_ip = Ipv4Addr::new(
            self.config.public_subnet.ip().octets()[0],
            self.config.public_subnet.ip().octets()[1],
            peer_index as u8, // Different /24 per peer for unique ring locations
            100,              // Fixed host part within each /24
        );
        // Use .254 for router to avoid conflict with Docker's default gateway at .1
        let router_private_ip =
            Ipv4Addr::new(base[0], base[1].wrapping_add(peer_index as u8), 0, 254);

        // Create router container with iptables NAT rules
        // Create without network first, then connect to both networks before starting
        // Build patterns for matching the public and private networks.
        // These are grep regexes used inside the router's startup script to
        // find which interface holds which address.
        let public_octets = self.config.public_subnet.ip().octets();
        let public_pattern = format!("172\\.{}\\.", public_octets[1]);
        let private_pattern = format!(" {}\\.", base[0]);
        // Calculate peer's private IP (matches what create_peer will use)
        // NOTE(review): .2 must stay in sync with the peer-creation code — if
        // that allocation changes, the Full Cone DNAT target below breaks.
        let peer_private_ip = Ipv4Addr::new(base[0], base[1].wrapping_add(peer_index as u8), 0, 2);

        // Build iptables rules based on NAT type
        //
        // NAT Types (from most permissive to most restrictive):
        // 1. Full Cone: Any external host can send to mapped port (like port forwarding)
        // 2. Address-Restricted Cone: Only hosts the peer has contacted can send back
        // 3. Port-Restricted Cone: Only host:port pairs the peer has contacted can send back
        // 4. Symmetric: Different mapping for each destination (breaks hole punching)
        //
        // Default: Port-Restricted Cone NAT - the most common residential NAT type
        // This requires proper UDP hole-punching: peer must send packet to remote's public
        // IP:port first, which creates a NAT mapping that allows return traffic.
        //
        // The key insight: Linux conntrack already provides port-restricted cone behavior
        // by default with MASQUERADE - it allows return traffic from the exact IP:port
        // that received outbound traffic. We just need to NOT add blanket DNAT rules.
        //
        // The selected rule string is *prepended* to the common MASQUERADE
        // setup below, hence the trailing " && " on every branch.
        let dnat_rules = if std::env::var("FREENET_TEST_FULL_CONE_NAT").is_ok() {
            // Full Cone NAT: Add DNAT rules to forward all traffic on port 31337 to peer
            // This simulates port forwarding / UPnP - unrealistic for testing hole punching
            format!(
                "iptables -t nat -A PREROUTING -i $PUBLIC_IF -p udp --dport 31337 -j DNAT --to-destination {}:31337 && \
                 echo 'Full Cone NAT: DNAT rule added for port 31337 -> {}:31337' && ",
                peer_private_ip, peer_private_ip
            )
        } else if std::env::var("FREENET_TEST_SYMMETRIC_NAT").is_ok() {
            // Symmetric NAT: Use random source ports for each destination
            // This breaks UDP hole punching entirely
            format!(
                "iptables -t nat -A POSTROUTING -o $PUBLIC_IF -p udp -j MASQUERADE --random && \
                 echo 'Symmetric NAT: Random port mapping enabled (hole punching will fail)' && "
            )
        } else {
            // Port-Restricted Cone NAT (default): Realistic residential NAT
            // - Outbound traffic creates NAT mapping (conntrack entry)
            // - Only return traffic from the same remote IP:port is allowed through
            // - No pre-configured port forwarding
            // - Requires proper hole-punching coordination via signaling
            String::from(
                "echo 'Port-Restricted Cone NAT: No DNAT rules - hole punching required' && "
            )
        };

        let router_config = Config {
            image: Some("alpine:latest".to_string()),
            hostname: Some(router_name.clone()),
            cmd: Some(vec![
                "sh".to_string(),
                "-c".to_string(),
                // Set up NAT (IP forwarding enabled via sysctl in host_config)
                // Find interfaces dynamically by IP address since Docker doesn't guarantee interface order
                // PUBLIC_IF: interface with 172.X.x.x (public network, X varies)
                // PRIVATE_IF: interface with 10.x.x.x (private network)
                // The final `tail -f /dev/null` keeps the container alive.
                format!(
                    "apk add --no-cache iptables iproute2 > /dev/null 2>&1 && \
                     PUBLIC_IF=$(ip -o addr show | grep '{}' | awk '{{print $2}}') && \
                     PRIVATE_IF=$(ip -o addr show | grep '{}' | awk '{{print $2}}') && \
                     PUBLIC_IP=$(ip -o addr show dev $PUBLIC_IF | awk '/inet / {{split($4,a,\"/\"); print a[1]}}') && \
                     echo \"Public interface: $PUBLIC_IF ($PUBLIC_IP), Private interface: $PRIVATE_IF\" && \
                     {}iptables -t nat -A POSTROUTING -o $PUBLIC_IF -j MASQUERADE && \
                     iptables -A FORWARD -i $PRIVATE_IF -o $PUBLIC_IF -j ACCEPT && \
                     iptables -A FORWARD -i $PUBLIC_IF -o $PRIVATE_IF -j ACCEPT && \
                     echo 'NAT router ready' && \
                     tail -f /dev/null",
                    public_pattern, private_pattern, dnat_rules
                ),
            ]),
            host_config: Some(HostConfig {
                // NET_ADMIN is required to run iptables inside the container.
                cap_add: Some(vec!["NET_ADMIN".to_string()]),
                sysctls: Some(HashMap::from([
                    ("net.ipv4.ip_forward".to_string(), "1".to_string()),
                ])),
                ..Default::default()
            }),
            ..Default::default()
        };

        let router_id = self
            .docker
            .create_container(
                Some(CreateContainerOptions {
                    name: router_name.clone(),
                    ..Default::default()
                }),
                router_config,
            )
            .await
            .map_err(|e| Error::Other(anyhow::anyhow!("Failed to create NAT router: {}", e)))?
            .id;

        self.containers.push(router_id.clone());

        // Disconnect from default bridge network (best effort; ignored if the
        // container was never attached to it).
        let _ = self
            .docker
            .disconnect_network(
                "bridge",
                bollard::network::DisconnectNetworkOptions {
                    container: router_id.clone(),
                    force: true,
                },
            )
            .await;

        // Connect router to public network (becomes eth0 after starting).
        // Order matters: connecting public before private fixes which
        // interface names the startup script's grep patterns will find.
        self.docker
            .connect_network(
                public_network_id,
                bollard::network::ConnectNetworkOptions {
                    container: router_id.clone(),
                    endpoint_config: bollard::secret::EndpointSettings {
                        ipam_config: Some(bollard::secret::EndpointIpamConfig {
                            ipv4_address: Some(router_public_ip.to_string()),
                            ..Default::default()
                        }),
                        ..Default::default()
                    },
                },
            )
            .await
            .map_err(|e| {
                Error::Other(anyhow::anyhow!(
                    "Failed to connect router to public network: {}",
                    e
                ))
            })?;

        // Connect router to private network (becomes eth1 after starting)
        self.docker
            .connect_network(
                &network_id,
                bollard::network::ConnectNetworkOptions {
                    container: router_id.clone(),
                    endpoint_config: bollard::secret::EndpointSettings {
                        ipam_config: Some(bollard::secret::EndpointIpamConfig {
                            ipv4_address: Some(router_private_ip.to_string()),
                            ..Default::default()
                        }),
                        ..Default::default()
                    },
                },
            )
            .await
            .map_err(|e| {
                Error::Other(anyhow::anyhow!(
                    "Failed to connect router to private network: {}",
                    e
                ))
            })?;

        // Start the router
        self.docker
            .start_container(&router_id, None::<StartContainerOptions<String>>)
            .await
            .map_err(|e| Error::Other(anyhow::anyhow!("Failed to start NAT router: {}", e)))?;

        // Wait for router to be ready
        // NOTE(review): fixed 2s sleep — presumably enough for apk + iptables;
        // consider polling the container log for 'NAT router ready' instead.
        tokio::time::sleep(Duration::from_secs(2)).await;

        tracing::info!(
            "Created NAT network {} with router {} (public: {}, private: {})",
            network_name,
            router_name,
            router_public_ip,
            router_private_ip
        );

        Ok((network_id, router_id, router_public_ip))
    }
694
695    /// Build the base Freenet peer Docker image
696    pub async fn ensure_base_image(&self) -> Result<String> {
697        let image_name = "freenet-test-peer:latest";
698
699        // Check if image already exists
700        if self.docker.inspect_image(image_name).await.is_ok() {
701            tracing::debug!("Base image {} already exists", image_name);
702            return Ok(image_name.to_string());
703        }
704
705        tracing::info!("Building base image {}...", image_name);
706
707        // Create a minimal Dockerfile - use Ubuntu 24.04 to match host glibc version
708        let dockerfile = r#"
709FROM ubuntu:24.04
710RUN apt-get update && \
711    apt-get install -y --no-install-recommends \
712        libssl3 \
713        ca-certificates \
714        iproute2 \
715        && rm -rf /var/lib/apt/lists/*
716RUN mkdir -p /data /config
717WORKDIR /app
718"#;
719
720        // Create tar archive with Dockerfile
721        let mut tar_builder = tar::Builder::new(Vec::new());
722        let mut header = tar::Header::new_gnu();
723        header.set_path("Dockerfile")?;
724        header.set_size(dockerfile.len() as u64);
725        header.set_mode(0o644);
726        header.set_cksum();
727        tar_builder.append(&header, dockerfile.as_bytes())?;
728        let tar_data = tar_builder.into_inner()?;
729
730        // Build image
731        let options = BuildImageOptions {
732            dockerfile: "Dockerfile",
733            t: image_name,
734            rm: true,
735            ..Default::default()
736        };
737
738        let mut build_stream = self
739            .docker
740            .build_image(options, None, Some(tar_data.into()));
741
742        while let Some(result) = build_stream.next().await {
743            match result {
744                Ok(info) => {
745                    if let Some(stream) = info.stream {
746                        tracing::debug!("Build: {}", stream.trim());
747                    }
748                    if let Some(error) = info.error {
749                        return Err(Error::Other(anyhow::anyhow!(
750                            "Image build error: {}",
751                            error
752                        )));
753                    }
754                }
755                Err(e) => {
756                    return Err(Error::Other(anyhow::anyhow!("Image build failed: {}", e)));
757                }
758            }
759        }
760
761        tracing::info!("Built base image {}", image_name);
762        Ok(image_name.to_string())
763    }
764
765    /// Copy binary into a container
766    pub async fn copy_binary_to_container(
767        &self,
768        container_id: &str,
769        binary_path: &Path,
770    ) -> Result<()> {
771        // Read binary
772        let binary_data = std::fs::read(binary_path)?;
773
774        // Create tar archive with the binary
775        let mut tar_builder = tar::Builder::new(Vec::new());
776        let mut header = tar::Header::new_gnu();
777        header.set_path("freenet")?;
778        header.set_size(binary_data.len() as u64);
779        header.set_mode(0o755);
780        header.set_cksum();
781        tar_builder.append(&header, binary_data.as_slice())?;
782        let tar_data = tar_builder.into_inner()?;
783
784        // Upload to container
785        self.docker
786            .upload_to_container(
787                container_id,
788                Some(UploadToContainerOptions {
789                    path: "/app",
790                    ..Default::default()
791                }),
792                tar_data.into(),
793            )
794            .await
795            .map_err(|e| Error::Other(anyhow::anyhow!("Failed to copy binary: {}", e)))?;
796
797        Ok(())
798    }
799
    /// Create a gateway container (on public network, no NAT)
    ///
    /// Builds (or reuses) the base image, creates a container that runs the
    /// `freenet` binary in gateway mode (`--is-gateway`), attaches it to the
    /// shared public network at a deterministic IP, copies the binary and
    /// key material in, and starts it.
    ///
    /// - `index`: logical gateway number; drives the container name, the
    ///   public IP (last octet = 10 + index) and the `peer_containers` key.
    /// - `binary_path`: host path of the `freenet` binary (uploaded to `/app`).
    /// - `keypair_path` / `public_key_path`: transport key files uploaded to
    ///   `/config`.
    /// - `ws_port` / `network_port`: ports used *inside* the container; the
    ///   WS port is additionally published on a freshly allocated host port.
    /// - `run_root`: host directory under which this gateway's local log
    ///   cache path is derived (`run_root/gw{index}/peer.log`).
    ///
    /// Returns the recorded peer info plus a `DockerProcess` handle for the
    /// started container.
    pub async fn create_gateway(
        &mut self,
        index: usize,
        binary_path: &Path,
        keypair_path: &Path,
        public_key_path: &Path,
        ws_port: u16,
        network_port: u16,
        run_root: &Path,
    ) -> Result<(DockerPeerInfo, DockerProcess)> {
        let container_name = format!("{}-gw-{}", self.config.name_prefix, index);
        let image = self.ensure_base_image().await?;

        // The shared public network must exist before any gateway is created.
        let public_network_id = self
            .public_network_id
            .as_ref()
            .ok_or_else(|| Error::Other(anyhow::anyhow!("Public network not created yet")))?;

        // Gateway IP on public network
        // (first two octets from the configured subnet, third octet fixed at
        // 0, last octet 10 + index; NOTE(review): `10 + index as u8` would
        // wrap for very large indices — assumes a small gateway count)
        let gateway_ip = Ipv4Addr::new(
            self.config.public_subnet.ip().octets()[0],
            self.config.public_subnet.ip().octets()[1],
            0,
            10 + index as u8,
        );

        // Allocate host port for WS API
        let host_ws_port = crate::peer::get_free_port()?;

        // Create container
        let config = Config {
            image: Some(image),
            hostname: Some(container_name.clone()),
            // Expose + publish the WS API port so tests on the host can
            // reach the gateway's WebSocket interface via localhost.
            exposed_ports: Some(HashMap::from([(
                format!("{}/tcp", ws_port),
                HashMap::new(),
            )])),
            host_config: Some(HostConfig {
                port_bindings: Some(HashMap::from([(
                    format!("{}/tcp", ws_port),
                    Some(vec![PortBinding {
                        host_ip: Some("0.0.0.0".to_string()),
                        host_port: Some(host_ws_port.to_string()),
                    }]),
                )])),
                // NET_ADMIN allows network configuration inside the
                // container (peer containers use it for route setup;
                // presumably granted here for parity — confirm).
                cap_add: Some(vec!["NET_ADMIN".to_string()]),
                ..Default::default()
            }),
            env: Some(vec![
                "RUST_LOG=info".to_string(),
                "RUST_BACKTRACE=1".to_string(),
            ]),
            // Gateway invocation: binds everything to 0.0.0.0 inside the
            // container, advertises the fixed public IP, and enables
            // gateway mode.
            cmd: Some(vec![
                "/app/freenet".to_string(),
                "network".to_string(),
                "--data-dir".to_string(),
                "/data".to_string(),
                "--config-dir".to_string(),
                "/config".to_string(),
                "--ws-api-address".to_string(),
                "0.0.0.0".to_string(),
                "--ws-api-port".to_string(),
                ws_port.to_string(),
                "--network-address".to_string(),
                "0.0.0.0".to_string(),
                "--network-port".to_string(),
                network_port.to_string(),
                "--public-network-address".to_string(),
                gateway_ip.to_string(),
                "--public-network-port".to_string(),
                network_port.to_string(),
                "--is-gateway".to_string(),
                "--skip-load-from-network".to_string(),
                "--transport-keypair".to_string(),
                "/config/keypair.pem".to_string(),
            ]),
            ..Default::default()
        };

        let container_id = self
            .docker
            .create_container(
                Some(CreateContainerOptions {
                    name: container_name.clone(),
                    ..Default::default()
                }),
                config,
            )
            .await
            .map_err(|e| {
                Error::Other(anyhow::anyhow!("Failed to create gateway container: {}", e))
            })?
            .id;

        // Track for later cleanup.
        self.containers.push(container_id.clone());

        // Connect to public network with specific IP
        self.docker
            .connect_network(
                public_network_id,
                bollard::network::ConnectNetworkOptions {
                    container: container_id.clone(),
                    endpoint_config: bollard::secret::EndpointSettings {
                        ipam_config: Some(bollard::secret::EndpointIpamConfig {
                            ipv4_address: Some(gateway_ip.to_string()),
                            ..Default::default()
                        }),
                        ..Default::default()
                    },
                },
            )
            .await
            .map_err(|e| {
                Error::Other(anyhow::anyhow!(
                    "Failed to connect gateway to network: {}",
                    e
                ))
            })?;

        // Copy binary and keys into container
        // (must happen before start so the entrypoint finds them)
        self.copy_binary_to_container(&container_id, binary_path)
            .await?;
        self.copy_file_to_container(&container_id, keypair_path, "/config/keypair.pem")
            .await?;
        self.copy_file_to_container(&container_id, public_key_path, "/config/public_key.pem")
            .await?;

        // Start container
        self.docker
            .start_container(&container_id, None::<StartContainerOptions<String>>)
            .await
            .map_err(|e| Error::Other(anyhow::anyhow!("Failed to start gateway: {}", e)))?;

        let info = DockerPeerInfo {
            container_id: container_id.clone(),
            container_name: container_name.clone(),
            private_ip: gateway_ip, // Gateways don't have private IP
            public_ip: gateway_ip,
            host_ws_port,
            network_port,
            is_gateway: true,
            nat_router_id: None,
        };

        self.peer_containers.insert(index, info.clone());

        // Host-side path where DockerProcess caches this gateway's logs.
        let local_log_cache = run_root.join(format!("gw{}", index)).join("peer.log");

        tracing::info!(
            "Created gateway {} at {} (ws: localhost:{})",
            container_name,
            gateway_ip,
            host_ws_port
        );

        Ok((
            info,
            DockerProcess {
                docker: self.docker.clone(),
                container_id,
                container_name,
                local_log_cache,
            },
        ))
    }
966
    /// Create a peer container behind NAT
    ///
    /// Creates a dedicated NAT network and router for this peer, attaches
    /// the peer at a private IP behind it, and routes traffic destined for
    /// the public subnet through the NAT router, while keeping the default
    /// Docker bridge route so the host can still reach the published WS
    /// port.
    ///
    /// - `index`: logical peer number; drives names, the per-peer private
    ///   subnet and the `peer_containers` key.
    /// - `binary_path`: host path of the `freenet` binary (uploaded to `/app`).
    /// - `keypair_path` / `public_key_path`: transport key files uploaded to
    ///   `/config`.
    /// - `gateways_toml_path`: gateway list uploaded to
    ///   `/config/gateways.toml`.
    /// - `gateway_public_key_path`: optional gateway key uploaded to
    ///   `/config/gw_public_key.pem`.
    /// - `ws_port` / `network_port`: ports used *inside* the container; the
    ///   WS port is additionally published on a freshly allocated host port.
    /// - `run_root`: host directory under which this peer's local log cache
    ///   path is derived (`run_root/peer{index}/peer.log`).
    pub async fn create_peer(
        &mut self,
        index: usize,
        binary_path: &Path,
        keypair_path: &Path,
        public_key_path: &Path,
        gateways_toml_path: &Path,
        gateway_public_key_path: Option<&Path>,
        ws_port: u16,
        network_port: u16,
        run_root: &Path,
    ) -> Result<(DockerPeerInfo, DockerProcess)> {
        let container_name = format!("{}-peer-{}", self.config.name_prefix, index);
        let image = self.ensure_base_image().await?;

        // Create NAT network for this peer
        let (nat_network_id, router_id, router_public_ip) = self.create_nat_network(index).await?;

        // Peer's private IP (behind NAT) - use the randomized base from config
        // (second octet offset by the peer index so each peer lives on its
        // own private subnet)
        let base = self.config.private_subnet_base.octets();
        let private_ip = Ipv4Addr::new(base[0], base[1].wrapping_add(index as u8), 0, 2);

        // Allocate host port for WS API
        let host_ws_port = crate::peer::get_free_port()?;

        // Create container
        let config = Config {
            image: Some(image),
            hostname: Some(container_name.clone()),
            // Expose + publish the WS API port so tests on the host can
            // reach the peer's WebSocket interface via localhost.
            exposed_ports: Some(HashMap::from([(
                format!("{}/tcp", ws_port),
                HashMap::new(),
            )])),
            host_config: Some(HostConfig {
                port_bindings: Some(HashMap::from([(
                    format!("{}/tcp", ws_port),
                    Some(vec![PortBinding {
                        host_ip: Some("0.0.0.0".to_string()),
                        host_port: Some(host_ws_port.to_string()),
                    }]),
                )])),
                // NET_ADMIN is required for the `ip route add` exec below.
                cap_add: Some(vec!["NET_ADMIN".to_string()]),
                ..Default::default()
            }),
            env: Some(vec![
                "RUST_LOG=info".to_string(),
                "RUST_BACKTRACE=1".to_string(),
            ]),
            cmd: Some(vec![
                "/app/freenet".to_string(),
                "network".to_string(),
                "--data-dir".to_string(),
                "/data".to_string(),
                "--config-dir".to_string(),
                "/config".to_string(),
                "--ws-api-address".to_string(),
                "0.0.0.0".to_string(),
                "--ws-api-port".to_string(),
                ws_port.to_string(),
                "--network-address".to_string(),
                "0.0.0.0".to_string(),
                "--network-port".to_string(),
                network_port.to_string(),
                // Don't set public address - let Freenet discover it via gateway
                "--skip-load-from-network".to_string(),
                "--transport-keypair".to_string(),
                "/config/keypair.pem".to_string(),
            ]),
            ..Default::default()
        };

        let container_id = self
            .docker
            .create_container(
                Some(CreateContainerOptions {
                    name: container_name.clone(),
                    ..Default::default()
                }),
                config,
            )
            .await
            .map_err(|e| Error::Other(anyhow::anyhow!("Failed to create peer container: {}", e)))?
            .id;

        // Track for later cleanup.
        self.containers.push(container_id.clone());

        // Keep bridge network connected for Docker port forwarding to work (WebSocket access from host)
        // Connect to NAT private network for Freenet traffic
        self.docker
            .connect_network(
                &nat_network_id,
                bollard::network::ConnectNetworkOptions {
                    container: container_id.clone(),
                    endpoint_config: bollard::secret::EndpointSettings {
                        ipam_config: Some(bollard::secret::EndpointIpamConfig {
                            ipv4_address: Some(private_ip.to_string()),
                            ..Default::default()
                        }),
                        // NOTE(review): x.y.0.1 here appears to be the
                        // private network's own gateway, while the NAT
                        // router sits at x.y.0.254 (see the route added
                        // below) — confirm against create_nat_network.
                        gateway: Some(
                            Ipv4Addr::new(base[0], base[1].wrapping_add(index as u8), 0, 1)
                                .to_string(),
                        ),
                        ..Default::default()
                    },
                },
            )
            .await
            .map_err(|e| {
                Error::Other(anyhow::anyhow!(
                    "Failed to connect peer to NAT network: {}",
                    e
                ))
            })?;

        // Copy binary and keys into container
        // (must happen before start so the entrypoint finds them)
        self.copy_binary_to_container(&container_id, binary_path)
            .await?;
        self.copy_file_to_container(&container_id, keypair_path, "/config/keypair.pem")
            .await?;
        self.copy_file_to_container(&container_id, public_key_path, "/config/public_key.pem")
            .await?;
        self.copy_file_to_container(&container_id, gateways_toml_path, "/config/gateways.toml")
            .await?;

        // Copy gateway public key if provided
        if let Some(gw_pubkey_path) = gateway_public_key_path {
            self.copy_file_to_container(&container_id, gw_pubkey_path, "/config/gw_public_key.pem")
                .await?;
        }

        // Start container
        self.docker
            .start_container(&container_id, None::<StartContainerOptions<String>>)
            .await
            .map_err(|e| Error::Other(anyhow::anyhow!("Failed to start peer: {}", e)))?;

        // Configure routing: traffic to public network goes through NAT router
        // Keep default route via bridge for Docker port forwarding (WebSocket access from host)
        let router_gateway = Ipv4Addr::new(base[0], base[1].wrapping_add(index as u8), 0, 254);
        let public_subnet = self.config.public_subnet;
        self.exec_in_container(
            &container_id,
            &[
                "sh",
                "-c",
                &format!("ip route add {} via {}", public_subnet, router_gateway),
            ],
        )
        .await?;

        let info = DockerPeerInfo {
            container_id: container_id.clone(),
            container_name: container_name.clone(),
            private_ip,
            public_ip: router_public_ip,
            host_ws_port,
            network_port,
            is_gateway: false,
            nat_router_id: Some(router_id),
        };

        self.peer_containers.insert(index, info.clone());

        // Host-side path where DockerProcess caches this peer's logs.
        let local_log_cache = run_root.join(format!("peer{}", index)).join("peer.log");

        tracing::info!(
            "Created peer {} at {} behind NAT {} (ws: localhost:{})",
            container_name,
            private_ip,
            router_public_ip,
            host_ws_port
        );

        Ok((
            info,
            DockerProcess {
                docker: self.docker.clone(),
                container_id,
                container_name,
                local_log_cache,
            },
        ))
    }
1151
1152    /// Copy a file into a container (public version)
1153    pub async fn copy_file_to_container_pub(
1154        &self,
1155        container_id: &str,
1156        local_path: &Path,
1157        container_path: &str,
1158    ) -> Result<()> {
1159        self.copy_file_to_container(container_id, local_path, container_path)
1160            .await
1161    }
1162
1163    /// Copy a file into a container
1164    async fn copy_file_to_container(
1165        &self,
1166        container_id: &str,
1167        local_path: &Path,
1168        container_path: &str,
1169    ) -> Result<()> {
1170        let file_data = std::fs::read(local_path)?;
1171        let file_name = Path::new(container_path)
1172            .file_name()
1173            .ok_or_else(|| Error::Other(anyhow::anyhow!("Invalid container path")))?
1174            .to_str()
1175            .ok_or_else(|| Error::Other(anyhow::anyhow!("Invalid file name")))?;
1176
1177        let dir_path = Path::new(container_path)
1178            .parent()
1179            .ok_or_else(|| Error::Other(anyhow::anyhow!("Invalid container path")))?
1180            .to_str()
1181            .ok_or_else(|| Error::Other(anyhow::anyhow!("Invalid directory path")))?;
1182
1183        // Create tar archive
1184        let mut tar_builder = tar::Builder::new(Vec::new());
1185        let mut header = tar::Header::new_gnu();
1186        header.set_path(file_name)?;
1187        header.set_size(file_data.len() as u64);
1188        header.set_mode(0o644);
1189        header.set_cksum();
1190        tar_builder.append(&header, file_data.as_slice())?;
1191        let tar_data = tar_builder.into_inner()?;
1192
1193        self.docker
1194            .upload_to_container(
1195                container_id,
1196                Some(UploadToContainerOptions {
1197                    path: dir_path,
1198                    ..Default::default()
1199                }),
1200                tar_data.into(),
1201            )
1202            .await
1203            .map_err(|e| Error::Other(anyhow::anyhow!("Failed to copy file: {}", e)))?;
1204
1205        Ok(())
1206    }
1207
1208    /// Execute a command in a container
1209    async fn exec_in_container(&self, container_id: &str, cmd: &[&str]) -> Result<String> {
1210        let exec = self
1211            .docker
1212            .create_exec(
1213                container_id,
1214                CreateExecOptions {
1215                    cmd: Some(cmd.iter().map(|s| s.to_string()).collect()),
1216                    attach_stdout: Some(true),
1217                    attach_stderr: Some(true),
1218                    ..Default::default()
1219                },
1220            )
1221            .await
1222            .map_err(|e| Error::Other(anyhow::anyhow!("Failed to create exec: {}", e)))?;
1223
1224        let output = self
1225            .docker
1226            .start_exec(&exec.id, None)
1227            .await
1228            .map_err(|e| Error::Other(anyhow::anyhow!("Failed to start exec: {}", e)))?;
1229
1230        let mut result = String::new();
1231        if let StartExecResults::Attached { mut output, .. } = output {
1232            while let Some(Ok(msg)) = output.next().await {
1233                match msg {
1234                    LogOutput::StdOut { message } | LogOutput::StdErr { message } => {
1235                        result.push_str(&String::from_utf8_lossy(&message));
1236                    }
1237                    _ => {}
1238                }
1239            }
1240        }
1241
1242        Ok(result)
1243    }
1244
1245    /// Clean up all Docker resources created by this backend
1246    pub async fn cleanup(&mut self) -> Result<()> {
1247        tracing::info!("Cleaning up Docker NAT resources...");
1248
1249        // Stop and remove containers
1250        for container_id in self.containers.drain(..) {
1251            let _ = self
1252                .docker
1253                .stop_container(&container_id, Some(StopContainerOptions { t: 2 }))
1254                .await;
1255            let _ = self
1256                .docker
1257                .remove_container(
1258                    &container_id,
1259                    Some(RemoveContainerOptions {
1260                        force: true,
1261                        ..Default::default()
1262                    }),
1263                )
1264                .await;
1265        }
1266
1267        // Remove networks
1268        for network_id in self.networks.drain(..) {
1269            let _ = self.docker.remove_network(&network_id).await;
1270        }
1271
1272        self.peer_containers.clear();
1273        self.public_network_id = None;
1274
1275        Ok(())
1276    }
1277
1278    /// Get peer info by index
1279    pub fn get_peer_info(&self, index: usize) -> Option<&DockerPeerInfo> {
1280        self.peer_containers.get(&index)
1281    }
1282
1283    /// Dump iptables NAT rules and packet counters from all NAT routers
1284    ///
1285    /// Returns a map of peer_index -> iptables output showing:
1286    /// - NAT table rules with packet/byte counters
1287    /// - FORWARD chain counters
1288    pub async fn dump_iptables_counters(&self) -> Result<std::collections::HashMap<usize, String>> {
1289        let mut results = std::collections::HashMap::new();
1290
1291        for (&peer_index, peer_info) in &self.peer_containers {
1292            if let Some(router_id) = &peer_info.nat_router_id {
1293                let mut output = String::new();
1294
1295                // Get NAT table with counters
1296                output.push_str("=== NAT table ===\n");
1297                match self.exec_in_container(router_id, &["iptables", "-t", "nat", "-nvL"]).await {
1298                    Ok(s) => output.push_str(&s),
1299                    Err(e) => output.push_str(&format!("Error: {}\n", e)),
1300                }
1301
1302                // Get FORWARD chain with counters
1303                output.push_str("\n=== FORWARD chain ===\n");
1304                match self.exec_in_container(router_id, &["iptables", "-nvL", "FORWARD"]).await {
1305                    Ok(s) => output.push_str(&s),
1306                    Err(e) => output.push_str(&format!("Error: {}\n", e)),
1307                }
1308
1309                results.insert(peer_index, output);
1310            }
1311        }
1312
1313        Ok(results)
1314    }
1315
1316    /// Dump conntrack table from all NAT routers
1317    ///
1318    /// Shows active NAT connection tracking entries for UDP traffic.
1319    /// Note: Installs conntrack-tools if not present (adds ~2s per router first time).
1320    pub async fn dump_conntrack_table(&self) -> Result<std::collections::HashMap<usize, String>> {
1321        let mut results = std::collections::HashMap::new();
1322
1323        for (&peer_index, peer_info) in &self.peer_containers {
1324            if let Some(router_id) = &peer_info.nat_router_id {
1325                // Install conntrack-tools if needed
1326                let _ = self.exec_in_container(
1327                    router_id,
1328                    &["apk", "add", "--no-cache", "conntrack-tools"]
1329                ).await;
1330
1331                // Get conntrack entries for UDP
1332                match self.exec_in_container(router_id, &["conntrack", "-L", "-p", "udp"]).await {
1333                    Ok(s) if s.trim().is_empty() => {
1334                        results.insert(peer_index, "(no UDP conntrack entries)".to_string());
1335                    }
1336                    Ok(s) => {
1337                        results.insert(peer_index, s);
1338                    }
1339                    Err(e) => {
1340                        results.insert(peer_index, format!("Error: {}", e));
1341                    }
1342                }
1343            }
1344        }
1345
1346        Ok(results)
1347    }
1348}
1349
1350impl Drop for DockerNatBackend {
1351    fn drop(&mut self) {
1352        if self.config.cleanup_on_drop {
1353            tracing::info!("Cleaning up Docker NAT backend resources...");
1354
1355            // Use blocking approach to ensure cleanup completes before drop finishes
1356            let docker = self.docker.clone();
1357            let containers = std::mem::take(&mut self.containers);
1358            let networks = std::mem::take(&mut self.networks);
1359
1360            // Block until cleanup completes - important for ensuring resources are freed
1361            // even on panic or ctrl-c.
1362            // If we're already in a runtime, use block_in_place; otherwise create a new runtime.
1363            let cleanup = async {
1364                // Stop and remove containers in parallel for faster cleanup
1365                let container_futures = containers.into_iter().map(|container_id| {
1366                    let docker = docker.clone();
1367                    async move {
1368                        if let Err(e) = docker
1369                            .stop_container(&container_id, Some(StopContainerOptions { t: 2 }))
1370                            .await
1371                        {
1372                            tracing::debug!("Failed to stop container {}: {}", container_id, e);
1373                        }
1374                        if let Err(e) = docker
1375                            .remove_container(
1376                                &container_id,
1377                                Some(RemoveContainerOptions {
1378                                    force: true,
1379                                    ..Default::default()
1380                                }),
1381                            )
1382                            .await
1383                        {
1384                            tracing::debug!("Failed to remove container {}: {}", container_id, e);
1385                        }
1386                    }
1387                });
1388
1389                // Wait for all containers to be cleaned up
1390                futures::future::join_all(container_futures).await;
1391
1392                // Then remove networks (must happen after containers are disconnected)
1393                for network_id in networks {
1394                    if let Err(e) = docker.remove_network(&network_id).await {
1395                        tracing::debug!("Failed to remove network {}: {}", network_id, e);
1396                    }
1397                }
1398
1399                tracing::info!("Docker NAT backend cleanup complete");
1400            };
1401
1402            // Try to use existing runtime first (if we're in async context)
1403            // Otherwise fall back to creating a new runtime
1404            if let Ok(handle) = tokio::runtime::Handle::try_current() {
1405                tokio::task::block_in_place(|| {
1406                    handle.block_on(cleanup);
1407                });
1408            } else if let Ok(rt) = tokio::runtime::Runtime::new() {
1409                rt.block_on(cleanup);
1410            } else {
1411                tracing::error!("Failed to create runtime for cleanup");
1412            }
1413        }
1414    }
1415}