// zlayer_overlay/bootstrap.rs

1//! Overlay network bootstrap functionality
2//!
3//! Provides initialization and joining capabilities for overlay networks,
4//! including keypair generation, interface creation, and peer management.
5
6use crate::allocator::IpAllocator;
7use crate::config::PeerInfo;
8use crate::dns::{peer_hostname, DnsConfig, DnsHandle, DnsServer, DEFAULT_DNS_PORT};
9use crate::error::{OverlayError, Result};
10use crate::transport::OverlayTransport;
11use serde::{Deserialize, Serialize};
12use std::net::{IpAddr, Ipv4Addr, SocketAddr};
13use std::path::{Path, PathBuf};
14use std::time::Duration;
15use tracing::{debug, info, warn};
16
/// Default overlay interface name for `ZLayer`
pub const DEFAULT_INTERFACE_NAME: &str = "zl-overlay0";

/// Default overlay listen port (51820 is the conventional WireGuard port)
pub const DEFAULT_WG_PORT: u16 = 51820;

/// Default overlay network CIDR
/// NOTE(review): not referenced in this module; presumably passed by
/// callers to `OverlayBootstrap::init_leader` — confirm at call sites.
pub const DEFAULT_OVERLAY_CIDR: &str = "10.200.0.0/16";

/// Default persistent keepalive interval (seconds), used when a peer does
/// not specify its own keepalive (see `PeerConfig::new` / `to_peer_info`)
pub const DEFAULT_KEEPALIVE_SECS: u16 = 25;
28
/// Overlay network bootstrap configuration
///
/// Contains all configuration needed to initialize and manage
/// an overlay network on a node. Persisted to disk as part of
/// `BootstrapState` (see `OverlayBootstrap::save`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BootstrapConfig {
    /// Network CIDR (e.g., "10.200.0.0/16")
    pub cidr: String,

    /// This node's overlay IP address
    pub node_ip: Ipv4Addr,

    /// Overlay interface name
    pub interface: String,

    /// Overlay listen port
    pub port: u16,

    /// This node's overlay private key
    /// NOTE(review): serialized into the on-disk state file in plaintext;
    /// ensure the data directory has restrictive permissions.
    pub private_key: String,

    /// This node's overlay public key
    pub public_key: String,

    /// Whether this node is the cluster leader
    pub is_leader: bool,

    /// Creation timestamp (Unix epoch seconds)
    pub created_at: u64,
}
59
60impl BootstrapConfig {
61    /// Get the overlay IP with /32 prefix for allowed IPs
62    #[must_use]
63    pub fn allowed_ip(&self) -> String {
64        format!("{}/32", self.node_ip)
65    }
66}
67
/// Peer configuration for overlay network
///
/// Describes one remote node: identity, key material, reachable endpoint,
/// and its address inside the overlay. Persisted as part of `BootstrapState`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PeerConfig {
    /// Peer's node ID (for identification)
    pub node_id: String,

    /// Peer's overlay public key
    pub public_key: String,

    /// Peer's public endpoint (host:port)
    pub endpoint: String,

    /// Peer's overlay IP address
    pub overlay_ip: Ipv4Addr,

    /// Optional persistent keepalive interval in seconds
    /// (defaults to `None` when absent from serialized data)
    #[serde(default)]
    pub keepalive: Option<u16>,

    /// Optional custom DNS hostname for this peer (without zone suffix)
    /// If provided, the peer will be registered with this name in addition
    /// to the auto-generated IP-based hostname.
    #[serde(default)]
    pub hostname: Option<String>,
}
93
94impl PeerConfig {
95    /// Create a new peer configuration
96    #[must_use]
97    pub fn new(
98        node_id: String,
99        public_key: String,
100        endpoint: String,
101        overlay_ip: Ipv4Addr,
102    ) -> Self {
103        Self {
104            node_id,
105            public_key,
106            endpoint,
107            overlay_ip,
108            keepalive: Some(DEFAULT_KEEPALIVE_SECS),
109            hostname: None,
110        }
111    }
112
113    /// Set a custom DNS hostname for this peer
114    #[must_use]
115    pub fn with_hostname(mut self, hostname: impl Into<String>) -> Self {
116        self.hostname = Some(hostname.into());
117        self
118    }
119
120    /// Convert to `PeerInfo` for overlay transport configuration
121    ///
122    /// # Errors
123    ///
124    /// Returns an error if the endpoint address cannot be parsed.
125    pub fn to_peer_info(&self) -> std::result::Result<PeerInfo, Box<dyn std::error::Error>> {
126        let endpoint: SocketAddr = self.endpoint.parse()?;
127        let keepalive =
128            Duration::from_secs(u64::from(self.keepalive.unwrap_or(DEFAULT_KEEPALIVE_SECS)));
129
130        Ok(PeerInfo::new(
131            self.public_key.clone(),
132            endpoint,
133            &format!("{}/32", self.overlay_ip),
134            keepalive,
135        ))
136    }
137}
138
/// Persistent state for the overlay bootstrap
///
/// This is the exact JSON document written to `overlay_bootstrap.json`
/// by `OverlayBootstrap::save` and read back by `OverlayBootstrap::load`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BootstrapState {
    /// Bootstrap configuration
    pub config: BootstrapConfig,

    /// List of configured peers
    pub peers: Vec<PeerConfig>,

    /// IP allocator state (only for leader)
    /// Omitted from JSON entirely on non-leader nodes.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub allocator_state: Option<crate::allocator::IpAllocatorState>,
}
152
153/// Bootstrap manager for overlay network
154///
155/// Handles overlay network initialization, peer management,
156/// and overlay transport interface configuration.
157pub struct OverlayBootstrap {
158    /// Bootstrap configuration
159    config: BootstrapConfig,
160
161    /// Configured peers
162    peers: Vec<PeerConfig>,
163
164    /// Data directory for persistent state
165    data_dir: PathBuf,
166
167    /// IP allocator (only for leader nodes)
168    allocator: Option<IpAllocator>,
169
170    /// DNS configuration (opt-in)
171    dns_config: Option<DnsConfig>,
172
173    /// DNS handle for managing records (available after `start()` if DNS enabled)
174    dns_handle: Option<DnsHandle>,
175
176    /// Overlay transport (boringtun device handle).
177    ///
178    /// Must be kept alive for the overlay network lifetime; dropping the
179    /// transport destroys the TUN device.
180    transport: Option<OverlayTransport>,
181}
182
183impl OverlayBootstrap {
184    /// Initialize as cluster leader (first node in the overlay)
185    ///
186    /// This generates a new overlay keypair, allocates the first IP
187    /// in the CIDR range, and prepares the node as the overlay leader.
188    ///
189    /// # Arguments
190    /// * `cidr` - Overlay network CIDR (e.g., "10.200.0.0/16")
191    /// * `port` - Overlay listen port
192    /// * `data_dir` - Directory for persistent state
193    ///
194    /// # Example
195    /// ```ignore
196    /// let bootstrap = OverlayBootstrap::init_leader(
197    ///     "10.200.0.0/16",
198    ///     51820,
199    ///     Path::new("/var/lib/zlayer"),
200    /// ).await?;
201    /// ```
202    ///
203    /// # Errors
204    ///
205    /// Returns an error if already initialized, key generation fails, or state cannot be saved.
206    pub async fn init_leader(cidr: &str, port: u16, data_dir: &Path) -> Result<Self> {
207        // Check if already initialized
208        let config_path = data_dir.join("overlay_bootstrap.json");
209        if config_path.exists() {
210            return Err(OverlayError::AlreadyInitialized(
211                config_path.display().to_string(),
212            ));
213        }
214
215        // Ensure data directory exists
216        tokio::fs::create_dir_all(data_dir).await?;
217
218        // Generate overlay keypair
219        info!("Generating overlay keypair for leader");
220        let (private_key, public_key) = OverlayTransport::generate_keys()
221            .await
222            .map_err(|e| OverlayError::TransportCommand(e.to_string()))?;
223
224        // Initialize IP allocator and allocate first IP for leader
225        let mut allocator = IpAllocator::new(cidr)?;
226        let node_ip = allocator.allocate_first()?;
227
228        info!(node_ip = %node_ip, cidr = cidr, "Allocated leader IP");
229
230        // Create config
231        let config = BootstrapConfig {
232            cidr: cidr.to_string(),
233            node_ip,
234            interface: DEFAULT_INTERFACE_NAME.to_string(),
235            port,
236            private_key,
237            public_key,
238            is_leader: true,
239            created_at: current_timestamp(),
240        };
241
242        let bootstrap = Self {
243            config,
244            peers: Vec::new(),
245            data_dir: data_dir.to_path_buf(),
246            allocator: Some(allocator),
247            dns_config: None,
248            dns_handle: None,
249            transport: None,
250        };
251
252        // Persist state
253        bootstrap.save().await?;
254
255        Ok(bootstrap)
256    }
257
258    /// Join an existing overlay network
259    ///
260    /// Generates a new overlay keypair and configures this node
261    /// to connect to an existing overlay network.
262    ///
263    /// # Arguments
264    /// * `leader_cidr` - Leader's overlay network CIDR
265    /// * `leader_endpoint` - Leader's public endpoint (host:port)
266    /// * `leader_public_key` - Leader's overlay public key
267    /// * `leader_overlay_ip` - Leader's overlay IP address
268    /// * `allocated_ip` - IP address allocated for this node by the leader
269    /// * `port` - Overlay listen port for this node
270    /// * `data_dir` - Directory for persistent state
271    ///
272    /// # Errors
273    ///
274    /// Returns an error if already initialized, key generation fails, or state cannot be saved.
275    pub async fn join(
276        leader_cidr: &str,
277        leader_endpoint: &str,
278        leader_public_key: &str,
279        leader_overlay_ip: Ipv4Addr,
280        allocated_ip: Ipv4Addr,
281        port: u16,
282        data_dir: &Path,
283    ) -> Result<Self> {
284        // Check if already initialized
285        let config_path = data_dir.join("overlay_bootstrap.json");
286        if config_path.exists() {
287            return Err(OverlayError::AlreadyInitialized(
288                config_path.display().to_string(),
289            ));
290        }
291
292        // Ensure data directory exists
293        tokio::fs::create_dir_all(data_dir).await?;
294
295        // Generate overlay keypair for this node
296        info!("Generating overlay keypair for joining node");
297        let (private_key, public_key) = OverlayTransport::generate_keys()
298            .await
299            .map_err(|e| OverlayError::TransportCommand(e.to_string()))?;
300
301        // Create config
302        let config = BootstrapConfig {
303            cidr: leader_cidr.to_string(),
304            node_ip: allocated_ip,
305            interface: DEFAULT_INTERFACE_NAME.to_string(),
306            port,
307            private_key,
308            public_key,
309            is_leader: false,
310            created_at: current_timestamp(),
311        };
312
313        // Add leader as the first peer
314        let leader_peer = PeerConfig {
315            node_id: "leader".to_string(),
316            public_key: leader_public_key.to_string(),
317            endpoint: leader_endpoint.to_string(),
318            overlay_ip: leader_overlay_ip,
319            keepalive: Some(DEFAULT_KEEPALIVE_SECS),
320            hostname: None, // Leader gets its own DNS alias "leader.zone"
321        };
322
323        info!(
324            leader_endpoint = leader_endpoint,
325            overlay_ip = %allocated_ip,
326            "Configured leader as peer"
327        );
328
329        let bootstrap = Self {
330            config,
331            peers: vec![leader_peer],
332            data_dir: data_dir.to_path_buf(),
333            allocator: None, // Workers don't manage IP allocation
334            dns_config: None,
335            dns_handle: None,
336            transport: None,
337        };
338
339        // Persist state
340        bootstrap.save().await?;
341
342        Ok(bootstrap)
343    }
344
345    /// Load existing bootstrap state from disk
346    ///
347    /// # Errors
348    ///
349    /// Returns an error if the state file is missing, unreadable, or invalid.
350    pub async fn load(data_dir: &Path) -> Result<Self> {
351        let config_path = data_dir.join("overlay_bootstrap.json");
352
353        if !config_path.exists() {
354            return Err(OverlayError::NotInitialized);
355        }
356
357        let contents = tokio::fs::read_to_string(&config_path).await?;
358        let state: BootstrapState = serde_json::from_str(&contents)?;
359
360        let allocator = if let Some(alloc_state) = state.allocator_state {
361            Some(IpAllocator::from_state(alloc_state)?)
362        } else {
363            None
364        };
365
366        Ok(Self {
367            config: state.config,
368            peers: state.peers,
369            data_dir: data_dir.to_path_buf(),
370            allocator,
371            dns_config: None, // DNS config must be re-enabled after load
372            dns_handle: None,
373            transport: None,
374        })
375    }
376
377    /// Save bootstrap state to disk
378    ///
379    /// # Errors
380    ///
381    /// Returns an error if serialization or file writing fails.
382    pub async fn save(&self) -> Result<()> {
383        let config_path = self.data_dir.join("overlay_bootstrap.json");
384
385        let state = BootstrapState {
386            config: self.config.clone(),
387            peers: self.peers.clone(),
388            allocator_state: self
389                .allocator
390                .as_ref()
391                .map(super::allocator::IpAllocator::to_state),
392        };
393
394        let contents = serde_json::to_string_pretty(&state)?;
395        tokio::fs::write(&config_path, contents).await?;
396
397        debug!(path = %config_path.display(), "Saved bootstrap state");
398        Ok(())
399    }
400
401    /// Enable DNS service discovery for the overlay network
402    ///
403    /// When DNS is enabled, peers are automatically registered with both:
404    /// - An IP-based hostname: `node-X-Y.zone` (e.g., `node-0-5.overlay.local`)
405    /// - A custom hostname if provided in `PeerConfig`
406    ///
407    /// The leader node additionally gets a `leader.zone` alias.
408    ///
409    /// # Arguments
410    /// * `zone` - DNS zone (e.g., "overlay.local.")
411    /// * `port` - DNS server port (default: 15353 to avoid conflicts)
412    ///
413    /// # Example
414    /// ```ignore
415    /// let bootstrap = OverlayBootstrap::init_leader(cidr, port, data_dir)
416    ///     .await?
417    ///     .with_dns("overlay.local.", 15353)?;
418    /// bootstrap.start().await?;
419    /// ```
420    ///
421    /// # Errors
422    ///
423    /// This method currently always succeeds but returns `Result` for API consistency.
424    pub fn with_dns(mut self, zone: &str, port: u16) -> Result<Self> {
425        self.dns_config = Some(DnsConfig {
426            zone: zone.to_string(),
427            port,
428            bind_addr: IpAddr::V4(self.config.node_ip),
429        });
430        Ok(self)
431    }
432
433    /// Enable DNS with default port (15353)
434    ///
435    /// # Errors
436    ///
437    /// This method currently always succeeds but returns `Result` for API consistency.
438    pub fn with_dns_default(self, zone: &str) -> Result<Self> {
439        self.with_dns(zone, DEFAULT_DNS_PORT)
440    }
441
442    /// Get the DNS handle for managing records
443    ///
444    /// Returns None if DNS is not enabled or `start()` hasn't been called yet.
445    #[must_use]
446    pub fn dns_handle(&self) -> Option<&DnsHandle> {
447        self.dns_handle.as_ref()
448    }
449
450    /// Check if DNS is enabled
451    #[must_use]
452    pub fn dns_enabled(&self) -> bool {
453        self.dns_config.is_some()
454    }
455
456    /// Start the overlay network (create and configure overlay transport)
457    ///
458    /// This creates the boringtun TUN interface, assigns the overlay IP,
459    /// configures all known peers, and starts the DNS server if enabled.
460    ///
461    /// # Errors
462    ///
463    /// Returns an error if interface creation, peer configuration, or DNS startup fails.
464    pub async fn start(&mut self) -> Result<()> {
465        info!(
466            interface = %self.config.interface,
467            overlay_ip = %self.config.node_ip,
468            port = self.config.port,
469            dns_enabled = self.dns_config.is_some(),
470            "Starting overlay network"
471        );
472
473        // Convert our config to OverlayConfig
474        let overlay_config = crate::config::OverlayConfig {
475            local_endpoint: SocketAddr::new(
476                std::net::IpAddr::V4(std::net::Ipv4Addr::UNSPECIFIED),
477                self.config.port,
478            ),
479            private_key: self.config.private_key.clone(),
480            public_key: self.config.public_key.clone(),
481            overlay_cidr: self.config.allowed_ip(),
482            peer_discovery_interval: Duration::from_secs(30),
483        };
484
485        // Create overlay transport
486        let mut transport = OverlayTransport::new(overlay_config, self.config.interface.clone());
487
488        // Create the interface
489        transport
490            .create_interface()
491            .await
492            .map_err(|e| OverlayError::TransportCommand(e.to_string()))?;
493
494        // Convert peers to PeerInfo
495        let peer_infos: Vec<PeerInfo> = self
496            .peers
497            .iter()
498            .filter_map(|p| match p.to_peer_info() {
499                Ok(info) => Some(info),
500                Err(e) => {
501                    warn!(peer = %p.node_id, error = %e, "Failed to parse peer info");
502                    None
503                }
504            })
505            .collect();
506
507        // Configure transport with peers
508        transport
509            .configure(&peer_infos)
510            .await
511            .map_err(|e| OverlayError::TransportCommand(e.to_string()))?;
512
513        // Store the transport so the TUN device stays alive for the overlay
514        // lifetime. Dropping the OverlayTransport destroys the boringtun device.
515        self.transport = Some(transport);
516
517        // Start DNS server if configured
518        if let Some(dns_config) = &self.dns_config {
519            info!(
520                zone = %dns_config.zone,
521                port = dns_config.port,
522                "Starting DNS server for overlay"
523            );
524
525            let dns_server =
526                DnsServer::from_config(dns_config).map_err(|e| OverlayError::Dns(e.to_string()))?;
527
528            // Register self with IP-based hostname
529            let self_hostname = peer_hostname(self.config.node_ip);
530            dns_server
531                .add_record(&self_hostname, self.config.node_ip)
532                .await
533                .map_err(|e| OverlayError::Dns(e.to_string()))?;
534
535            // If leader, also register "leader" alias
536            if self.config.is_leader {
537                dns_server
538                    .add_record("leader", self.config.node_ip)
539                    .await
540                    .map_err(|e| OverlayError::Dns(e.to_string()))?;
541                debug!(ip = %self.config.node_ip, "Registered leader.{}", dns_config.zone);
542            }
543
544            // Register existing peers
545            for peer in &self.peers {
546                // Always register IP-based hostname
547                let hostname = peer_hostname(peer.overlay_ip);
548                dns_server
549                    .add_record(&hostname, peer.overlay_ip)
550                    .await
551                    .map_err(|e| OverlayError::Dns(e.to_string()))?;
552
553                // Also register custom hostname if provided
554                if let Some(custom) = &peer.hostname {
555                    dns_server
556                        .add_record(custom, peer.overlay_ip)
557                        .await
558                        .map_err(|e| OverlayError::Dns(e.to_string()))?;
559                    debug!(
560                        hostname = custom,
561                        ip = %peer.overlay_ip,
562                        "Registered custom hostname"
563                    );
564                }
565            }
566
567            // Start the DNS server and store the handle
568            let handle = dns_server
569                .start()
570                .await
571                .map_err(|e| OverlayError::Dns(e.to_string()))?;
572            self.dns_handle = Some(handle);
573
574            info!("DNS server started successfully");
575        }
576
577        info!("Overlay network started successfully");
578        Ok(())
579    }
580
581    /// Stop the overlay network (shut down the boringtun transport)
582    ///
583    /// # Errors
584    ///
585    /// This method currently always succeeds but returns `Result` for API consistency.
586    #[allow(clippy::unused_async)]
587    pub async fn stop(&mut self) -> Result<()> {
588        info!(interface = %self.config.interface, "Stopping overlay network");
589
590        if let Some(mut transport) = self.transport.take() {
591            transport.shutdown();
592        }
593
594        Ok(())
595    }
596
597    /// Add a new peer to the overlay network
598    ///
599    /// For leader nodes, this also allocates an IP address for the peer.
600    ///
601    /// # Errors
602    ///
603    /// Returns an error if no IPs are available, DNS registration fails, or state cannot be saved.
604    pub async fn add_peer(&mut self, mut peer: PeerConfig) -> Result<Ipv4Addr> {
605        // If we're the leader, allocate an IP for this peer
606        let overlay_ip = if let Some(ref mut allocator) = self.allocator {
607            let ip = allocator.allocate().ok_or(OverlayError::NoAvailableIps)?;
608            peer.overlay_ip = ip;
609            ip
610        } else {
611            peer.overlay_ip
612        };
613
614        // Add peer to overlay transport via UAPI
615        if let Ok(peer_info) = peer.to_peer_info() {
616            // Prefer the stored transport; fall back to a temporary instance
617            // (UAPI calls work via the Unix socket regardless of DeviceHandle)
618            let transport_ref: Option<&OverlayTransport> = self.transport.as_ref();
619
620            let result = if let Some(t) = transport_ref {
621                t.add_peer(&peer_info).await
622            } else {
623                let overlay_config = crate::config::OverlayConfig {
624                    local_endpoint: SocketAddr::new(
625                        std::net::IpAddr::V4(std::net::Ipv4Addr::UNSPECIFIED),
626                        self.config.port,
627                    ),
628                    private_key: self.config.private_key.clone(),
629                    public_key: self.config.public_key.clone(),
630                    overlay_cidr: self.config.allowed_ip(),
631                    peer_discovery_interval: Duration::from_secs(30),
632                };
633                let tmp = OverlayTransport::new(overlay_config, self.config.interface.clone());
634                tmp.add_peer(&peer_info).await
635            };
636
637            match result {
638                Ok(()) => debug!(peer = %peer.node_id, "Added peer to overlay"),
639                Err(e) => {
640                    warn!(peer = %peer.node_id, error = %e, "Failed to add peer to overlay (interface may not be up)");
641                }
642            }
643        }
644
645        // Register peer in DNS if enabled
646        if let Some(ref dns_handle) = self.dns_handle {
647            // IP-based hostname
648            let hostname = peer_hostname(overlay_ip);
649            dns_handle
650                .add_record(&hostname, overlay_ip)
651                .await
652                .map_err(|e| OverlayError::Dns(e.to_string()))?;
653            debug!(hostname = %hostname, ip = %overlay_ip, "Registered peer in DNS");
654
655            // Custom hostname alias if provided
656            if let Some(ref custom) = peer.hostname {
657                dns_handle
658                    .add_record(custom, overlay_ip)
659                    .await
660                    .map_err(|e| OverlayError::Dns(e.to_string()))?;
661                debug!(hostname = %custom, ip = %overlay_ip, "Registered custom hostname in DNS");
662            }
663        }
664
665        // Add to peer list
666        self.peers.push(peer);
667
668        // Persist state
669        self.save().await?;
670
671        info!(peer_ip = %overlay_ip, "Added peer to overlay");
672        Ok(overlay_ip)
673    }
674
675    /// Remove a peer from the overlay network
676    ///
677    /// # Errors
678    ///
679    /// Returns an error if the peer is not found, DNS removal fails, or state cannot be saved.
680    pub async fn remove_peer(&mut self, public_key: &str) -> Result<()> {
681        // Find the peer
682        let peer_idx = self
683            .peers
684            .iter()
685            .position(|p| p.public_key == public_key)
686            .ok_or_else(|| OverlayError::PeerNotFound(public_key.to_string()))?;
687
688        let peer = &self.peers[peer_idx];
689
690        // Capture peer info for DNS removal before we lose the reference
691        let peer_overlay_ip = peer.overlay_ip;
692        let peer_custom_hostname = peer.hostname.clone();
693
694        // Release IP if we're managing allocation
695        if let Some(ref mut allocator) = self.allocator {
696            allocator.release(peer_overlay_ip);
697        }
698
699        // Remove from DNS if enabled
700        if let Some(ref dns_handle) = self.dns_handle {
701            // Remove IP-based hostname
702            let hostname = peer_hostname(peer_overlay_ip);
703            dns_handle
704                .remove_record(&hostname)
705                .await
706                .map_err(|e| OverlayError::Dns(e.to_string()))?;
707            debug!(hostname = %hostname, "Removed peer from DNS");
708
709            // Remove custom hostname if it was set
710            if let Some(ref custom) = peer_custom_hostname {
711                dns_handle
712                    .remove_record(custom)
713                    .await
714                    .map_err(|e| OverlayError::Dns(e.to_string()))?;
715                debug!(hostname = %custom, "Removed custom hostname from DNS");
716            }
717        }
718
719        // Remove peer from overlay transport via UAPI
720        let transport_ref: Option<&OverlayTransport> = self.transport.as_ref();
721
722        let result = if let Some(t) = transport_ref {
723            t.remove_peer(public_key).await
724        } else {
725            let overlay_config = crate::config::OverlayConfig {
726                local_endpoint: SocketAddr::new(
727                    std::net::IpAddr::V4(std::net::Ipv4Addr::UNSPECIFIED),
728                    self.config.port,
729                ),
730                private_key: self.config.private_key.clone(),
731                public_key: self.config.public_key.clone(),
732                overlay_cidr: self.config.allowed_ip(),
733                peer_discovery_interval: Duration::from_secs(30),
734            };
735            let tmp = OverlayTransport::new(overlay_config, self.config.interface.clone());
736            tmp.remove_peer(public_key).await
737        };
738
739        match result {
740            Ok(()) => debug!(public_key = public_key, "Removed peer from overlay"),
741            Err(e) => {
742                warn!(public_key = public_key, error = %e, "Failed to remove peer from overlay");
743            }
744        }
745
746        // Remove from peer list
747        self.peers.remove(peer_idx);
748
749        // Persist state
750        self.save().await?;
751
752        info!(public_key = public_key, "Removed peer from overlay");
753        Ok(())
754    }
755
756    /// Get this node's public key
757    #[must_use]
758    pub fn public_key(&self) -> &str {
759        &self.config.public_key
760    }
761
762    /// Get this node's overlay IP
763    #[must_use]
764    pub fn node_ip(&self) -> Ipv4Addr {
765        self.config.node_ip
766    }
767
768    /// Get the overlay CIDR
769    #[must_use]
770    pub fn cidr(&self) -> &str {
771        &self.config.cidr
772    }
773
774    /// Get the overlay interface name
775    #[must_use]
776    pub fn interface(&self) -> &str {
777        &self.config.interface
778    }
779
780    /// Get the overlay listen port
781    #[must_use]
782    pub fn port(&self) -> u16 {
783        self.config.port
784    }
785
786    /// Check if this node is the leader
787    #[must_use]
788    pub fn is_leader(&self) -> bool {
789        self.config.is_leader
790    }
791
792    /// Get configured peers
793    #[must_use]
794    pub fn peers(&self) -> &[PeerConfig] {
795        &self.peers
796    }
797
798    /// Get the bootstrap config
799    #[must_use]
800    pub fn config(&self) -> &BootstrapConfig {
801        &self.config
802    }
803
804    /// Allocate an IP for a new peer (leader only)
805    ///
806    /// This is used by the control plane when processing join requests.
807    ///
808    /// # Errors
809    ///
810    /// Returns an error if this node is not a leader or no IPs are available.
811    pub fn allocate_peer_ip(&mut self) -> Result<Ipv4Addr> {
812        let allocator = self
813            .allocator
814            .as_mut()
815            .ok_or(OverlayError::Config("Not a leader node".to_string()))?;
816
817        allocator.allocate().ok_or(OverlayError::NoAvailableIps)
818    }
819
820    /// Get IP allocation statistics (leader only)
821    #[must_use]
822    #[allow(clippy::cast_possible_truncation)]
823    pub fn allocation_stats(&self) -> Option<(u32, u32)> {
824        self.allocator
825            .as_ref()
826            .map(|a| (a.allocated_count() as u32, a.total_hosts()))
827    }
828}
829
/// Current Unix time in whole seconds.
fn current_timestamp() -> u64 {
    match std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH) {
        Ok(elapsed) => elapsed.as_secs(),
        // Clock set before the epoch: fall back to 0 rather than panicking.
        Err(_) => 0,
    }
}
837
#[cfg(test)]
mod tests {
    use super::*;

    /// Build a leader `BootstrapConfig` on 10.200.0.1 for the tests below.
    fn make_leader_config(private_key: &str, public_key: &str, created_at: u64) -> BootstrapConfig {
        BootstrapConfig {
            cidr: "10.200.0.0/16".to_string(),
            node_ip: "10.200.0.1".parse().unwrap(),
            interface: DEFAULT_INTERFACE_NAME.to_string(),
            port: DEFAULT_WG_PORT,
            private_key: private_key.to_string(),
            public_key: public_key.to_string(),
            is_leader: true,
            created_at,
        }
    }

    /// Build the standard test peer at 10.200.0.5.
    fn make_peer() -> PeerConfig {
        PeerConfig::new(
            "node-1".to_string(),
            "pubkey123".to_string(),
            "192.168.1.100:51820".to_string(),
            "10.200.0.5".parse().unwrap(),
        )
    }

    #[test]
    fn test_bootstrap_config_allowed_ip() {
        let cfg = make_leader_config("test_private", "test_public", 0);
        assert_eq!(cfg.allowed_ip(), "10.200.0.1/32");
    }

    #[test]
    fn test_peer_config_new() {
        let peer = make_peer();
        assert_eq!(peer.node_id, "node-1");
        assert_eq!(peer.keepalive, Some(DEFAULT_KEEPALIVE_SECS));
        assert_eq!(peer.hostname, None);
    }

    #[test]
    fn test_peer_config_with_hostname() {
        let named = make_peer().with_hostname("web-server");
        assert_eq!(named.hostname, Some("web-server".to_string()));
    }

    #[test]
    fn test_peer_config_to_peer_info() {
        let info = make_peer().to_peer_info().unwrap();
        assert_eq!(info.public_key, "pubkey123");
        assert_eq!(info.allowed_ips, "10.200.0.5/32");
    }

    #[test]
    fn test_bootstrap_state_serialization() {
        let state = BootstrapState {
            config: make_leader_config("private", "public", 1_234_567_890),
            peers: vec![],
            allocator_state: None,
        };

        // Round-trip through JSON and check key fields survive.
        let json = serde_json::to_string_pretty(&state).unwrap();
        let round_trip: BootstrapState = serde_json::from_str(&json).unwrap();

        assert_eq!(round_trip.config.cidr, "10.200.0.0/16");
        assert_eq!(round_trip.config.node_ip.to_string(), "10.200.0.1");
    }
}