// zlayer_overlay/bootstrap.rs

1//! Overlay network bootstrap functionality
2//!
3//! Provides initialization and joining capabilities for overlay networks,
4//! including keypair generation, interface creation, and peer management.
5
6use crate::allocator::IpAllocator;
7use crate::config::PeerInfo;
8use crate::dns::{peer_hostname, DnsConfig, DnsHandle, DnsServer, DEFAULT_DNS_PORT};
9use crate::error::{OverlayError, Result};
10use crate::transport::OverlayTransport;
11use serde::{Deserialize, Serialize};
12use std::net::{IpAddr, Ipv4Addr, SocketAddr};
13use std::path::{Path, PathBuf};
14use std::time::Duration;
15use tracing::{debug, info, warn};
16
/// Default overlay interface name for ZLayer
pub const DEFAULT_INTERFACE_NAME: &str = "zl-overlay0";

/// Default overlay listen port (the conventional WireGuard port)
pub const DEFAULT_WG_PORT: u16 = 51820;

/// Default overlay network CIDR
pub const DEFAULT_OVERLAY_CIDR: &str = "10.200.0.0/16";

/// Default persistent keepalive interval (seconds), used when a peer
/// does not specify its own keepalive (see `PeerConfig::new`)
pub const DEFAULT_KEEPALIVE_SECS: u16 = 25;
28
29/// Overlay network bootstrap configuration
30///
31/// Contains all configuration needed to initialize and manage
32/// an overlay network on a node.
/// Overlay network bootstrap configuration
///
/// Contains all configuration needed to initialize and manage
/// an overlay network on a node.
///
/// NOTE(review): this struct (including `private_key`) is serialized to
/// `overlay_bootstrap.json` by `OverlayBootstrap::save` — confirm the state
/// file is written with restrictive permissions.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BootstrapConfig {
    /// Network CIDR (e.g., "10.200.0.0/16")
    pub cidr: String,

    /// This node's overlay IP address
    pub node_ip: Ipv4Addr,

    /// Overlay interface name
    pub interface: String,

    /// Overlay listen port
    pub port: u16,

    /// This node's overlay private key
    pub private_key: String,

    /// This node's overlay public key
    pub public_key: String,

    /// Whether this node is the cluster leader
    pub is_leader: bool,

    /// Creation timestamp (Unix epoch seconds)
    pub created_at: u64,
}
59
60impl BootstrapConfig {
61    /// Get the overlay IP with /32 prefix for allowed IPs
62    pub fn allowed_ip(&self) -> String {
63        format!("{}/32", self.node_ip)
64    }
65}
66
67/// Peer configuration for overlay network
/// Peer configuration for overlay network
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PeerConfig {
    /// Peer's node ID (for identification)
    pub node_id: String,

    /// Peer's overlay public key
    pub public_key: String,

    /// Peer's public endpoint (host:port); must parse as a `SocketAddr`
    /// (see `to_peer_info`), so the host part must be an IP address
    pub endpoint: String,

    /// Peer's overlay IP address
    pub overlay_ip: Ipv4Addr,

    /// Optional persistent keepalive interval in seconds
    /// (falls back to `DEFAULT_KEEPALIVE_SECS` when `None`)
    #[serde(default)]
    pub keepalive: Option<u16>,

    /// Optional custom DNS hostname for this peer (without zone suffix)
    /// If provided, the peer will be registered with this name in addition
    /// to the auto-generated IP-based hostname.
    #[serde(default)]
    pub hostname: Option<String>,
}
92
93impl PeerConfig {
94    /// Create a new peer configuration
95    pub fn new(
96        node_id: String,
97        public_key: String,
98        endpoint: String,
99        overlay_ip: Ipv4Addr,
100    ) -> Self {
101        Self {
102            node_id,
103            public_key,
104            endpoint,
105            overlay_ip,
106            keepalive: Some(DEFAULT_KEEPALIVE_SECS),
107            hostname: None,
108        }
109    }
110
111    /// Set a custom DNS hostname for this peer
112    pub fn with_hostname(mut self, hostname: impl Into<String>) -> Self {
113        self.hostname = Some(hostname.into());
114        self
115    }
116
117    /// Convert to PeerInfo for overlay transport configuration
118    pub fn to_peer_info(&self) -> std::result::Result<PeerInfo, Box<dyn std::error::Error>> {
119        let endpoint: SocketAddr = self.endpoint.parse()?;
120        let keepalive =
121            Duration::from_secs(self.keepalive.unwrap_or(DEFAULT_KEEPALIVE_SECS) as u64);
122
123        Ok(PeerInfo::new(
124            self.public_key.clone(),
125            endpoint,
126            &format!("{}/32", self.overlay_ip),
127            keepalive,
128        ))
129    }
130}
131
132/// Persistent state for the overlay bootstrap
/// Persistent state for the overlay bootstrap
///
/// This is the exact shape written to / read from `overlay_bootstrap.json`
/// by `OverlayBootstrap::save` and `OverlayBootstrap::load`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BootstrapState {
    /// Bootstrap configuration
    pub config: BootstrapConfig,

    /// List of configured peers
    pub peers: Vec<PeerConfig>,

    /// IP allocator state (only for leader; omitted from JSON when `None`)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub allocator_state: Option<crate::allocator::IpAllocatorState>,
}
145
146/// Bootstrap manager for overlay network
147///
148/// Handles overlay network initialization, peer management,
149/// and overlay transport interface configuration.
/// Bootstrap manager for overlay network
///
/// Handles overlay network initialization, peer management,
/// and overlay transport interface configuration.
///
/// Construct via `init_leader`, `join`, or `load`; runtime resources
/// (`dns_handle`, `transport`) are only populated after `start()`.
pub struct OverlayBootstrap {
    /// Bootstrap configuration
    config: BootstrapConfig,

    /// Configured peers
    peers: Vec<PeerConfig>,

    /// Data directory for persistent state
    data_dir: PathBuf,

    /// IP allocator (only for leader nodes)
    allocator: Option<IpAllocator>,

    /// DNS configuration (opt-in via `with_dns`)
    dns_config: Option<DnsConfig>,

    /// DNS handle for managing records (available after start() if DNS enabled)
    dns_handle: Option<DnsHandle>,

    /// Overlay transport (boringtun device handle).
    ///
    /// Must be kept alive for the overlay network lifetime; dropping the
    /// transport destroys the TUN device.
    transport: Option<OverlayTransport>,
}
175
176impl OverlayBootstrap {
    /// Initialize as cluster leader (first node in the overlay)
    ///
    /// This generates a new overlay keypair, allocates the first IP
    /// in the CIDR range, and prepares the node as the overlay leader.
    /// The resulting state is persisted to `overlay_bootstrap.json`
    /// inside `data_dir` before returning.
    ///
    /// # Arguments
    /// * `cidr` - Overlay network CIDR (e.g., "10.200.0.0/16")
    /// * `port` - Overlay listen port
    /// * `data_dir` - Directory for persistent state
    ///
    /// # Errors
    /// Returns [`OverlayError::AlreadyInitialized`] if a state file already
    /// exists in `data_dir`; otherwise propagates key-generation, allocator,
    /// I/O, or serialization errors.
    ///
    /// # Example
    /// ```ignore
    /// let bootstrap = OverlayBootstrap::init_leader(
    ///     "10.200.0.0/16",
    ///     51820,
    ///     Path::new("/var/lib/zlayer"),
    /// ).await?;
    /// ```
    pub async fn init_leader(cidr: &str, port: u16, data_dir: &Path) -> Result<Self> {
        // Check if already initialized — refuse to overwrite existing state
        let config_path = data_dir.join("overlay_bootstrap.json");
        if config_path.exists() {
            return Err(OverlayError::AlreadyInitialized(
                config_path.display().to_string(),
            ));
        }

        // Ensure data directory exists
        tokio::fs::create_dir_all(data_dir).await?;

        // Generate overlay keypair
        info!("Generating overlay keypair for leader");
        let (private_key, public_key) = OverlayTransport::generate_keys()
            .await
            .map_err(|e| OverlayError::TransportCommand(e.to_string()))?;

        // Initialize IP allocator and allocate first IP for leader
        let mut allocator = IpAllocator::new(cidr)?;
        let node_ip = allocator.allocate_first()?;

        info!(node_ip = %node_ip, cidr = cidr, "Allocated leader IP");

        // Create config
        let config = BootstrapConfig {
            cidr: cidr.to_string(),
            node_ip,
            interface: DEFAULT_INTERFACE_NAME.to_string(),
            port,
            private_key,
            public_key,
            is_leader: true,
            created_at: current_timestamp(),
        };

        // DNS and transport are runtime-only; enabled later via
        // with_dns() / start().
        let bootstrap = Self {
            config,
            peers: Vec::new(),
            data_dir: data_dir.to_path_buf(),
            allocator: Some(allocator),
            dns_config: None,
            dns_handle: None,
            transport: None,
        };

        // Persist state
        bootstrap.save().await?;

        Ok(bootstrap)
    }
246
    /// Join an existing overlay network
    ///
    /// Generates a new overlay keypair and configures this node
    /// to connect to an existing overlay network. The leader is recorded
    /// as the first peer, and the state is persisted before returning.
    /// Worker nodes do not run an IP allocator; `allocated_ip` must have
    /// been issued by the leader.
    ///
    /// # Arguments
    /// * `leader_cidr` - Leader's overlay network CIDR
    /// * `leader_endpoint` - Leader's public endpoint (host:port)
    /// * `leader_public_key` - Leader's overlay public key
    /// * `leader_overlay_ip` - Leader's overlay IP address
    /// * `allocated_ip` - IP address allocated for this node by the leader
    /// * `port` - Overlay listen port for this node
    /// * `data_dir` - Directory for persistent state
    ///
    /// # Errors
    /// Returns [`OverlayError::AlreadyInitialized`] if a state file already
    /// exists in `data_dir`; otherwise propagates key-generation, I/O, or
    /// serialization errors.
    pub async fn join(
        leader_cidr: &str,
        leader_endpoint: &str,
        leader_public_key: &str,
        leader_overlay_ip: Ipv4Addr,
        allocated_ip: Ipv4Addr,
        port: u16,
        data_dir: &Path,
    ) -> Result<Self> {
        // Check if already initialized — refuse to overwrite existing state
        let config_path = data_dir.join("overlay_bootstrap.json");
        if config_path.exists() {
            return Err(OverlayError::AlreadyInitialized(
                config_path.display().to_string(),
            ));
        }

        // Ensure data directory exists
        tokio::fs::create_dir_all(data_dir).await?;

        // Generate overlay keypair for this node
        info!("Generating overlay keypair for joining node");
        let (private_key, public_key) = OverlayTransport::generate_keys()
            .await
            .map_err(|e| OverlayError::TransportCommand(e.to_string()))?;

        // Create config
        let config = BootstrapConfig {
            cidr: leader_cidr.to_string(),
            node_ip: allocated_ip,
            interface: DEFAULT_INTERFACE_NAME.to_string(),
            port,
            private_key,
            public_key,
            is_leader: false,
            created_at: current_timestamp(),
        };

        // Add leader as the first peer
        let leader_peer = PeerConfig {
            node_id: "leader".to_string(),
            public_key: leader_public_key.to_string(),
            endpoint: leader_endpoint.to_string(),
            overlay_ip: leader_overlay_ip,
            keepalive: Some(DEFAULT_KEEPALIVE_SECS),
            hostname: None, // Leader gets its own DNS alias "leader.zone"
        };

        info!(
            leader_endpoint = leader_endpoint,
            overlay_ip = %allocated_ip,
            "Configured leader as peer"
        );

        let bootstrap = Self {
            config,
            peers: vec![leader_peer],
            data_dir: data_dir.to_path_buf(),
            allocator: None, // Workers don't manage IP allocation
            dns_config: None,
            dns_handle: None,
            transport: None,
        };

        // Persist state
        bootstrap.save().await?;

        Ok(bootstrap)
    }
329
330    /// Load existing bootstrap state from disk
331    pub async fn load(data_dir: &Path) -> Result<Self> {
332        let config_path = data_dir.join("overlay_bootstrap.json");
333
334        if !config_path.exists() {
335            return Err(OverlayError::NotInitialized);
336        }
337
338        let contents = tokio::fs::read_to_string(&config_path).await?;
339        let state: BootstrapState = serde_json::from_str(&contents)?;
340
341        let allocator = if let Some(alloc_state) = state.allocator_state {
342            Some(IpAllocator::from_state(alloc_state)?)
343        } else {
344            None
345        };
346
347        Ok(Self {
348            config: state.config,
349            peers: state.peers,
350            data_dir: data_dir.to_path_buf(),
351            allocator,
352            dns_config: None, // DNS config must be re-enabled after load
353            dns_handle: None,
354            transport: None,
355        })
356    }
357
358    /// Save bootstrap state to disk
359    pub async fn save(&self) -> Result<()> {
360        let config_path = self.data_dir.join("overlay_bootstrap.json");
361
362        let state = BootstrapState {
363            config: self.config.clone(),
364            peers: self.peers.clone(),
365            allocator_state: self.allocator.as_ref().map(|a| a.to_state()),
366        };
367
368        let contents = serde_json::to_string_pretty(&state)?;
369        tokio::fs::write(&config_path, contents).await?;
370
371        debug!(path = %config_path.display(), "Saved bootstrap state");
372        Ok(())
373    }
374
    /// Enable DNS service discovery for the overlay network
    ///
    /// When DNS is enabled, peers are automatically registered with both:
    /// - An IP-based hostname: `node-X-Y.zone` (e.g., `node-0-5.overlay.local`)
    /// - A custom hostname if provided in PeerConfig
    ///
    /// The leader node additionally gets a `leader.zone` alias.
    ///
    /// The DNS server binds to this node's overlay IP, so the records are
    /// only reachable over the overlay. The server itself is started later
    /// by `start()`.
    ///
    /// # Arguments
    /// * `zone` - DNS zone (e.g., "overlay.local.")
    /// * `port` - DNS server port (default: 15353 to avoid conflicts)
    ///
    /// # Errors
    /// Currently always returns `Ok`; the `Result` is kept so validation can
    /// be added without breaking callers.
    ///
    /// # Example
    /// ```ignore
    /// let bootstrap = OverlayBootstrap::init_leader(cidr, port, data_dir)
    ///     .await?
    ///     .with_dns("overlay.local.", 15353)?;
    /// bootstrap.start().await?;
    /// ```
    pub fn with_dns(mut self, zone: &str, port: u16) -> Result<Self> {
        self.dns_config = Some(DnsConfig {
            zone: zone.to_string(),
            port,
            // Bind to the overlay IP so DNS is only served on the overlay.
            bind_addr: IpAddr::V4(self.config.node_ip),
        });
        Ok(self)
    }
402
    /// Enable DNS with default port (15353)
    ///
    /// Convenience wrapper over [`Self::with_dns`] using `DEFAULT_DNS_PORT`.
    pub fn with_dns_default(self, zone: &str) -> Result<Self> {
        self.with_dns(zone, DEFAULT_DNS_PORT)
    }

    /// Get the DNS handle for managing records
    ///
    /// Returns None if DNS is not enabled or start() hasn't been called yet.
    pub fn dns_handle(&self) -> Option<&DnsHandle> {
        self.dns_handle.as_ref()
    }

    /// Check if DNS is enabled (i.e., `with_dns` was called)
    pub fn dns_enabled(&self) -> bool {
        self.dns_config.is_some()
    }
419
    /// Start the overlay network (create and configure overlay transport)
    ///
    /// This creates the boringtun TUN interface, assigns the overlay IP,
    /// configures all known peers, and starts the DNS server if enabled.
    /// The transport handle is stored on `self` and must remain alive for
    /// the overlay lifetime (dropping it destroys the TUN device).
    ///
    /// # Errors
    /// Fails if interface creation or peer configuration fails, or — when
    /// DNS is enabled — if the DNS server cannot be built, records cannot be
    /// registered, or the server fails to start. Peers whose endpoint cannot
    /// be parsed are skipped with a warning rather than aborting startup.
    pub async fn start(&mut self) -> Result<()> {
        info!(
            interface = %self.config.interface,
            overlay_ip = %self.config.node_ip,
            port = self.config.port,
            dns_enabled = self.dns_config.is_some(),
            "Starting overlay network"
        );

        // Convert our config to OverlayConfig; listen on all interfaces
        // (0.0.0.0) at the configured overlay port.
        let overlay_config = crate::config::OverlayConfig {
            local_endpoint: SocketAddr::new(
                std::net::IpAddr::V4(std::net::Ipv4Addr::new(0, 0, 0, 0)),
                self.config.port,
            ),
            private_key: self.config.private_key.clone(),
            public_key: self.config.public_key.clone(),
            overlay_cidr: self.config.allowed_ip(),
            peer_discovery_interval: Duration::from_secs(30),
        };

        // Create overlay transport
        let mut transport = OverlayTransport::new(overlay_config, self.config.interface.clone());

        // Create the interface
        transport
            .create_interface()
            .await
            .map_err(|e| OverlayError::TransportCommand(e.to_string()))?;

        // Convert peers to PeerInfo; unparseable peers are logged and
        // skipped so one bad endpoint doesn't block startup.
        let peer_infos: Vec<PeerInfo> = self
            .peers
            .iter()
            .filter_map(|p| match p.to_peer_info() {
                Ok(info) => Some(info),
                Err(e) => {
                    warn!(peer = %p.node_id, error = %e, "Failed to parse peer info");
                    None
                }
            })
            .collect();

        // Configure transport with peers
        transport
            .configure(&peer_infos)
            .await
            .map_err(|e| OverlayError::TransportCommand(e.to_string()))?;

        // Store the transport so the TUN device stays alive for the overlay
        // lifetime. Dropping the OverlayTransport destroys the boringtun device.
        self.transport = Some(transport);

        // Start DNS server if configured
        if let Some(dns_config) = &self.dns_config {
            info!(
                zone = %dns_config.zone,
                port = dns_config.port,
                "Starting DNS server for overlay"
            );

            let dns_server =
                DnsServer::from_config(dns_config).map_err(|e| OverlayError::Dns(e.to_string()))?;

            // Register self with IP-based hostname
            let self_hostname = peer_hostname(self.config.node_ip);
            dns_server
                .add_record(&self_hostname, self.config.node_ip)
                .await
                .map_err(|e| OverlayError::Dns(e.to_string()))?;

            // If leader, also register "leader" alias
            if self.config.is_leader {
                dns_server
                    .add_record("leader", self.config.node_ip)
                    .await
                    .map_err(|e| OverlayError::Dns(e.to_string()))?;
                debug!(ip = %self.config.node_ip, "Registered leader.{}", dns_config.zone);
            }

            // Register existing peers
            for peer in &self.peers {
                // Always register IP-based hostname
                let hostname = peer_hostname(peer.overlay_ip);
                dns_server
                    .add_record(&hostname, peer.overlay_ip)
                    .await
                    .map_err(|e| OverlayError::Dns(e.to_string()))?;

                // Also register custom hostname if provided
                if let Some(custom) = &peer.hostname {
                    dns_server
                        .add_record(custom, peer.overlay_ip)
                        .await
                        .map_err(|e| OverlayError::Dns(e.to_string()))?;
                    debug!(
                        hostname = custom,
                        ip = %peer.overlay_ip,
                        "Registered custom hostname"
                    );
                }
            }

            // Start the DNS server and store the handle so records can be
            // managed later via dns_handle().
            let handle = dns_server
                .start()
                .await
                .map_err(|e| OverlayError::Dns(e.to_string()))?;
            self.dns_handle = Some(handle);

            info!("DNS server started successfully");
        }

        info!("Overlay network started successfully");
        Ok(())
    }
540
    /// Stop the overlay network (shut down the boringtun transport)
    ///
    /// Takes the stored transport (if any) and shuts it down; calling stop
    /// when the overlay was never started is a no-op.
    ///
    /// NOTE(review): `dns_handle` is not cleared or shut down here — confirm
    /// whether the DNS server should also be stopped when the overlay stops.
    pub async fn stop(&mut self) -> Result<()> {
        info!(interface = %self.config.interface, "Stopping overlay network");

        if let Some(mut transport) = self.transport.take() {
            transport.shutdown();
        }

        Ok(())
    }
551
552    /// Add a new peer to the overlay network
553    ///
554    /// For leader nodes, this also allocates an IP address for the peer.
555    pub async fn add_peer(&mut self, mut peer: PeerConfig) -> Result<Ipv4Addr> {
556        // If we're the leader, allocate an IP for this peer
557        let overlay_ip = if let Some(ref mut allocator) = self.allocator {
558            let ip = allocator.allocate().ok_or(OverlayError::NoAvailableIps)?;
559            peer.overlay_ip = ip;
560            ip
561        } else {
562            peer.overlay_ip
563        };
564
565        // Add peer to overlay transport via UAPI
566        if let Ok(peer_info) = peer.to_peer_info() {
567            // Prefer the stored transport; fall back to a temporary instance
568            // (UAPI calls work via the Unix socket regardless of DeviceHandle)
569            let transport_ref: Option<&OverlayTransport> = self.transport.as_ref();
570
571            let result = if let Some(t) = transport_ref {
572                t.add_peer(&peer_info).await
573            } else {
574                let overlay_config = crate::config::OverlayConfig {
575                    local_endpoint: SocketAddr::new(
576                        std::net::IpAddr::V4(std::net::Ipv4Addr::new(0, 0, 0, 0)),
577                        self.config.port,
578                    ),
579                    private_key: self.config.private_key.clone(),
580                    public_key: self.config.public_key.clone(),
581                    overlay_cidr: self.config.allowed_ip(),
582                    peer_discovery_interval: Duration::from_secs(30),
583                };
584                let tmp = OverlayTransport::new(overlay_config, self.config.interface.clone());
585                tmp.add_peer(&peer_info).await
586            };
587
588            match result {
589                Ok(_) => debug!(peer = %peer.node_id, "Added peer to overlay"),
590                Err(e) => {
591                    warn!(peer = %peer.node_id, error = %e, "Failed to add peer to overlay (interface may not be up)")
592                }
593            }
594        }
595
596        // Register peer in DNS if enabled
597        if let Some(ref dns_handle) = self.dns_handle {
598            // IP-based hostname
599            let hostname = peer_hostname(overlay_ip);
600            dns_handle
601                .add_record(&hostname, overlay_ip)
602                .await
603                .map_err(|e| OverlayError::Dns(e.to_string()))?;
604            debug!(hostname = %hostname, ip = %overlay_ip, "Registered peer in DNS");
605
606            // Custom hostname alias if provided
607            if let Some(ref custom) = peer.hostname {
608                dns_handle
609                    .add_record(custom, overlay_ip)
610                    .await
611                    .map_err(|e| OverlayError::Dns(e.to_string()))?;
612                debug!(hostname = %custom, ip = %overlay_ip, "Registered custom hostname in DNS");
613            }
614        }
615
616        // Add to peer list
617        self.peers.push(peer);
618
619        // Persist state
620        self.save().await?;
621
622        info!(peer_ip = %overlay_ip, "Added peer to overlay");
623        Ok(overlay_ip)
624    }
625
626    /// Remove a peer from the overlay network
627    pub async fn remove_peer(&mut self, public_key: &str) -> Result<()> {
628        // Find the peer
629        let peer_idx = self
630            .peers
631            .iter()
632            .position(|p| p.public_key == public_key)
633            .ok_or_else(|| OverlayError::PeerNotFound(public_key.to_string()))?;
634
635        let peer = &self.peers[peer_idx];
636
637        // Capture peer info for DNS removal before we lose the reference
638        let peer_overlay_ip = peer.overlay_ip;
639        let peer_custom_hostname = peer.hostname.clone();
640
641        // Release IP if we're managing allocation
642        if let Some(ref mut allocator) = self.allocator {
643            allocator.release(peer_overlay_ip);
644        }
645
646        // Remove from DNS if enabled
647        if let Some(ref dns_handle) = self.dns_handle {
648            // Remove IP-based hostname
649            let hostname = peer_hostname(peer_overlay_ip);
650            dns_handle
651                .remove_record(&hostname)
652                .await
653                .map_err(|e| OverlayError::Dns(e.to_string()))?;
654            debug!(hostname = %hostname, "Removed peer from DNS");
655
656            // Remove custom hostname if it was set
657            if let Some(ref custom) = peer_custom_hostname {
658                dns_handle
659                    .remove_record(custom)
660                    .await
661                    .map_err(|e| OverlayError::Dns(e.to_string()))?;
662                debug!(hostname = %custom, "Removed custom hostname from DNS");
663            }
664        }
665
666        // Remove peer from overlay transport via UAPI
667        let transport_ref: Option<&OverlayTransport> = self.transport.as_ref();
668
669        let result = if let Some(t) = transport_ref {
670            t.remove_peer(public_key).await
671        } else {
672            let overlay_config = crate::config::OverlayConfig {
673                local_endpoint: SocketAddr::new(
674                    std::net::IpAddr::V4(std::net::Ipv4Addr::new(0, 0, 0, 0)),
675                    self.config.port,
676                ),
677                private_key: self.config.private_key.clone(),
678                public_key: self.config.public_key.clone(),
679                overlay_cidr: self.config.allowed_ip(),
680                peer_discovery_interval: Duration::from_secs(30),
681            };
682            let tmp = OverlayTransport::new(overlay_config, self.config.interface.clone());
683            tmp.remove_peer(public_key).await
684        };
685
686        match result {
687            Ok(_) => debug!(public_key = public_key, "Removed peer from overlay"),
688            Err(e) => {
689                warn!(public_key = public_key, error = %e, "Failed to remove peer from overlay")
690            }
691        }
692
693        // Remove from peer list
694        self.peers.remove(peer_idx);
695
696        // Persist state
697        self.save().await?;
698
699        info!(public_key = public_key, "Removed peer from overlay");
700        Ok(())
701    }
702
    /// Get this node's public key
    pub fn public_key(&self) -> &str {
        &self.config.public_key
    }

    /// Get this node's overlay IP
    pub fn node_ip(&self) -> Ipv4Addr {
        self.config.node_ip
    }

    /// Get the overlay CIDR (e.g., "10.200.0.0/16")
    pub fn cidr(&self) -> &str {
        &self.config.cidr
    }

    /// Get the overlay interface name
    pub fn interface(&self) -> &str {
        &self.config.interface
    }

    /// Get the overlay listen port
    pub fn port(&self) -> u16 {
        self.config.port
    }

    /// Check if this node is the leader
    pub fn is_leader(&self) -> bool {
        self.config.is_leader
    }

    /// Get configured peers (read-only view)
    pub fn peers(&self) -> &[PeerConfig] {
        &self.peers
    }

    /// Get the bootstrap config
    pub fn config(&self) -> &BootstrapConfig {
        &self.config
    }
742
743    /// Allocate an IP for a new peer (leader only)
744    ///
745    /// This is used by the control plane when processing join requests.
746    pub fn allocate_peer_ip(&mut self) -> Result<Ipv4Addr> {
747        let allocator = self
748            .allocator
749            .as_mut()
750            .ok_or(OverlayError::Config("Not a leader node".to_string()))?;
751
752        allocator.allocate().ok_or(OverlayError::NoAvailableIps)
753    }
754
    /// Get IP allocation statistics (leader only)
    ///
    /// Returns `(allocated, total_hosts)`, or `None` on non-leader nodes.
    /// NOTE(review): `allocated_count() as u32` would truncate if the count
    /// exceeded u32 — presumably impossible for IPv4 host counts; confirm.
    pub fn allocation_stats(&self) -> Option<(u32, u32)> {
        self.allocator
            .as_ref()
            .map(|a| (a.allocated_count() as u32, a.total_hosts()))
    }
761}
762
/// Current Unix timestamp in whole seconds (0 if the clock predates the epoch).
fn current_timestamp() -> u64 {
    use std::time::{SystemTime, UNIX_EPOCH};
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .map(|elapsed| elapsed.as_secs())
        .unwrap_or(0)
}
770
#[cfg(test)]
mod tests {
    use super::*;

    // allowed_ip() should render the node IP as a /32 host entry.
    #[test]
    fn test_bootstrap_config_allowed_ip() {
        let config = BootstrapConfig {
            cidr: "10.200.0.0/16".to_string(),
            node_ip: "10.200.0.1".parse().unwrap(),
            interface: DEFAULT_INTERFACE_NAME.to_string(),
            port: DEFAULT_WG_PORT,
            private_key: "test_private".to_string(),
            public_key: "test_public".to_string(),
            is_leader: true,
            created_at: 0,
        };

        assert_eq!(config.allowed_ip(), "10.200.0.1/32");
    }

    // PeerConfig::new fills in the default keepalive and no hostname.
    #[test]
    fn test_peer_config_new() {
        let peer = PeerConfig::new(
            "node-1".to_string(),
            "pubkey123".to_string(),
            "192.168.1.100:51820".to_string(),
            "10.200.0.5".parse().unwrap(),
        );

        assert_eq!(peer.node_id, "node-1");
        assert_eq!(peer.keepalive, Some(DEFAULT_KEEPALIVE_SECS));
        assert_eq!(peer.hostname, None);
    }

    // with_hostname() sets the optional custom DNS name.
    #[test]
    fn test_peer_config_with_hostname() {
        let peer = PeerConfig::new(
            "node-1".to_string(),
            "pubkey123".to_string(),
            "192.168.1.100:51820".to_string(),
            "10.200.0.5".parse().unwrap(),
        )
        .with_hostname("web-server");

        assert_eq!(peer.hostname, Some("web-server".to_string()));
    }

    // to_peer_info() parses the endpoint and builds a /32 allowed-IPs entry.
    #[test]
    fn test_peer_config_to_peer_info() {
        let peer = PeerConfig::new(
            "node-1".to_string(),
            "pubkey123".to_string(),
            "192.168.1.100:51820".to_string(),
            "10.200.0.5".parse().unwrap(),
        );

        let peer_info = peer.to_peer_info().unwrap();
        assert_eq!(peer_info.public_key, "pubkey123");
        assert_eq!(peer_info.allowed_ips, "10.200.0.5/32");
    }

    // BootstrapState must round-trip through serde_json unchanged.
    #[test]
    fn test_bootstrap_state_serialization() {
        let config = BootstrapConfig {
            cidr: "10.200.0.0/16".to_string(),
            node_ip: "10.200.0.1".parse().unwrap(),
            interface: DEFAULT_INTERFACE_NAME.to_string(),
            port: DEFAULT_WG_PORT,
            private_key: "private".to_string(),
            public_key: "public".to_string(),
            is_leader: true,
            created_at: 1234567890,
        };

        let state = BootstrapState {
            config,
            peers: vec![],
            allocator_state: None,
        };

        let json = serde_json::to_string_pretty(&state).unwrap();
        let deserialized: BootstrapState = serde_json::from_str(&json).unwrap();

        assert_eq!(deserialized.config.cidr, "10.200.0.0/16");
        assert_eq!(deserialized.config.node_ip.to_string(), "10.200.0.1");
    }
}