use crate::allocator::IpAllocator;
use crate::config::PeerInfo;
use crate::dns::{peer_hostname, DnsConfig, DnsHandle, DnsServer, DEFAULT_DNS_PORT};
use crate::error::{OverlayError, Result};
#[cfg(feature = "nat")]
use crate::nat::{Candidate, ConnectionType, NatTraversal, RelayServer};
use crate::transport::OverlayTransport;
use serde::{Deserialize, Serialize};
use std::net::{IpAddr, SocketAddr};
use std::path::{Path, PathBuf};
use std::time::Duration;
use tracing::{debug, info, warn};
/// Default tunnel interface name. macOS only accepts kernel-style `utun`
/// names for tunnel devices, so it gets a separate default.
#[cfg(target_os = "macos")]
pub const DEFAULT_INTERFACE_NAME: &str = "utun";
/// Default tunnel interface name on non-macOS platforms.
#[cfg(not(target_os = "macos"))]
pub const DEFAULT_INTERFACE_NAME: &str = "zl-overlay0";
/// Re-export of the shared transport port default from the core crate.
pub use zlayer_core::DEFAULT_WG_PORT;
/// Default IPv4 overlay address range.
pub const DEFAULT_OVERLAY_CIDR: &str = "10.200.0.0/16";
/// Default IPv6 overlay address range (ULA `fd00::/8` space).
pub const DEFAULT_OVERLAY_CIDR_V6: &str = "fd00:200::/48";
/// Default persistent-keepalive interval, in seconds, applied to new peers.
pub const DEFAULT_KEEPALIVE_SECS: u16 = 25;
/// Per-node overlay configuration, persisted (with the peer list) into
/// `overlay_bootstrap.json` under the node's data directory.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BootstrapConfig {
    /// Overlay network range this node belongs to (e.g. `10.200.0.0/16`).
    pub cidr: String,
    /// This node's address inside the overlay.
    pub node_ip: IpAddr,
    /// Tunnel interface name; may be rewritten after interface creation if
    /// the kernel assigns a different name (see `OverlayBootstrap::start`).
    pub interface: String,
    /// Local transport listen port.
    pub port: u16,
    /// Tunnel private key (sensitive; this struct is serialized to disk).
    pub private_key: String,
    /// Matching public key, shared with peers.
    pub public_key: String,
    /// Whether this node owns the IP allocator (leader role).
    pub is_leader: bool,
    /// Unix timestamp (seconds) recorded when the config was first created.
    pub created_at: u64,
}
impl BootstrapConfig {
#[must_use]
pub fn allowed_ip(&self) -> String {
let prefix = match self.node_ip {
IpAddr::V4(_) => 32,
IpAddr::V6(_) => 128,
};
format!("{}/{}", self.node_ip, prefix)
}
}
/// A single remote peer as stored in the bootstrap state.
///
/// The NAT-related fields only exist when the crate is built with the
/// `nat` feature; `#[serde(default)]` keeps older state files loadable.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PeerConfig {
    /// Stable identifier for the peer (e.g. `"leader"`).
    pub node_id: String,
    /// Peer's tunnel public key.
    pub public_key: String,
    /// Underlay endpoint string; parsed as a `SocketAddr` in `to_peer_info`.
    pub endpoint: String,
    /// Peer's address inside the overlay.
    pub overlay_ip: IpAddr,
    /// Keepalive interval in seconds; falls back to `DEFAULT_KEEPALIVE_SECS`
    /// when `None`.
    #[serde(default)]
    pub keepalive: Option<u16>,
    /// Optional custom DNS hostname registered in addition to the
    /// IP-derived one.
    #[serde(default)]
    pub hostname: Option<String>,
    /// NAT traversal candidates advertised by this peer.
    #[serde(default)]
    #[cfg(feature = "nat")]
    pub candidates: Vec<Candidate>,
    /// How this peer is currently reached (e.g. direct vs. relayed).
    #[serde(default)]
    #[cfg(feature = "nat")]
    pub connection_type: ConnectionType,
}
impl PeerConfig {
    /// Builds a peer entry with the default keepalive, no custom hostname,
    /// and (with the `nat` feature) empty traversal state.
    #[must_use]
    pub fn new(node_id: String, public_key: String, endpoint: String, overlay_ip: IpAddr) -> Self {
        Self {
            node_id,
            public_key,
            endpoint,
            overlay_ip,
            hostname: None,
            keepalive: Some(DEFAULT_KEEPALIVE_SECS),
            #[cfg(feature = "nat")]
            candidates: Vec::new(),
            #[cfg(feature = "nat")]
            connection_type: ConnectionType::default(),
        }
    }
    /// Builder-style setter for the custom DNS hostname.
    #[must_use]
    pub fn with_hostname(self, hostname: impl Into<String>) -> Self {
        Self {
            hostname: Some(hostname.into()),
            ..self
        }
    }
    /// Converts this entry into the transport-level `PeerInfo`.
    ///
    /// # Errors
    /// Fails when `endpoint` does not parse as a socket address.
    pub fn to_peer_info(&self) -> std::result::Result<PeerInfo, Box<dyn std::error::Error>> {
        // Host route for the peer: /32 (IPv4) or /128 (IPv6).
        let prefix = if self.overlay_ip.is_ipv4() { 32 } else { 128 };
        let allowed_ips = format!("{}/{}", self.overlay_ip, prefix);
        let endpoint = self.endpoint.parse::<SocketAddr>()?;
        let keepalive_secs = self.keepalive.unwrap_or(DEFAULT_KEEPALIVE_SECS);
        Ok(PeerInfo::new(
            self.public_key.clone(),
            endpoint,
            &allowed_ips,
            Duration::from_secs(u64::from(keepalive_secs)),
        ))
    }
}
/// Full on-disk state: node config, known peers, and (leader only) the
/// IP allocator snapshot.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BootstrapState {
    pub config: BootstrapConfig,
    pub peers: Vec<PeerConfig>,
    /// Present only on leader nodes; omitted from the JSON when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub allocator_state: Option<crate::allocator::IpAllocatorState>,
}
/// Runtime handle for the overlay network: owns the persisted config plus
/// the live transport, optional DNS server, and (with the `nat` feature)
/// NAT traversal machinery.
pub struct OverlayBootstrap {
    config: BootstrapConfig,
    peers: Vec<PeerConfig>,
    /// Directory containing `overlay_bootstrap.json`.
    data_dir: PathBuf,
    /// IP allocator; `Some` only when this node allocates addresses
    /// (leader, or a node restored with allocator state).
    allocator: Option<IpAllocator>,
    /// Set via `with_dns`; the server itself is started in `start()`.
    dns_config: Option<DnsConfig>,
    /// Handle to the running DNS server, once started.
    dns_handle: Option<DnsHandle>,
    /// Live transport; `None` until `start()` succeeds.
    transport: Option<OverlayTransport>,
    #[cfg(feature = "nat")]
    nat_traversal: Option<NatTraversal>,
    #[cfg(feature = "nat")]
    relay_server: Option<RelayServer>,
}
impl OverlayBootstrap {
    /// Initializes a brand-new overlay with this node as leader.
    ///
    /// Generates a keypair, allocates the first host address in `cidr` for
    /// this node, and persists the resulting state to
    /// `overlay_bootstrap.json` under `data_dir`.
    ///
    /// # Errors
    /// Returns `AlreadyInitialized` if a bootstrap file already exists, and
    /// propagates key-generation, allocation, and I/O failures.
    pub async fn init_leader(cidr: &str, port: u16, data_dir: &Path) -> Result<Self> {
        let config_path = data_dir.join("overlay_bootstrap.json");
        // Refuse to clobber an existing overlay configuration.
        if config_path.exists() {
            return Err(OverlayError::AlreadyInitialized(
                config_path.display().to_string(),
            ));
        }
        tokio::fs::create_dir_all(data_dir).await?;
        info!("Generating overlay keypair for leader");
        let (private_key, public_key) = OverlayTransport::generate_keys()
            .await
            .map_err(|e| OverlayError::TransportCommand(e.to_string()))?;
        // Leader takes the first address and keeps the allocator for peers.
        let mut allocator = IpAllocator::new(cidr)?;
        let node_ip = allocator.allocate_first()?;
        info!(node_ip = %node_ip, cidr = cidr, "Allocated leader IP");
        let config = BootstrapConfig {
            cidr: cidr.to_string(),
            node_ip,
            interface: DEFAULT_INTERFACE_NAME.to_string(),
            port,
            private_key,
            public_key,
            is_leader: true,
            created_at: current_timestamp(),
        };
        let bootstrap = Self {
            config,
            peers: Vec::new(),
            data_dir: data_dir.to_path_buf(),
            allocator: Some(allocator),
            dns_config: None,
            dns_handle: None,
            transport: None,
            #[cfg(feature = "nat")]
            nat_traversal: None,
            #[cfg(feature = "nat")]
            relay_server: None,
        };
        bootstrap.save().await?;
        Ok(bootstrap)
    }
    /// Joins an existing overlay as a follower.
    ///
    /// `allocated_ip` must already have been handed out by the leader; the
    /// leader is recorded as this node's first peer. State is persisted to
    /// `overlay_bootstrap.json` under `data_dir`.
    ///
    /// # Errors
    /// Returns `AlreadyInitialized` if a bootstrap file already exists, and
    /// propagates key-generation and I/O failures.
    pub async fn join(
        leader_cidr: &str,
        leader_endpoint: &str,
        leader_public_key: &str,
        leader_overlay_ip: IpAddr,
        allocated_ip: IpAddr,
        port: u16,
        data_dir: &Path,
    ) -> Result<Self> {
        let config_path = data_dir.join("overlay_bootstrap.json");
        if config_path.exists() {
            return Err(OverlayError::AlreadyInitialized(
                config_path.display().to_string(),
            ));
        }
        tokio::fs::create_dir_all(data_dir).await?;
        info!("Generating overlay keypair for joining node");
        let (private_key, public_key) = OverlayTransport::generate_keys()
            .await
            .map_err(|e| OverlayError::TransportCommand(e.to_string()))?;
        let config = BootstrapConfig {
            cidr: leader_cidr.to_string(),
            node_ip: allocated_ip,
            interface: DEFAULT_INTERFACE_NAME.to_string(),
            port,
            private_key,
            public_key,
            is_leader: false,
            created_at: current_timestamp(),
        };
        // The leader is the only peer a joining node knows about initially.
        let leader_peer = PeerConfig {
            node_id: "leader".to_string(),
            public_key: leader_public_key.to_string(),
            endpoint: leader_endpoint.to_string(),
            overlay_ip: leader_overlay_ip,
            keepalive: Some(DEFAULT_KEEPALIVE_SECS),
            hostname: None,
            #[cfg(feature = "nat")]
            candidates: Vec::new(),
            #[cfg(feature = "nat")]
            connection_type: ConnectionType::default(),
        };
        info!(
            leader_endpoint = leader_endpoint,
            overlay_ip = %allocated_ip,
            "Configured leader as peer"
        );
        let bootstrap = Self {
            config,
            peers: vec![leader_peer],
            data_dir: data_dir.to_path_buf(),
            // Followers do not allocate addresses.
            allocator: None,
            dns_config: None,
            dns_handle: None,
            transport: None,
            #[cfg(feature = "nat")]
            nat_traversal: None,
            #[cfg(feature = "nat")]
            relay_server: None,
        };
        bootstrap.save().await?;
        Ok(bootstrap)
    }
    /// Restores a previously-saved bootstrap from `data_dir`.
    ///
    /// Runtime state (DNS, transport, NAT) is NOT restored; call `start()`
    /// afterwards.
    ///
    /// # Errors
    /// Returns `NotInitialized` if no bootstrap file exists; propagates I/O
    /// and JSON/allocator deserialization failures.
    pub async fn load(data_dir: &Path) -> Result<Self> {
        let config_path = data_dir.join("overlay_bootstrap.json");
        if !config_path.exists() {
            return Err(OverlayError::NotInitialized);
        }
        let contents = tokio::fs::read_to_string(&config_path).await?;
        let state: BootstrapState = serde_json::from_str(&contents)?;
        // Rebuild the allocator only if its state was persisted (leader).
        let allocator = if let Some(alloc_state) = state.allocator_state {
            Some(IpAllocator::from_state(alloc_state)?)
        } else {
            None
        };
        Ok(Self {
            config: state.config,
            peers: state.peers,
            data_dir: data_dir.to_path_buf(),
            allocator,
            dns_config: None,
            dns_handle: None,
            transport: None,
            #[cfg(feature = "nat")]
            nat_traversal: None,
            #[cfg(feature = "nat")]
            relay_server: None,
        })
    }
    /// Persists config, peers, and allocator state (if any) as pretty JSON
    /// to `overlay_bootstrap.json`.
    ///
    /// # Errors
    /// Propagates JSON serialization and file-write failures.
    pub async fn save(&self) -> Result<()> {
        let config_path = self.data_dir.join("overlay_bootstrap.json");
        let state = BootstrapState {
            config: self.config.clone(),
            peers: self.peers.clone(),
            allocator_state: self
                .allocator
                .as_ref()
                .map(super::allocator::IpAllocator::to_state),
        };
        let contents = serde_json::to_string_pretty(&state)?;
        tokio::fs::write(&config_path, contents).await?;
        debug!(path = %config_path.display(), "Saved bootstrap state");
        Ok(())
    }
    /// Enables the embedded DNS server for `zone` on `port`, bound to this
    /// node's overlay address. Takes effect on the next `start()`.
    ///
    /// # Errors
    /// Currently always succeeds; the `Result` is kept for API stability.
    pub fn with_dns(mut self, zone: &str, port: u16) -> Result<Self> {
        self.dns_config = Some(DnsConfig {
            zone: zone.to_string(),
            port,
            bind_addr: self.config.node_ip,
        });
        Ok(self)
    }
    /// Same as `with_dns` but using `DEFAULT_DNS_PORT`.
    ///
    /// # Errors
    /// See `with_dns`.
    pub fn with_dns_default(self, zone: &str) -> Result<Self> {
        self.with_dns(zone, DEFAULT_DNS_PORT)
    }
    /// Handle to the running DNS server, if it has been started.
    #[must_use]
    pub fn dns_handle(&self) -> Option<&DnsHandle> {
        self.dns_handle.as_ref()
    }
    /// Whether DNS has been configured (not necessarily started yet).
    #[must_use]
    pub fn dns_enabled(&self) -> bool {
        self.dns_config.is_some()
    }
    /// Brings the overlay up: creates and configures the tunnel interface
    /// with all known peers, then starts NAT traversal (feature-gated) and
    /// the DNS server (if configured).
    ///
    /// # Errors
    /// Propagates transport-command and DNS startup failures; peers whose
    /// stored info fails to parse are skipped with a warning.
    pub async fn start(&mut self) -> Result<()> {
        info!(
            interface = %self.config.interface,
            overlay_ip = %self.config.node_ip,
            port = self.config.port,
            dns_enabled = self.dns_config.is_some(),
            "Starting overlay network"
        );
        let overlay_config = crate::config::OverlayConfig {
            // Listen on all interfaces at the configured port.
            local_endpoint: SocketAddr::new(
                std::net::IpAddr::V4(std::net::Ipv4Addr::UNSPECIFIED),
                self.config.port,
            ),
            private_key: self.config.private_key.clone(),
            public_key: self.config.public_key.clone(),
            overlay_cidr: self.config.allowed_ip(),
            peer_discovery_interval: Duration::from_secs(30),
            #[cfg(feature = "nat")]
            nat: crate::nat::NatConfig::default(),
        };
        #[cfg(feature = "nat")]
        let nat_config = overlay_config.nat.clone();
        let mut transport = OverlayTransport::new(overlay_config, self.config.interface.clone());
        transport
            .create_interface()
            .await
            .map_err(|e| OverlayError::TransportCommand(e.to_string()))?;
        // The kernel may assign a different name than requested (macOS
        // utunN); record the actual name so later commands target it.
        let actual_name = transport.interface_name().to_string();
        if actual_name != self.config.interface {
            info!(
                requested = %self.config.interface,
                actual = %actual_name,
                "Interface name resolved by kernel"
            );
            self.config.interface = actual_name;
        }
        // Unparseable peers are logged and skipped rather than aborting
        // startup for the whole overlay.
        let peer_infos: Vec<PeerInfo> = self
            .peers
            .iter()
            .filter_map(|p| match p.to_peer_info() {
                Ok(info) => Some(info),
                Err(e) => {
                    warn!(peer = %p.node_id, error = %e, "Failed to parse peer info");
                    None
                }
            })
            .collect();
        transport
            .configure(&peer_infos)
            .await
            .map_err(|e| OverlayError::TransportCommand(e.to_string()))?;
        self.transport = Some(transport);
        #[cfg(feature = "nat")]
        self.start_nat_traversal(nat_config).await;
        self.start_dns().await?;
        info!("Overlay network started successfully");
        Ok(())
    }
    /// Starts the DNS server (no-op when DNS was not configured) and
    /// registers records for this node, the leader alias, and every peer.
    async fn start_dns(&mut self) -> Result<()> {
        let Some(dns_config) = &self.dns_config else {
            return Ok(());
        };
        info!(
            zone = %dns_config.zone,
            port = dns_config.port,
            "Starting DNS server for overlay"
        );
        let dns_server =
            DnsServer::from_config(dns_config).map_err(|e| OverlayError::Dns(e.to_string()))?;
        // Register this node under its IP-derived hostname.
        let self_hostname = peer_hostname(self.config.node_ip);
        dns_server
            .add_record(&self_hostname, self.config.node_ip)
            .await
            .map_err(|e| OverlayError::Dns(e.to_string()))?;
        // The leader also answers for the well-known "leader" name.
        if self.config.is_leader {
            dns_server
                .add_record("leader", self.config.node_ip)
                .await
                .map_err(|e| OverlayError::Dns(e.to_string()))?;
            debug!(ip = %self.config.node_ip, "Registered leader.{}", dns_config.zone);
        }
        // Each peer gets its derived hostname plus any custom one.
        for peer in &self.peers {
            let hostname = peer_hostname(peer.overlay_ip);
            dns_server
                .add_record(&hostname, peer.overlay_ip)
                .await
                .map_err(|e| OverlayError::Dns(e.to_string()))?;
            if let Some(custom) = &peer.hostname {
                dns_server
                    .add_record(custom, peer.overlay_ip)
                    .await
                    .map_err(|e| OverlayError::Dns(e.to_string()))?;
                debug!(
                    hostname = custom,
                    ip = %peer.overlay_ip,
                    "Registered custom hostname"
                );
            }
        }
        let handle = dns_server
            .start()
            .await
            .map_err(|e| OverlayError::Dns(e.to_string()))?;
        self.dns_handle = Some(handle);
        info!("DNS server started successfully");
        Ok(())
    }
    /// Starts the optional relay server, gathers local NAT candidates, and
    /// attempts traversal to every peer that advertised candidates.
    /// Failures are logged, never fatal — the overlay works without NAT.
    #[cfg(feature = "nat")]
    async fn start_nat_traversal(&mut self, nat_config: crate::nat::NatConfig) {
        if !nat_config.enabled {
            return;
        }
        if let Some(ref relay_config) = nat_config.relay_server {
            let relay = RelayServer::new(relay_config, &self.config.private_key);
            match relay.start().await {
                Ok(()) => {
                    info!("Built-in relay server started");
                    self.relay_server = Some(relay);
                }
                Err(e) => {
                    warn!(error = %e, "Failed to start relay server");
                }
            }
        }
        let mut nat = NatTraversal::new(nat_config, self.config.port);
        match nat.gather_candidates().await {
            Ok(candidates) => {
                info!(count = candidates.len(), "Gathered NAT candidates");
                if let Some(ref transport) = self.transport {
                    for peer in &mut self.peers {
                        if !peer.candidates.is_empty() {
                            match nat
                                .connect_to_peer(transport, &peer.public_key, &peer.candidates)
                                .await
                            {
                                Ok(ct) => {
                                    peer.connection_type = ct;
                                    info!(
                                        peer = %peer.node_id,
                                        connection = %ct,
                                        "NAT traversal succeeded"
                                    );
                                }
                                Err(e) => warn!(
                                    peer = %peer.node_id,
                                    error = %e,
                                    "NAT traversal failed"
                                ),
                            }
                        }
                    }
                }
                // Keep the traversal state only if candidate gathering worked.
                self.nat_traversal = Some(nat);
            }
            Err(e) => warn!(error = %e, "NAT candidate gathering failed"),
        }
    }
    /// Shuts down the transport (if running). Persisted state is untouched.
    ///
    /// # Errors
    /// Currently always succeeds; the `Result` is kept for API stability.
    #[allow(clippy::unused_async)]
    pub async fn stop(&mut self) -> Result<()> {
        info!(interface = %self.config.interface, "Stopping overlay network");
        if let Some(mut transport) = self.transport.take() {
            transport.shutdown();
        }
        Ok(())
    }
    /// Adds a peer: allocates an overlay IP for it (when this node holds the
    /// allocator, overriding `peer.overlay_ip`), pushes it to the transport,
    /// registers DNS records, attempts NAT traversal (feature-gated), then
    /// persists state. Returns the peer's overlay IP.
    ///
    /// # Errors
    /// Returns `NoAvailableIps` when the allocator is exhausted; propagates
    /// DNS and save failures. Transport failures are only logged (the
    /// interface may not be up yet).
    pub async fn add_peer(&mut self, mut peer: PeerConfig) -> Result<IpAddr> {
        let overlay_ip = if let Some(ref mut allocator) = self.allocator {
            // Leader path: assign a fresh address, replacing whatever the
            // caller put in `peer.overlay_ip`.
            let ip = allocator.allocate().ok_or(OverlayError::NoAvailableIps)?;
            peer.overlay_ip = ip;
            ip
        } else {
            peer.overlay_ip
        };
        if let Ok(peer_info) = peer.to_peer_info() {
            let transport_ref: Option<&OverlayTransport> = self.transport.as_ref();
            let result = if let Some(t) = transport_ref {
                t.add_peer(&peer_info).await
            } else {
                // No live transport: build a throwaway one so the command
                // can still be issued against the configured interface.
                let overlay_config = crate::config::OverlayConfig {
                    local_endpoint: SocketAddr::new(
                        std::net::IpAddr::V4(std::net::Ipv4Addr::UNSPECIFIED),
                        self.config.port,
                    ),
                    private_key: self.config.private_key.clone(),
                    public_key: self.config.public_key.clone(),
                    overlay_cidr: self.config.allowed_ip(),
                    peer_discovery_interval: Duration::from_secs(30),
                    #[cfg(feature = "nat")]
                    nat: crate::nat::NatConfig::default(),
                };
                let tmp = OverlayTransport::new(overlay_config, self.config.interface.clone());
                tmp.add_peer(&peer_info).await
            };
            match result {
                Ok(()) => debug!(peer = %peer.node_id, "Added peer to overlay"),
                Err(e) => {
                    warn!(peer = %peer.node_id, error = %e, "Failed to add peer to overlay (interface may not be up)");
                }
            }
        }
        if let Some(ref dns_handle) = self.dns_handle {
            let hostname = peer_hostname(overlay_ip);
            dns_handle
                .add_record(&hostname, overlay_ip)
                .await
                .map_err(|e| OverlayError::Dns(e.to_string()))?;
            debug!(hostname = %hostname, ip = %overlay_ip, "Registered peer in DNS");
            if let Some(ref custom) = peer.hostname {
                dns_handle
                    .add_record(custom, overlay_ip)
                    .await
                    .map_err(|e| OverlayError::Dns(e.to_string()))?;
                debug!(hostname = %custom, ip = %overlay_ip, "Registered custom hostname in DNS");
            }
        }
        #[cfg(feature = "nat")]
        {
            if let (Some(ref nat), Some(ref transport)) = (&self.nat_traversal, &self.transport) {
                if !peer.candidates.is_empty() {
                    match nat
                        .connect_to_peer(transport, &peer.public_key, &peer.candidates)
                        .await
                    {
                        Ok(ct) => {
                            peer.connection_type = ct;
                            info!(
                                peer = %peer.node_id,
                                connection = %ct,
                                "NAT traversal for new peer"
                            );
                        }
                        Err(e) => warn!(
                            peer = %peer.node_id,
                            error = %e,
                            "NAT failed for new peer"
                        ),
                    }
                }
            }
        }
        self.peers.push(peer);
        self.save().await?;
        info!(peer_ip = %overlay_ip, "Added peer to overlay");
        Ok(overlay_ip)
    }
    /// Removes a peer by public key: releases its IP back to the allocator,
    /// deregisters DNS records, removes it from the transport, and persists
    /// the updated state.
    ///
    /// # Errors
    /// Returns `PeerNotFound` for an unknown key; propagates DNS and save
    /// failures. Transport failures are only logged.
    pub async fn remove_peer(&mut self, public_key: &str) -> Result<()> {
        let peer_idx = self
            .peers
            .iter()
            .position(|p| p.public_key == public_key)
            .ok_or_else(|| OverlayError::PeerNotFound(public_key.to_string()))?;
        let peer = &self.peers[peer_idx];
        // Copy out what later steps need so the borrow of `self.peers` ends.
        let peer_overlay_ip = peer.overlay_ip;
        let peer_custom_hostname = peer.hostname.clone();
        if let Some(ref mut allocator) = self.allocator {
            allocator.release(peer_overlay_ip);
        }
        if let Some(ref dns_handle) = self.dns_handle {
            let hostname = peer_hostname(peer_overlay_ip);
            dns_handle
                .remove_record(&hostname)
                .await
                .map_err(|e| OverlayError::Dns(e.to_string()))?;
            debug!(hostname = %hostname, "Removed peer from DNS");
            if let Some(ref custom) = peer_custom_hostname {
                dns_handle
                    .remove_record(custom)
                    .await
                    .map_err(|e| OverlayError::Dns(e.to_string()))?;
                debug!(hostname = %custom, "Removed custom hostname from DNS");
            }
        }
        let transport_ref: Option<&OverlayTransport> = self.transport.as_ref();
        let result = if let Some(t) = transport_ref {
            t.remove_peer(public_key).await
        } else {
            // No live transport: issue the removal through a throwaway one.
            let overlay_config = crate::config::OverlayConfig {
                local_endpoint: SocketAddr::new(
                    std::net::IpAddr::V4(std::net::Ipv4Addr::UNSPECIFIED),
                    self.config.port,
                ),
                private_key: self.config.private_key.clone(),
                public_key: self.config.public_key.clone(),
                overlay_cidr: self.config.allowed_ip(),
                peer_discovery_interval: Duration::from_secs(30),
                #[cfg(feature = "nat")]
                nat: crate::nat::NatConfig::default(),
            };
            let tmp = OverlayTransport::new(overlay_config, self.config.interface.clone());
            tmp.remove_peer(public_key).await
        };
        match result {
            Ok(()) => debug!(public_key = public_key, "Removed peer from overlay"),
            Err(e) => {
                warn!(public_key = public_key, error = %e, "Failed to remove peer from overlay");
            }
        }
        self.peers.remove(peer_idx);
        self.save().await?;
        info!(public_key = public_key, "Removed peer from overlay");
        Ok(())
    }
    /// This node's public key.
    #[must_use]
    pub fn public_key(&self) -> &str {
        &self.config.public_key
    }
    /// This node's overlay address.
    #[must_use]
    pub fn node_ip(&self) -> IpAddr {
        self.config.node_ip
    }
    /// The overlay network range.
    #[must_use]
    pub fn cidr(&self) -> &str {
        &self.config.cidr
    }
    /// The tunnel interface name.
    #[must_use]
    pub fn interface(&self) -> &str {
        &self.config.interface
    }
    /// The local transport listen port.
    #[must_use]
    pub fn port(&self) -> u16 {
        self.config.port
    }
    /// Whether this node is the overlay leader.
    #[must_use]
    pub fn is_leader(&self) -> bool {
        self.config.is_leader
    }
    /// The currently known peers.
    #[must_use]
    pub fn peers(&self) -> &[PeerConfig] {
        &self.peers
    }
    /// The persisted node configuration.
    #[must_use]
    pub fn config(&self) -> &BootstrapConfig {
        &self.config
    }
    /// Allocates an overlay IP for a prospective peer without adding it.
    ///
    /// # Errors
    /// Returns a config error on non-leader nodes (no allocator) and
    /// `NoAvailableIps` when the range is exhausted.
    pub fn allocate_peer_ip(&mut self) -> Result<IpAddr> {
        let allocator = self
            .allocator
            .as_mut()
            .ok_or(OverlayError::Config("Not a leader node".to_string()))?;
        allocator.allocate().ok_or(OverlayError::NoAvailableIps)
    }
    /// `(allocated, total)` host counts, when this node holds the allocator.
    #[must_use]
    #[allow(clippy::cast_possible_truncation)]
    pub fn allocation_stats(&self) -> Option<(u32, u32)> {
        self.allocator
            .as_ref()
            .map(|a| (a.allocated_count() as u32, a.total_hosts()))
    }
    /// Periodic NAT maintenance: refreshes the reflexive address and tries
    /// to upgrade relayed peers to a better connection. No-op unless both
    /// NAT traversal and the transport are running.
    ///
    /// # Errors
    /// Propagates failures from the reflexive-address refresh.
    #[cfg(feature = "nat")]
    pub async fn nat_maintenance_tick(&mut self) -> Result<()> {
        let (Some(nat), Some(transport)) = (&mut self.nat_traversal, &self.transport) else {
            return Ok(());
        };
        if nat.refresh().await? {
            info!("Reflexive address changed");
        }
        for peer in &mut self.peers {
            // Only relayed peers with known candidates are worth retrying.
            if peer.connection_type == ConnectionType::Relayed && !peer.candidates.is_empty() {
                if let Ok(Some(upgraded)) = nat
                    .attempt_upgrade(transport, &peer.public_key, &peer.candidates)
                    .await
                {
                    peer.connection_type = upgraded;
                    info!(
                        peer = %peer.node_id,
                        connection = %upgraded,
                        "Upgraded relayed connection"
                    );
                }
            }
        }
        Ok(())
    }
    /// Local NAT candidates to advertise to peers; empty when NAT traversal
    /// is not running.
    #[cfg(feature = "nat")]
    #[must_use]
    pub fn nat_candidates(&self) -> Vec<Candidate> {
        self.nat_traversal
            .as_ref()
            .map(|n| n.local_candidates().to_vec())
            .unwrap_or_default()
    }
}
/// Seconds since the Unix epoch, or 0 if the system clock reads earlier
/// than the epoch.
fn current_timestamp() -> u64 {
    std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .map(|elapsed| elapsed.as_secs())
        .unwrap_or(0)
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::net::Ipv4Addr;
    // allowed_ip() must produce a /32 host route for IPv4 nodes.
    #[test]
    fn test_bootstrap_config_allowed_ip_v4() {
        let config = BootstrapConfig {
            cidr: "10.200.0.0/16".to_string(),
            node_ip: IpAddr::V4(Ipv4Addr::new(10, 200, 0, 1)),
            interface: DEFAULT_INTERFACE_NAME.to_string(),
            port: DEFAULT_WG_PORT,
            private_key: "test_private".to_string(),
            public_key: "test_public".to_string(),
            is_leader: true,
            created_at: 0,
        };
        assert_eq!(config.allowed_ip(), "10.200.0.1/32");
    }
    // allowed_ip() must produce a /128 host route for IPv6 nodes.
    #[test]
    fn test_bootstrap_config_allowed_ip_v6() {
        let config = BootstrapConfig {
            cidr: "fd00:200::/48".to_string(),
            node_ip: "fd00:200::1".parse::<IpAddr>().unwrap(),
            interface: DEFAULT_INTERFACE_NAME.to_string(),
            port: DEFAULT_WG_PORT,
            private_key: "test_private".to_string(),
            public_key: "test_public".to_string(),
            is_leader: true,
            created_at: 0,
        };
        assert_eq!(config.allowed_ip(), "fd00:200::1/128");
    }
    // PeerConfig::new applies the default keepalive and no hostname (IPv4).
    #[test]
    fn test_peer_config_new_v4() {
        let peer = PeerConfig::new(
            "node-1".to_string(),
            "pubkey123".to_string(),
            "192.168.1.100:51820".to_string(),
            IpAddr::V4(Ipv4Addr::new(10, 200, 0, 5)),
        );
        assert_eq!(peer.node_id, "node-1");
        assert_eq!(peer.keepalive, Some(DEFAULT_KEEPALIVE_SECS));
        assert_eq!(peer.hostname, None);
    }
    // Same defaults hold for an IPv6 peer with a bracketed endpoint.
    #[test]
    fn test_peer_config_new_v6() {
        let peer = PeerConfig::new(
            "node-1".to_string(),
            "pubkey123".to_string(),
            "[::1]:51820".to_string(),
            "fd00:200::5".parse::<IpAddr>().unwrap(),
        );
        assert_eq!(peer.node_id, "node-1");
        assert_eq!(peer.keepalive, Some(DEFAULT_KEEPALIVE_SECS));
        assert_eq!(peer.hostname, None);
    }
    // with_hostname() stores the custom hostname in builder style.
    #[test]
    fn test_peer_config_with_hostname() {
        let peer = PeerConfig::new(
            "node-1".to_string(),
            "pubkey123".to_string(),
            "192.168.1.100:51820".to_string(),
            IpAddr::V4(Ipv4Addr::new(10, 200, 0, 5)),
        )
        .with_hostname("web-server");
        assert_eq!(peer.hostname, Some("web-server".to_string()));
    }
    // to_peer_info() parses the endpoint and derives a /32 allowed-IPs entry.
    #[test]
    fn test_peer_config_to_peer_info_v4() {
        let peer = PeerConfig::new(
            "node-1".to_string(),
            "pubkey123".to_string(),
            "192.168.1.100:51820".to_string(),
            IpAddr::V4(Ipv4Addr::new(10, 200, 0, 5)),
        );
        let peer_info = peer.to_peer_info().unwrap();
        assert_eq!(peer_info.public_key, "pubkey123");
        assert_eq!(peer_info.allowed_ips, "10.200.0.5/32");
    }
    // IPv6 variant: allowed-IPs must use a /128 prefix.
    #[test]
    fn test_peer_config_to_peer_info_v6() {
        let peer = PeerConfig::new(
            "node-1".to_string(),
            "pubkey123".to_string(),
            "[::1]:51820".to_string(),
            "fd00:200::5".parse::<IpAddr>().unwrap(),
        );
        let peer_info = peer.to_peer_info().unwrap();
        assert_eq!(peer_info.public_key, "pubkey123");
        assert_eq!(peer_info.allowed_ips, "fd00:200::5/128");
    }
    // BootstrapState must survive a JSON round-trip (IPv4 config).
    #[test]
    fn test_bootstrap_state_serialization_v4() {
        let config = BootstrapConfig {
            cidr: "10.200.0.0/16".to_string(),
            node_ip: IpAddr::V4(Ipv4Addr::new(10, 200, 0, 1)),
            interface: DEFAULT_INTERFACE_NAME.to_string(),
            port: DEFAULT_WG_PORT,
            private_key: "private".to_string(),
            public_key: "public".to_string(),
            is_leader: true,
            created_at: 1_234_567_890,
        };
        let state = BootstrapState {
            config,
            peers: vec![],
            allocator_state: None,
        };
        let json = serde_json::to_string_pretty(&state).unwrap();
        let deserialized: BootstrapState = serde_json::from_str(&json).unwrap();
        assert_eq!(deserialized.config.cidr, "10.200.0.0/16");
        assert_eq!(deserialized.config.node_ip.to_string(), "10.200.0.1");
    }
    // JSON round-trip with an IPv6 node address.
    #[test]
    fn test_bootstrap_state_serialization_v6() {
        let config = BootstrapConfig {
            cidr: "fd00:200::/48".to_string(),
            node_ip: "fd00:200::1".parse::<IpAddr>().unwrap(),
            interface: DEFAULT_INTERFACE_NAME.to_string(),
            port: DEFAULT_WG_PORT,
            private_key: "private".to_string(),
            public_key: "public".to_string(),
            is_leader: true,
            created_at: 1_234_567_890,
        };
        let state = BootstrapState {
            config,
            peers: vec![],
            allocator_state: None,
        };
        let json = serde_json::to_string_pretty(&state).unwrap();
        let deserialized: BootstrapState = serde_json::from_str(&json).unwrap();
        assert_eq!(deserialized.config.cidr, "fd00:200::/48");
        assert_eq!(deserialized.config.node_ip.to_string(), "fd00:200::1");
    }
    // Guard against typos in the v6 default: must parse as an IPv6 /48.
    #[test]
    fn test_default_overlay_cidr_v6_constant() {
        let net: ipnet::IpNet = DEFAULT_OVERLAY_CIDR_V6.parse().unwrap();
        assert!(matches!(net, ipnet::IpNet::V6(_)));
        assert_eq!(net.prefix_len(), 48);
    }
}