use crate::error::Result;
use regex::Regex;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::fs;
use std::path::{Path, PathBuf};
/// Top-level result of [`analyze_docker_infrastructure`]: everything the
/// scanner learned about a project's Docker setup.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct DockerAnalysis {
    /// Parsed Dockerfiles found under the project root.
    pub dockerfiles: Vec<DockerfileInfo>,
    /// Parsed Compose files found under the project root.
    pub compose_files: Vec<ComposeFileInfo>,
    /// Services declared across all Compose files.
    pub services: Vec<DockerService>,
    /// Network topology, service discovery and exposure information.
    pub networking: NetworkingConfig,
    /// Heuristic classification of the orchestration style.
    pub orchestration_pattern: OrchestrationPattern,
    /// Docker assets grouped by the environment inferred from filenames.
    pub environments: Vec<DockerEnvironment>,
}

/// Facts extracted from one Dockerfile by line-oriented regex parsing.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct DockerfileInfo {
    /// Location of the Dockerfile on disk.
    pub path: PathBuf,
    /// Environment inferred from the filename (e.g. `Dockerfile.dev`).
    pub environment: Option<String>,
    /// Image named by the first `FROM` instruction.
    pub base_image: Option<String>,
    /// Ports listed by `EXPOSE` instructions.
    pub exposed_ports: Vec<u16>,
    /// Value of the last `WORKDIR` instruction.
    pub workdir: Option<String>,
    /// `ENTRYPOINT` value, or the first `CMD` when no `ENTRYPOINT` exists.
    pub entrypoint: Option<String>,
    /// Raw argument text of each `ENV` instruction.
    pub env_vars: Vec<String>,
    /// Names of `FROM ... AS <name>` build stages.
    pub build_stages: Vec<String>,
    /// True when at least one named build stage was seen.
    pub is_multistage: bool,
    /// Count of non-blank, non-comment lines.
    pub instruction_count: usize,
}

/// A Dockerfile plus deployment suggestions derived from it; produced by
/// [`discover_dockerfiles_for_deployment`].
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct DiscoveredDockerfile {
    /// Location of the Dockerfile on disk.
    pub path: PathBuf,
    /// Build context directory relative to the project root (`.` = root).
    pub build_context: String,
    /// Sanitized service name derived from the containing directory.
    pub suggested_service_name: String,
    /// First exposed port, or a conventional default for the base image.
    pub suggested_port: Option<u16>,
    /// Image named by the first `FROM` instruction.
    pub base_image: Option<String>,
    /// True when the Dockerfile declares named build stages.
    pub is_multistage: bool,
    /// Environment inferred from the filename, if any.
    pub environment: Option<String>,
}

/// Summary of one Docker Compose file's top-level structure.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct ComposeFileInfo {
    /// Location of the Compose file on disk.
    pub path: PathBuf,
    /// Environment inferred from the filename (e.g. `docker-compose.prod.yml`).
    pub environment: Option<String>,
    /// Top-level `version:` key, when present (legacy Compose schema).
    pub version: Option<String>,
    /// Names of all declared services.
    pub service_names: Vec<String>,
    /// Names of all declared networks.
    pub networks: Vec<String>,
    /// Names of all declared volumes.
    pub volumes: Vec<String>,
    /// `network:<name>` / `volume:<name>` markers for resources declared
    /// `external: true` (managed outside this Compose file).
    pub external_dependencies: Vec<String>,
}

/// Heuristic classification of how the project's containers are orchestrated.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]
pub enum OrchestrationPattern {
    /// Zero or one service.
    SingleContainer,
    /// Plain multi-service Compose setup.
    DockerCompose,
    /// Several backends plus service-discovery signals.
    Microservices,
    /// Message broker (redis/rabbitmq/kafka/nats) plus several backends.
    EventDriven,
    /// Service-mesh tooling (istio/linkerd/envoy/...) detected.
    ServiceMesh,
    /// Not produced by `determine_orchestration_pattern` in this module;
    /// reserved for callers/serialization.
    Mixed,
}
/// One service entry from a Compose file's `services:` section.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct DockerService {
    /// Service name (the key under `services:`).
    pub name: String,
    /// Compose file the service was declared in.
    pub compose_file: PathBuf,
    /// Either a prebuilt image reference or a local build definition.
    pub image_or_build: ImageOrBuild,
    /// Parsed `ports:` entries.
    pub ports: Vec<PortMapping>,
    /// Variables from `environment:` (mapping or `KEY=value` list form).
    pub environment: HashMap<String, String>,
    /// Service names from `depends_on:`.
    pub depends_on: Vec<String>,
    /// Names of networks the service attaches to.
    pub networks: Vec<String>,
    /// Parsed short-syntax `volumes:` entries.
    pub volumes: Vec<VolumeMount>,
    /// Parsed `healthcheck:` section, when its `test` is a plain string.
    pub health_check: Option<HealthCheck>,
    /// `restart:` policy string, when present.
    pub restart_policy: Option<String>,
    /// Resource limits; never populated by this module's parser.
    pub resource_limits: Option<ResourceLimits>,
}

/// How a service obtains its container image.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum ImageOrBuild {
    /// A pulled image reference (`image:` key); `"unknown"` when absent.
    Image(String),
    /// A local build (`build:` key), short or long form.
    Build {
        /// Build context directory.
        context: String,
        /// Optional explicit Dockerfile path.
        dockerfile: Option<String>,
        /// String-valued `args:` build arguments.
        args: HashMap<String, String>,
    },
}

/// One parsed entry of a Compose `ports:` list.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct PortMapping {
    /// Host-side port; `None` when the port is not published.
    pub host_port: Option<u16>,
    /// Container-side port.
    pub container_port: u16,
    /// Transport protocol (defaults to `"tcp"`).
    pub protocol: String,
    /// True when the mapping publishes the port on the host.
    pub exposed_to_host: bool,
}

/// One parsed short-syntax volume entry (`source:target[:options]`).
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct VolumeMount {
    /// Host path or named volume.
    pub source: String,
    /// Mount point inside the container.
    pub target: String,
    /// `"bind"` for host paths, `"volume"` for named volumes.
    pub mount_type: String,
    /// True when the third `:`-separated component is `ro`.
    pub read_only: bool,
}

/// Subset of a Compose `healthcheck:` section.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct HealthCheck {
    /// The health command (string form only).
    pub test: String,
    /// Raw `interval:` duration string, when present.
    pub interval: Option<String>,
    /// Raw `timeout:` duration string, when present.
    pub timeout: Option<String>,
    /// Number of retries before reporting unhealthy.
    pub retries: Option<u32>,
}

/// CPU/memory limits and reservations (currently never filled in by the
/// parser; kept for API completeness).
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct ResourceLimits {
    pub cpu_limit: Option<String>,
    pub memory_limit: Option<String>,
    pub cpu_reservation: Option<String>,
    pub memory_reservation: Option<String>,
}
/// Networking view derived from services and Compose files.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct NetworkingConfig {
    /// Networks declared in the Compose files, with attached services.
    pub custom_networks: Vec<NetworkInfo>,
    /// Service-discovery signals found among the services.
    pub service_discovery: ServiceDiscoveryConfig,
    /// Detected load balancers and the services they presumably front.
    pub load_balancing: Vec<LoadBalancerConfig>,
    /// How the stack is reachable from outside.
    pub external_connectivity: ExternalConnectivity,
}

/// One named Compose network.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct NetworkInfo {
    /// Network name as declared under `networks:`.
    pub name: String,
    /// Network driver; not extracted by this module, always `None` here.
    pub driver: Option<String>,
    /// True when the network was declared `external: true`.
    pub external: bool,
    /// Services that list this network in their own `networks:` section.
    pub connected_services: Vec<String>,
}

/// Signals about how services find each other.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct ServiceDiscoveryConfig {
    /// True whenever any service exists (Compose provides DNS names).
    pub internal_dns: bool,
    /// Dedicated discovery tools detected (consul, etcd, zookeeper).
    pub external_tools: Vec<String>,
    /// True when service-mesh images were detected.
    pub service_mesh: bool,
}

/// A detected load balancer and its candidate backends.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct LoadBalancerConfig {
    /// Name of the load-balancing service.
    pub service: String,
    /// Which LB product was recognized (nginx, traefik, ...).
    pub lb_type: String,
    /// Names of services this LB presumably routes to.
    pub backends: Vec<String>,
}

/// Summary of host-facing exposure across the stack.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct ExternalConnectivity {
    /// Services publishing at least one host port.
    pub exposed_services: Vec<ExposedService>,
    /// Human-readable ingress pattern labels (e.g. "Reverse Proxy").
    pub ingress_patterns: Vec<String>,
    /// Services identified as API gateways by name or image.
    pub api_gateways: Vec<String>,
}

/// One service that publishes ports on the host.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct ExposedService {
    /// Service name.
    pub service: String,
    /// Host-side ports it publishes.
    pub external_ports: Vec<u16>,
    /// Distinct protocols across its published ports.
    pub protocols: Vec<String>,
    /// Heuristic: port 443/8443 published, or SSL/TLS env vars present.
    pub ssl_enabled: bool,
}

/// Docker assets grouped by inferred environment name.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct DockerEnvironment {
    /// Environment name ("default" when none could be inferred).
    pub name: String,
    /// Dockerfiles belonging to this environment.
    pub dockerfiles: Vec<PathBuf>,
    /// Compose files belonging to this environment.
    pub compose_files: Vec<PathBuf>,
    /// Reserved for per-environment overrides; not filled by this module.
    pub config_overrides: HashMap<String, String>,
}
/// Scans `project_root` for Docker assets and assembles a full
/// [`DockerAnalysis`].
///
/// Individual files that fail to parse are skipped (best-effort analysis);
/// only directory-traversal errors and downstream analysis errors propagate.
pub fn analyze_docker_infrastructure(project_root: &Path) -> Result<DockerAnalysis> {
    log::info!(
        "Starting Docker infrastructure analysis for: {}",
        project_root.display()
    );
    let dockerfile_paths = find_dockerfiles(project_root)?;
    let compose_paths = find_compose_files(project_root)?;
    log::debug!(
        "Found {} Dockerfiles and {} Compose files",
        dockerfile_paths.len(),
        compose_paths.len()
    );
    // Parse each discovered file, silently dropping anything unreadable.
    let mut parsed_dockerfiles = Vec::new();
    for path in dockerfile_paths {
        if let Ok(info) = parse_dockerfile(&path) {
            parsed_dockerfiles.push(info);
        }
    }
    let mut parsed_compose_files = Vec::new();
    for path in compose_paths {
        if let Ok(info) = parse_compose_file(&path) {
            parsed_compose_files.push(info);
        }
    }
    let services = extract_services_from_compose(&parsed_compose_files)?;
    let networking = analyze_networking(&services, &parsed_compose_files)?;
    Ok(DockerAnalysis {
        orchestration_pattern: determine_orchestration_pattern(&services, &networking),
        environments: analyze_environments(&parsed_dockerfiles, &parsed_compose_files),
        dockerfiles: parsed_dockerfiles,
        compose_files: parsed_compose_files,
        services,
        networking,
    })
}
fn find_dockerfiles(project_root: &Path) -> Result<Vec<PathBuf>> {
let mut dockerfiles = Vec::new();
fn collect_dockerfiles_recursive(dir: &Path, dockerfiles: &mut Vec<PathBuf>) -> Result<()> {
if dir.file_name().is_some_and(|name| {
name == "node_modules" || name == ".git" || name == "target" || name == ".next"
}) {
return Ok(());
}
for entry in fs::read_dir(dir)? {
let entry = entry?;
let path = entry.path();
if path.is_dir() {
collect_dockerfiles_recursive(&path, dockerfiles)?;
} else if let Some(filename) = path.file_name().and_then(|n| n.to_str())
&& is_dockerfile_name(filename)
{
dockerfiles.push(path);
}
}
Ok(())
}
collect_dockerfiles_recursive(project_root, &mut dockerfiles)?;
Ok(dockerfiles)
}
/// Returns true for filenames that look like Dockerfiles (case-insensitive):
/// `Dockerfile`, `Dockerfile.<suffix>`, or `<name>.dockerfile`.
fn is_dockerfile_name(filename: &str) -> bool {
    let lower = filename.to_lowercase();
    lower == "dockerfile" || lower.starts_with("dockerfile.") || lower.ends_with(".dockerfile")
}
fn find_compose_files(project_root: &Path) -> Result<Vec<PathBuf>> {
let mut compose_files = Vec::new();
fn collect_compose_files_recursive(dir: &Path, compose_files: &mut Vec<PathBuf>) -> Result<()> {
if dir.file_name().is_some_and(|name| {
name == "node_modules" || name == ".git" || name == "target" || name == ".next"
}) {
return Ok(());
}
for entry in fs::read_dir(dir)? {
let entry = entry?;
let path = entry.path();
if path.is_dir() {
collect_compose_files_recursive(&path, compose_files)?;
} else if let Some(filename) = path.file_name().and_then(|n| n.to_str())
&& is_compose_file_name(filename)
{
compose_files.push(path);
}
}
Ok(())
}
collect_compose_files_recursive(project_root, &mut compose_files)?;
Ok(compose_files)
}
/// Returns true for Compose filenames (case-insensitive): `docker-compose.yml`,
/// `compose.yaml`, and environment variants like `docker-compose.prod.yml`.
fn is_compose_file_name(filename: &str) -> bool {
    let lower = filename.to_lowercase();
    // All accepted names share the same shape: a `docker-compose.` or
    // `compose.` prefix plus a YAML extension. The plain base names
    // (`docker-compose.yml`, `compose.yaml`, ...) fit this shape too.
    let yaml_ext = lower.ends_with(".yml") || lower.ends_with(".yaml");
    yaml_ext && (lower.starts_with("docker-compose.") || lower.starts_with("compose."))
}
/// Parses one Dockerfile with line-oriented regexes, extracting the base
/// image, exposed ports, workdir, entrypoint/cmd, ENV lines and build stages.
///
/// # Errors
/// Fails only when the file cannot be read; unrecognized instructions are
/// simply ignored.
fn parse_dockerfile(path: &PathBuf) -> Result<DockerfileInfo> {
    let content = fs::read_to_string(path)?;
    let mut info = DockerfileInfo {
        path: path.clone(),
        environment: extract_environment_from_filename(path),
        base_image: None,
        exposed_ports: Vec::new(),
        workdir: None,
        entrypoint: None,
        env_vars: Vec::new(),
        build_stages: Vec::new(),
        is_multistage: false,
        instruction_count: 0,
    };
    let from_regex = Regex::new(r"(?i)^FROM\s+(.+?)(?:\s+AS\s+(.+))?$").unwrap();
    let expose_regex = Regex::new(r"(?i)^EXPOSE\s+(.+)$").unwrap();
    let workdir_regex = Regex::new(r"(?i)^WORKDIR\s+(.+)$").unwrap();
    let cmd_regex = Regex::new(r"(?i)^CMD\s+(.+)$").unwrap();
    let entrypoint_regex = Regex::new(r"(?i)^ENTRYPOINT\s+(.+)$").unwrap();
    let env_regex = Regex::new(r"(?i)^ENV\s+(.+)$").unwrap();
    for line in content.lines() {
        let line = line.trim();
        if line.is_empty() || line.starts_with('#') {
            continue;
        }
        // NOTE: continuation lines (trailing `\`) are counted individually,
        // so this is an upper bound on real instructions.
        info.instruction_count += 1;
        if let Some(captures) = from_regex.captures(line) {
            // Only the first FROM determines the reported base image.
            if info.base_image.is_none() {
                info.base_image = Some(captures.get(1).unwrap().as_str().trim().to_string());
            }
            // `FROM ... AS <stage>` marks a multi-stage build.
            if let Some(stage_name) = captures.get(2) {
                info.build_stages
                    .push(stage_name.as_str().trim().to_string());
                info.is_multistage = true;
            }
        }
        if let Some(captures) = expose_regex.captures(line) {
            let ports_str = captures.get(1).unwrap().as_str();
            for port in ports_str.split_whitespace() {
                // EXPOSE may carry a protocol suffix (`EXPOSE 8080/tcp`);
                // strip it so such ports are not silently dropped.
                let port = port.split('/').next().unwrap_or(port);
                if let Ok(port_num) = port.parse::<u16>() {
                    info.exposed_ports.push(port_num);
                }
            }
        }
        if let Some(captures) = workdir_regex.captures(line) {
            info.workdir = Some(captures.get(1).unwrap().as_str().trim().to_string());
        }
        // CMD is used as the entrypoint only when no ENTRYPOINT was seen yet.
        if let Some(captures) = cmd_regex.captures(line)
            && info.entrypoint.is_none()
        {
            info.entrypoint = Some(captures.get(1).unwrap().as_str().trim().to_string());
        }
        // An explicit ENTRYPOINT always wins.
        if let Some(captures) = entrypoint_regex.captures(line) {
            info.entrypoint = Some(captures.get(1).unwrap().as_str().trim().to_string());
        }
        if let Some(captures) = env_regex.captures(line) {
            info.env_vars
                .push(captures.get(1).unwrap().as_str().trim().to_string());
        }
    }
    Ok(info)
}
/// Parses a Compose file's top-level structure: version, service names,
/// network/volume names, and which networks/volumes are marked `external`.
///
/// # Errors
/// Fails when the file cannot be read or is not valid YAML.
fn parse_compose_file(path: &PathBuf) -> Result<ComposeFileInfo> {
    let content = fs::read_to_string(path)?;
    let yaml_value: serde_yaml::Value = serde_yaml::from_str(&content).map_err(|e| {
        crate::error::AnalysisError::DependencyParsing {
            file: path.display().to_string(),
            reason: format!("YAML parsing error: {}", e),
        }
    })?;
    let mut info = ComposeFileInfo {
        path: path.clone(),
        environment: extract_environment_from_filename(path),
        version: None,
        service_names: Vec::new(),
        networks: Vec::new(),
        volumes: Vec::new(),
        external_dependencies: Vec::new(),
    };
    // `version:` is optional (absent in the modern Compose spec).
    if let Some(version) = yaml_value.get("version").and_then(|v| v.as_str()) {
        info.version = Some(version.to_string());
    }
    // Only service names are collected here; full service bodies are parsed
    // separately by `extract_services_from_compose`.
    if let Some(services) = yaml_value.get("services").and_then(|s| s.as_mapping()) {
        for (service_name, _) in services {
            if let Some(name) = service_name.as_str() {
                info.service_names.push(name.to_string());
            }
        }
    }
    if let Some(networks) = yaml_value.get("networks").and_then(|n| n.as_mapping()) {
        for (network_name, network_config) in networks {
            if let Some(name) = network_name.as_str() {
                info.networks.push(name.to_string());
                // Networks declared `external: true` are managed outside this
                // file; record them as external dependencies.
                if let Some(config) = network_config.as_mapping()
                    && config
                        .get("external")
                        .and_then(|e| e.as_bool())
                        .unwrap_or(false)
                {
                    info.external_dependencies.push(format!("network:{}", name));
                }
            }
        }
    }
    if let Some(volumes) = yaml_value.get("volumes").and_then(|v| v.as_mapping()) {
        for (volume_name, volume_config) in volumes {
            if let Some(name) = volume_name.as_str() {
                info.volumes.push(name.to_string());
                // Same treatment for externally managed volumes.
                if let Some(config) = volume_config.as_mapping()
                    && config
                        .get("external")
                        .and_then(|e| e.as_bool())
                        .unwrap_or(false)
                {
                    info.external_dependencies.push(format!("volume:{}", name));
                }
            }
        }
    }
    Ok(info)
}
/// Infers a deployment environment from a Docker-related filename.
///
/// Two shapes are recognized (case-insensitive):
/// * `<base>.<env>.<ext>` — e.g. `docker-compose.prod.yml`: the token
///   between the last two dots names the environment;
/// * `<base>.<env>` — e.g. `Dockerfile.dev`: the extension itself names the
///   environment, provided the base looks like a Docker file.
///
/// Well-known tokens are normalized ("dev" -> "development", ...); any other
/// non-empty token of at most 10 characters is returned verbatim.
fn extract_environment_from_filename(path: &Path) -> Option<String> {
    let filename = path.file_name().and_then(|n| n.to_str())?;
    let filename_lower = filename.to_lowercase();
    let map_env = |env: &str| -> Option<String> {
        match env {
            "dev" | "development" | "local" => Some("development".to_string()),
            "prod" | "production" => Some("production".to_string()),
            "test" | "testing" => Some("test".to_string()),
            "stage" | "staging" => Some("staging".to_string()),
            _ if env.len() <= 10 && !env.is_empty() => Some(env.to_string()),
            _ => None,
        }
    };
    // Shape 1: `<base>.<env>.<ext>` — token between the last two dots.
    if let Some(last_dot) = filename_lower.rfind('.') {
        if let Some(env_dot_pos) = filename_lower[..last_dot].rfind('.') {
            let env = &filename_lower[env_dot_pos + 1..last_dot];
            if let Some(result) = map_env(env) {
                return Some(result);
            }
        }
    }
    // Shape 2: `<base>.<env>` — the extension names the environment.
    if let Some(dot_pos) = filename_lower.rfind('.') {
        let ext = &filename_lower[dot_pos + 1..];
        let base = &filename_lower[..dot_pos];
        // BUG FIX: a plain file-type extension is not an environment.
        // Previously a bare `docker-compose.yml` / `compose.yml` fell into
        // the catch-all above and reported "yml" as its environment.
        let is_file_type_ext = matches!(ext, "yml" | "yaml" | "dockerfile");
        let is_docker_base =
            base.contains("dockerfile") || base.contains("docker-compose") || base == "compose";
        if is_docker_base && !is_file_type_ext {
            if let Some(result) = map_env(ext) {
                return Some(result);
            }
        }
    }
    None
}
/// Re-reads each Compose file and materializes a [`DockerService`] for every
/// entry in its `services:` mapping.
///
/// # Errors
/// Fails on unreadable files or invalid YAML. Note the files are parsed a
/// second time here; `ComposeFileInfo` does not retain the YAML tree.
fn extract_services_from_compose(compose_files: &[ComposeFileInfo]) -> Result<Vec<DockerService>> {
    let mut services = Vec::new();
    for compose_file in compose_files {
        let content = fs::read_to_string(&compose_file.path)?;
        let yaml_value: serde_yaml::Value = serde_yaml::from_str(&content).map_err(|e| {
            crate::error::AnalysisError::DependencyParsing {
                file: compose_file.path.display().to_string(),
                reason: format!("YAML parsing error: {}", e),
            }
        })?;
        if let Some(services_yaml) = yaml_value.get("services").and_then(|s| s.as_mapping()) {
            for (service_name, service_config) in services_yaml {
                // Entries whose key is not a string or whose body is not a
                // mapping are skipped.
                if let (Some(name), Some(config)) =
                    (service_name.as_str(), service_config.as_mapping())
                {
                    let service = parse_docker_service(name, config, &compose_file.path)?;
                    services.push(service);
                }
            }
        }
    }
    Ok(services)
}
/// Builds a [`DockerService`] from one entry of a Compose `services:` mapping.
///
/// Unrecognized or malformed sub-sections are skipped rather than failing;
/// as written, this function never returns `Err` (the `Result` keeps the
/// signature uniform with the other parsers).
fn parse_docker_service(
    name: &str,
    config: &serde_yaml::Mapping,
    compose_file: &Path,
) -> Result<DockerService> {
    // Start from a neutral default and fill in whatever the YAML provides.
    let mut service = DockerService {
        name: name.to_string(),
        compose_file: compose_file.to_path_buf(),
        // Placeholder used when neither `image:` nor `build:` is present.
        image_or_build: ImageOrBuild::Image("unknown".to_string()),
        ports: Vec::new(),
        environment: HashMap::new(),
        depends_on: Vec::new(),
        networks: Vec::new(),
        volumes: Vec::new(),
        health_check: None,
        restart_policy: None,
        resource_limits: None,
    };
    // `image:` takes precedence over `build:` when both are present.
    if let Some(image) = config.get("image").and_then(|i| i.as_str()) {
        service.image_or_build = ImageOrBuild::Image(image.to_string());
    } else if let Some(build_config) = config.get("build") {
        if let Some(context) = build_config.as_str() {
            // Short form: `build: ./dir`.
            service.image_or_build = ImageOrBuild::Build {
                context: context.to_string(),
                dockerfile: None,
                args: HashMap::new(),
            };
        } else if let Some(build_mapping) = build_config.as_mapping() {
            // Long form: `build: { context, dockerfile, args }`.
            let context = build_mapping
                .get("context")
                .and_then(|c| c.as_str())
                .unwrap_or(".")
                .to_string();
            let dockerfile = build_mapping
                .get("dockerfile")
                .and_then(|d| d.as_str())
                .map(|s| s.to_string());
            let mut args = HashMap::new();
            // Only string-valued build args are kept.
            if let Some(args_config) = build_mapping.get("args").and_then(|a| a.as_mapping()) {
                for (key, value) in args_config {
                    if let (Some(k), Some(v)) = (key.as_str(), value.as_str()) {
                        args.insert(k.to_string(), v.to_string());
                    }
                }
            }
            service.image_or_build = ImageOrBuild::Build {
                context,
                dockerfile,
                args,
            };
        }
    }
    if let Some(ports_config) = config.get("ports").and_then(|p| p.as_sequence()) {
        for port_item in ports_config {
            if let Some(port_mapping) = parse_port_mapping(port_item) {
                service.ports.push(port_mapping);
            }
        }
    }
    if let Some(env_config) = config.get("environment") {
        parse_environment_variables(env_config, &mut service.environment);
    }
    // `depends_on:` can be a plain list or a mapping (with conditions);
    // only the dependency names are retained.
    if let Some(depends_config) = config.get("depends_on") {
        if let Some(depends_sequence) = depends_config.as_sequence() {
            for dep in depends_sequence {
                if let Some(dep_name) = dep.as_str() {
                    service.depends_on.push(dep_name.to_string());
                }
            }
        } else if let Some(depends_mapping) = depends_config.as_mapping() {
            for (dep_name, _) in depends_mapping {
                if let Some(name) = dep_name.as_str() {
                    service.depends_on.push(name.to_string());
                }
            }
        }
    }
    // `networks:` likewise: list form or mapping form (per-network options
    // are discarded).
    if let Some(networks_config) = config.get("networks") {
        if let Some(networks_sequence) = networks_config.as_sequence() {
            for network in networks_sequence {
                if let Some(network_name) = network.as_str() {
                    service.networks.push(network_name.to_string());
                }
            }
        } else if let Some(networks_mapping) = networks_config.as_mapping() {
            for (network_name, _) in networks_mapping {
                if let Some(name) = network_name.as_str() {
                    service.networks.push(name.to_string());
                }
            }
        }
    }
    if let Some(volumes_config) = config.get("volumes").and_then(|v| v.as_sequence()) {
        for volume_item in volumes_config {
            if let Some(volume_mount) = parse_volume_mount(volume_item) {
                service.volumes.push(volume_mount);
            }
        }
    }
    if let Some(restart) = config.get("restart").and_then(|r| r.as_str()) {
        service.restart_policy = Some(restart.to_string());
    }
    // The healthcheck is captured only when `test` is a plain string; the
    // list form (["CMD", ...]) is ignored here.
    if let Some(healthcheck_config) = config.get("healthcheck").and_then(|h| h.as_mapping())
        && let Some(test) = healthcheck_config.get("test").and_then(|t| t.as_str())
    {
        service.health_check = Some(HealthCheck {
            test: test.to_string(),
            interval: healthcheck_config
                .get("interval")
                .and_then(|i| i.as_str())
                .map(|s| s.to_string()),
            timeout: healthcheck_config
                .get("timeout")
                .and_then(|t| t.as_str())
                .map(|s| s.to_string()),
            retries: healthcheck_config
                .get("retries")
                .and_then(|r| r.as_u64())
                .map(|r| r as u32),
        });
    }
    Ok(service)
}
/// Converts one entry of a Compose `ports:` list into a [`PortMapping`].
///
/// Supports the short-syntax forms:
/// * `8080` (YAML number or string) — container port only, not published;
/// * `"8080:80"` — host:container, published;
/// * `"127.0.0.1:8080:80"` — ip:host:container, published (the bind IP is
///   not retained in the analysis model);
/// * an optional `/tcp` or `/udp` protocol suffix on the string forms.
///
/// Returns `None` for anything else (e.g. the long mapping syntax or port
/// ranges).
fn parse_port_mapping(port_value: &serde_yaml::Value) -> Option<PortMapping> {
    // Bare YAML number: `- 8080`.
    if let Some(port_num) = port_value.as_u64() {
        return Some(PortMapping {
            host_port: None,
            // `try_from` instead of `as u16`: reject rather than truncate
            // out-of-range values.
            container_port: u16::try_from(port_num).ok()?,
            protocol: "tcp".to_string(),
            exposed_to_host: false,
        });
    }
    let port_str = port_value.as_str()?;
    // Split off an optional protocol suffix: "8080:80/udp" -> ("8080:80", "udp").
    let (spec, protocol) = match port_str.split_once('/') {
        Some((spec, proto)) => (spec, proto.to_string()),
        None => (port_str, "tcp".to_string()),
    };
    let parts: Vec<&str> = spec.split(':').collect();
    match parts.as_slice() {
        // Container port only: not published on the host.
        [container] => Some(PortMapping {
            host_port: None,
            container_port: container.parse().ok()?,
            protocol,
            exposed_to_host: false,
        }),
        // "host:container" — published on all interfaces.
        [host, container] => Some(PortMapping {
            host_port: Some(host.parse().ok()?),
            container_port: container.parse().ok()?,
            protocol,
            exposed_to_host: true,
        }),
        // "ip:host:container" — published on a specific interface.
        [_ip, host, container] => Some(PortMapping {
            host_port: Some(host.parse().ok()?),
            container_port: container.parse().ok()?,
            protocol,
            exposed_to_host: true,
        }),
        _ => None,
    }
}
/// Converts a short-syntax volume string (`source:target[:options]`) into a
/// [`VolumeMount`]. Returns `None` for non-strings or entries with no target.
fn parse_volume_mount(volume_value: &serde_yaml::Value) -> Option<VolumeMount> {
    let volume_str = volume_value.as_str()?;
    let parts: Vec<&str> = volume_str.split(':').collect();
    if parts.len() < 2 {
        return None;
    }
    let source = parts[0];
    // Host paths (absolute or relative) are bind mounts; any other source is
    // treated as a named volume.
    let mount_type = if source.starts_with('/') || source.starts_with('.') {
        "bind"
    } else {
        "volume"
    };
    Some(VolumeMount {
        source: source.to_string(),
        target: parts[1].to_string(),
        mount_type: mount_type.to_string(),
        read_only: parts.get(2).is_some_and(|&opt| opt == "ro"),
    })
}
/// Populates `env_map` from a Compose `environment:` section, which may be
/// either a mapping (`KEY: value`) or a sequence of `KEY=value` strings.
fn parse_environment_variables(
    env_value: &serde_yaml::Value,
    env_map: &mut HashMap<String, String>,
) {
    if let Some(mapping) = env_value.as_mapping() {
        for (key, value) in mapping {
            let Some(key_str) = key.as_str() else { continue };
            // Non-string values (numbers, booleans, null) collapse to "".
            env_map.insert(
                key_str.to_string(),
                value.as_str().unwrap_or("").to_string(),
            );
        }
    } else if let Some(sequence) = env_value.as_sequence() {
        for item in sequence {
            let Some(entry) = item.as_str() else { continue };
            match entry.split_once('=') {
                Some((key, value)) => {
                    env_map.insert(key.to_string(), value.to_string());
                }
                // A bare KEY (no '=') is recorded with an empty value.
                None => {
                    env_map.insert(entry.to_string(), String::new());
                }
            }
        }
    }
}
/// Builds the [`NetworkingConfig`] view: declared networks (with the
/// services attached to each), service-discovery hints, load balancers and
/// externally exposed services.
fn analyze_networking(
    services: &[DockerService],
    compose_files: &[ComposeFileInfo],
) -> Result<NetworkingConfig> {
    // First, index which services attach to each named network.
    let mut attachments: HashMap<String, Vec<String>> = HashMap::new();
    for service in services {
        for network in &service.networks {
            attachments
                .entry(network.clone())
                .or_default()
                .push(service.name.clone());
        }
    }
    // Then materialize one NetworkInfo per declared network.
    let mut custom_networks = Vec::new();
    for compose_file in compose_files {
        for network_name in &compose_file.networks {
            let external_marker = format!("network:{}", network_name);
            custom_networks.push(NetworkInfo {
                name: network_name.clone(),
                driver: None,
                external: compose_file
                    .external_dependencies
                    .contains(&external_marker),
                connected_services: attachments.get(network_name).cloned().unwrap_or_default(),
            });
        }
    }
    let service_discovery = ServiceDiscoveryConfig {
        // Compose gives every service a DNS name on its networks, so any
        // non-empty stack has internal DNS.
        internal_dns: !services.is_empty(),
        external_tools: detect_service_discovery_tools(services),
        service_mesh: detect_service_mesh(services),
    };
    Ok(NetworkingConfig {
        custom_networks,
        service_discovery,
        load_balancing: detect_load_balancers(services),
        external_connectivity: analyze_external_connectivity(services),
    })
}
/// Classifies the deployment shape from service count, image names and
/// networking features. Purely heuristic; `Mixed` is never produced here.
fn determine_orchestration_pattern(
    services: &[DockerService],
    networking: &NetworkingConfig,
) -> OrchestrationPattern {
    if services.len() <= 1 {
        return OrchestrationPattern::SingleContainer;
    }
    // "Backends" are all services that are not an obvious reverse proxy;
    // locally built services always count as backends.
    let is_proxy = |s: &&DockerService| match &s.image_or_build {
        ImageOrBuild::Image(img) => {
            img.contains("nginx") || img.contains("proxy") || img.contains("traefik")
        }
        _ => false,
    };
    let has_multiple_backends = services.iter().filter(|s| !is_proxy(s)).count() > 2;
    let has_service_discovery = networking.service_discovery.internal_dns
        || !networking.service_discovery.external_tools.is_empty();
    let has_message_queues = services.iter().any(|s| match &s.image_or_build {
        ImageOrBuild::Image(img) => {
            img.contains("redis")
                || img.contains("rabbitmq")
                || img.contains("kafka")
                || img.contains("nats")
        }
        _ => false,
    });
    // Precedence: mesh > event-driven > microservices > plain compose.
    if networking.service_discovery.service_mesh {
        OrchestrationPattern::ServiceMesh
    } else if has_message_queues && has_multiple_backends {
        OrchestrationPattern::EventDriven
    } else if has_multiple_backends && has_service_discovery {
        OrchestrationPattern::Microservices
    } else {
        OrchestrationPattern::DockerCompose
    }
}
/// Lists dedicated service-discovery tooling (consul/etcd/zookeeper) found
/// among service images, sorted and deduplicated.
fn detect_service_discovery_tools(services: &[DockerService]) -> Vec<String> {
    const TOOLS: [&str; 3] = ["consul", "etcd", "zookeeper"];
    let mut tools: Vec<String> = Vec::new();
    for service in services {
        let ImageOrBuild::Image(image) = &service.image_or_build else {
            continue;
        };
        for tool in TOOLS {
            if image.contains(tool) {
                tools.push(tool.to_string());
            }
        }
    }
    tools.sort();
    tools.dedup();
    tools
}
/// True when any service runs a service-mesh proxy/control-plane image.
fn detect_service_mesh(services: &[DockerService]) -> bool {
    let markers = ["istio", "linkerd", "envoy", "consul-connect"];
    services.iter().any(|s| match &s.image_or_build {
        ImageOrBuild::Image(image) => markers.iter().any(|m| image.contains(m)),
        _ => false,
    })
}
/// Finds well-known load-balancer/proxy images and pairs each with the other
/// services it presumably fronts.
fn detect_load_balancers(services: &[DockerService]) -> Vec<LoadBalancerConfig> {
    // Checked in priority order; the first substring match names the type.
    const LB_IMAGES: [&str; 5] = ["nginx", "traefik", "haproxy", "envoy", "kong"];
    let mut load_balancers = Vec::new();
    for service in services {
        let lb_type = match &service.image_or_build {
            ImageOrBuild::Image(image) => {
                LB_IMAGES.iter().copied().find(|name| image.contains(name))
            }
            _ => None,
        };
        let Some(lb_type) = lb_type else { continue };
        // NOTE(review): backends EXCLUDE services this LB lists in
        // `depends_on`, which looks inverted (an LB typically depends on its
        // backends) — confirm the intended semantics before changing it.
        let backends: Vec<String> = services
            .iter()
            .filter(|s| s.name != service.name && !service.depends_on.contains(&s.name))
            .map(|s| s.name.clone())
            .collect();
        if !backends.is_empty() {
            load_balancers.push(LoadBalancerConfig {
                service: service.name.clone(),
                lb_type: lb_type.to_string(),
                backends,
            });
        }
    }
    load_balancers
}
/// Summarizes host-facing exposure: which services publish ports, which look
/// like API gateways, and which high-level ingress patterns apply.
fn analyze_external_connectivity(services: &[DockerService]) -> ExternalConnectivity {
    let mut exposed_services = Vec::new();
    let mut ingress_patterns = Vec::new();
    let mut api_gateways = Vec::new();
    for service in services {
        let mut external_ports = Vec::new();
        let mut protocols = Vec::new();
        for port in &service.ports {
            if port.exposed_to_host {
                if let Some(host_port) = port.host_port {
                    external_ports.push(host_port);
                }
                protocols.push(port.protocol.clone());
            }
        }
        if !external_ports.is_empty() {
            // Heuristic: standard TLS ports, or SSL/TLS-named env vars.
            let ssl_enabled = external_ports.contains(&443)
                || external_ports.contains(&8443)
                || service
                    .environment
                    .keys()
                    .any(|k| k.to_lowercase().contains("ssl") || k.to_lowercase().contains("tls"));
            exposed_services.push(ExposedService {
                service: service.name.clone(),
                external_ports,
                // Round-trip through a HashSet to deduplicate protocols.
                protocols: protocols
                    .into_iter()
                    .collect::<std::collections::HashSet<_>>()
                    .into_iter()
                    .collect(),
                ssl_enabled,
            });
        }
        // A service can be flagged as a gateway by its name...
        if service.name.to_lowercase().contains("gateway")
            || service.name.to_lowercase().contains("api")
            || service.name.to_lowercase().contains("proxy")
        {
            api_gateways.push(service.name.clone());
        }
        // ...or by running a known gateway image (avoiding duplicates).
        if let ImageOrBuild::Image(image) = &service.image_or_build
            && (image.contains("kong")
                || image.contains("zuul")
                || image.contains("ambassador")
                || image.contains("traefik"))
            && !api_gateways.contains(&service.name)
        {
            api_gateways.push(service.name.clone());
        }
    }
    // Label the overall ingress shape from the counts above.
    if exposed_services.len() == 1 && api_gateways.len() == 1 {
        ingress_patterns.push("Single API Gateway".to_string());
    } else if exposed_services.len() > 1 && api_gateways.is_empty() {
        ingress_patterns.push("Multiple Direct Entry Points".to_string());
    } else if !api_gateways.is_empty() {
        ingress_patterns.push("API Gateway Pattern".to_string());
    }
    let has_reverse_proxy = services.iter().any(|s| {
        if let ImageOrBuild::Image(image) = &s.image_or_build {
            image.contains("nginx") || image.contains("apache") || image.contains("caddy")
        } else {
            false
        }
    });
    if has_reverse_proxy {
        ingress_patterns.push("Reverse Proxy".to_string());
    }
    ExternalConnectivity {
        exposed_services,
        ingress_patterns,
        api_gateways,
    }
}
/// Groups Dockerfiles and Compose files by the environment inferred from
/// their filenames ("default" when none was inferred).
///
/// The result is sorted by environment name: `HashMap` iteration order is
/// unspecified, so without the sort the output order would be
/// non-deterministic between runs.
fn analyze_environments(
    dockerfiles: &[DockerfileInfo],
    compose_files: &[ComposeFileInfo],
) -> Vec<DockerEnvironment> {
    // Returns the (lazily created) bucket for an environment name.
    fn bucket<'a>(
        environments: &'a mut HashMap<String, DockerEnvironment>,
        env: &Option<String>,
    ) -> &'a mut DockerEnvironment {
        let env_name = env.clone().unwrap_or_else(|| "default".to_string());
        environments
            .entry(env_name.clone())
            .or_insert_with(|| DockerEnvironment {
                name: env_name,
                dockerfiles: Vec::new(),
                compose_files: Vec::new(),
                config_overrides: HashMap::new(),
            })
    }
    let mut environments: HashMap<String, DockerEnvironment> = HashMap::new();
    for dockerfile in dockerfiles {
        bucket(&mut environments, &dockerfile.environment)
            .dockerfiles
            .push(dockerfile.path.clone());
    }
    for compose_file in compose_files {
        bucket(&mut environments, &compose_file.environment)
            .compose_files
            .push(compose_file.path.clone());
    }
    let mut result: Vec<DockerEnvironment> = environments.into_values().collect();
    result.sort_by(|a, b| a.name.cmp(&b.name));
    result
}
/// Derives a Compose-style service name for a Dockerfile: the name of its
/// containing directory, or the project directory's own name when the
/// Dockerfile sits at the project root. Falls back to "app".
fn suggest_service_name(dockerfile_path: &Path, project_root: &Path) -> String {
    let dir = dockerfile_path.parent().unwrap_or(dockerfile_path);
    let name_source = if dir == project_root { project_root } else { dir };
    let raw_name = name_source
        .file_name()
        .and_then(|n| n.to_str())
        .unwrap_or("app");
    sanitize_service_name(raw_name)
}
/// Normalizes a directory name into a DNS-label-like service name:
/// lowercase alphanumerics separated by single hyphens, with no leading or
/// trailing hyphen. Returns "app" when nothing survives.
fn sanitize_service_name(name: &str) -> String {
    let mut result = String::with_capacity(name.len());
    for c in name.to_lowercase().chars() {
        if c.is_ascii_alphanumeric() {
            result.push(c);
        } else if !result.is_empty() && !result.ends_with('-') {
            // Collapse each run of non-alphanumerics into one hyphen and
            // drop a leading run entirely.
            result.push('-');
        }
    }
    // A trailing run left exactly one hyphen; strip it.
    if result.ends_with('-') {
        result.pop();
    }
    if result.is_empty() {
        "app".to_string()
    } else {
        result
    }
}
/// Returns the Dockerfile's directory relative to the project root, for use
/// as the Compose build context. Yields "." for the root itself and for
/// paths that are not under the root at all.
fn compute_build_context(dockerfile_path: &Path, project_root: &Path) -> String {
    let dir = dockerfile_path.parent().unwrap_or(dockerfile_path);
    match dir.strip_prefix(project_root) {
        Ok(relative) if !relative.as_os_str().is_empty() => {
            relative.to_string_lossy().into_owned()
        }
        _ => ".".to_string(),
    }
}
/// Guesses a conventional service port from a base-image name (e.g. `node`
/// -> 3000, `nginx` -> 80). Registry prefixes and tags are stripped first:
/// `gcr.io/proj/node:18` is matched as `node`. Returns `None` for
/// unrecognized images.
fn infer_default_port(base_image: &Option<String>) -> Option<u16> {
    let image_lower = base_image.as_ref()?.to_lowercase();
    // Keep only the final path segment, minus the tag.
    let last_segment = image_lower.rsplit('/').next().unwrap_or(&image_lower);
    let image_name = last_segment.split(':').next().unwrap_or(last_segment);
    let starts = |prefix: &str| image_name.starts_with(prefix);
    let has = |needle: &str| image_name.contains(needle);
    // Checked in priority order; the first matching rule wins.
    let port = if starts("node") {
        3000
    } else if has("python") {
        8000
    } else if has("flask") {
        5000
    } else if has("django") || has("fastapi") {
        8000
    } else if starts("golang") || starts("go") || starts("rust") {
        8080
    } else if starts("nginx") || starts("httpd") || starts("apache") || starts("caddy") {
        80
    } else if has("openjdk") || has("java") || has("tomcat") || has("spring") {
        8080
    } else if starts("ruby") || has("rails") {
        3000
    } else if starts("php") || has("dotnet") || has("aspnet") {
        80
    } else if has("elixir") || has("phoenix") {
        4000
    } else {
        return None;
    };
    Some(port)
}
/// Finds every Dockerfile in the project and enriches each with deployment
/// suggestions: a service name, a build context and a likely port.
///
/// Dockerfiles that cannot be parsed are skipped silently.
pub fn discover_dockerfiles_for_deployment(
    project_root: &Path,
) -> Result<Vec<DiscoveredDockerfile>> {
    let mut discovered = Vec::new();
    for path in find_dockerfiles(project_root)? {
        let Ok(info) = parse_dockerfile(&path) else {
            continue;
        };
        // Prefer an explicitly exposed port; otherwise guess from the image.
        let suggested_port = info
            .exposed_ports
            .first()
            .copied()
            .or_else(|| infer_default_port(&info.base_image));
        discovered.push(DiscoveredDockerfile {
            build_context: compute_build_context(&path, project_root),
            suggested_service_name: suggest_service_name(&path, project_root),
            suggested_port,
            base_image: info.base_image,
            is_multistage: info.is_multistage,
            environment: info.environment,
            path,
        });
    }
    Ok(discovered)
}
#[cfg(test)]
mod tests {
    // Unit tests for the pure filename/name/port helpers; the filesystem- and
    // YAML-backed functions are not covered here.
    use super::*;

    #[test]
    fn test_is_dockerfile_name() {
        assert!(is_dockerfile_name("Dockerfile"));
        assert!(is_dockerfile_name("dockerfile"));
        assert!(is_dockerfile_name("Dockerfile.dev"));
        assert!(is_dockerfile_name("dockerfile.prod"));
        assert!(is_dockerfile_name("api.dockerfile"));
        assert!(!is_dockerfile_name("README.md"));
        assert!(!is_dockerfile_name("package.json"));
    }

    #[test]
    fn test_is_compose_file_name() {
        assert!(is_compose_file_name("docker-compose.yml"));
        assert!(is_compose_file_name("docker-compose.yaml"));
        assert!(is_compose_file_name("docker-compose.dev.yml"));
        assert!(is_compose_file_name("docker-compose.prod.yaml"));
        assert!(is_compose_file_name("compose.yml"));
        assert!(is_compose_file_name("compose.yaml"));
        assert!(!is_compose_file_name("README.md"));
        assert!(!is_compose_file_name("package.json"));
    }

    #[test]
    fn test_extract_environment_from_filename() {
        assert_eq!(
            extract_environment_from_filename(&PathBuf::from("Dockerfile.dev")),
            Some("development".to_string())
        );
        assert_eq!(
            extract_environment_from_filename(&PathBuf::from("docker-compose.prod.yml")),
            Some("production".to_string())
        );
        assert_eq!(
            extract_environment_from_filename(&PathBuf::from("Dockerfile")),
            None
        );
    }

    #[test]
    fn test_suggest_service_name_from_subdirectory() {
        let path = PathBuf::from("/project/services/api/Dockerfile");
        let root = PathBuf::from("/project");
        assert_eq!(suggest_service_name(&path, &root), "api");
    }

    #[test]
    fn test_suggest_service_name_from_root() {
        // At the project root, the project directory's name is used.
        let path = PathBuf::from("/project/Dockerfile");
        let root = PathBuf::from("/project");
        assert_eq!(suggest_service_name(&path, &root), "project");
    }

    #[test]
    fn test_suggest_service_name_nested() {
        let path = PathBuf::from("/myapp/apps/web-frontend/Dockerfile");
        let root = PathBuf::from("/myapp");
        assert_eq!(suggest_service_name(&path, &root), "web-frontend");
    }

    #[test]
    fn test_suggest_service_name_sanitizes() {
        // Underscores are normalized to hyphens.
        let path = PathBuf::from("/project/my_service_api/Dockerfile");
        let root = PathBuf::from("/project");
        assert_eq!(suggest_service_name(&path, &root), "my-service-api");
    }

    #[test]
    fn test_sanitize_service_name() {
        assert_eq!(sanitize_service_name("My_Service"), "my-service");
        assert_eq!(sanitize_service_name("api-v2"), "api-v2");
        assert_eq!(sanitize_service_name("__leading__"), "leading");
        assert_eq!(sanitize_service_name("trailing--"), "trailing");
        assert_eq!(sanitize_service_name("multi---hyphens"), "multi-hyphens");
        assert_eq!(sanitize_service_name("special@#chars!"), "special-chars");
        assert_eq!(sanitize_service_name(""), "app");
    }

    #[test]
    fn test_compute_build_context_subdirectory() {
        let path = PathBuf::from("/project/services/api/Dockerfile");
        let root = PathBuf::from("/project");
        assert_eq!(compute_build_context(&path, &root), "services/api");
    }

    #[test]
    fn test_compute_build_context_root() {
        let path = PathBuf::from("/project/Dockerfile");
        let root = PathBuf::from("/project");
        assert_eq!(compute_build_context(&path, &root), ".");
    }

    #[test]
    fn test_compute_build_context_deep_nested() {
        let path = PathBuf::from("/myapp/packages/frontend/apps/web/Dockerfile");
        let root = PathBuf::from("/myapp");
        assert_eq!(
            compute_build_context(&path, &root),
            "packages/frontend/apps/web"
        );
    }

    #[test]
    fn test_infer_default_port_node() {
        assert_eq!(infer_default_port(&Some("node:18".to_string())), Some(3000));
        assert_eq!(
            infer_default_port(&Some("node:18-alpine".to_string())),
            Some(3000)
        );
    }

    #[test]
    fn test_infer_default_port_nginx() {
        assert_eq!(
            infer_default_port(&Some("nginx:latest".to_string())),
            Some(80)
        );
        assert_eq!(
            infer_default_port(&Some("nginx:1.25-alpine".to_string())),
            Some(80)
        );
    }

    #[test]
    fn test_infer_default_port_python() {
        assert_eq!(
            infer_default_port(&Some("python:3.11".to_string())),
            Some(8000)
        );
    }

    #[test]
    fn test_infer_default_port_go() {
        assert_eq!(
            infer_default_port(&Some("golang:1.21".to_string())),
            Some(8080)
        );
    }

    #[test]
    fn test_infer_default_port_java() {
        assert_eq!(
            infer_default_port(&Some("openjdk:17".to_string())),
            Some(8080)
        );
    }

    #[test]
    fn test_infer_default_port_ruby() {
        assert_eq!(
            infer_default_port(&Some("ruby:3.2".to_string())),
            Some(3000)
        );
    }

    #[test]
    fn test_infer_default_port_with_registry() {
        // Registry and repository prefixes are stripped before matching.
        assert_eq!(
            infer_default_port(&Some("gcr.io/my-project/node:18".to_string())),
            Some(3000)
        );
        assert_eq!(
            infer_default_port(&Some("docker.io/library/nginx:latest".to_string())),
            Some(80)
        );
    }

    #[test]
    fn test_infer_default_port_unknown() {
        assert_eq!(
            infer_default_port(&Some("custom-base:latest".to_string())),
            None
        );
        assert_eq!(infer_default_port(&None), None);
    }
}