use colored::Colorize;
use std::collections::BTreeMap;
use std::fs;
use std::io::Write;
use std::process::exit;
use crate::containered::{generate_dockerfile, generate_dockerignore};
use crate::entities::compose_opts::{ComposeOpts, ComposeServiceOpts};
use crate::entities::containered_opts::PortBinding;
use crate::entities::custom_command::CustomCommand;
use crate::entities::environment::RunEnvironment;
use crate::entities::info::ShortName;
use crate::entities::requirements::Requirement;
use crate::entities::variables::Variable;
use crate::pipelines::DescribedPipeline;
use crate::project::DeployerProjectOptions;
pub async fn generate_compose_file(
config: &DeployerProjectOptions,
env: &RunEnvironment<'_>,
pipeline: &DescribedPipeline,
opts: &ComposeOpts,
exclusive_exec_tag: &str,
vars: &BTreeMap<String, Variable>,
) -> anyhow::Result<()> {
generate_dockerfile(config, env, pipeline, &opts.app, exclusive_exec_tag, vars).await?;
let dockerfile_name = format!("Dockerfile.{exclusive_exec_tag}");
let image_name = format!("{}/{}", config.project_name, pipeline.title);
let mut compose = String::new();
compose.push_str("# Generated by Deployer 2.X\n");
compose.push_str("services:\n");
compose.push_str(" app:\n");
compose.push_str(" build:\n");
compose.push_str(" context: .\n");
compose.push_str(&format!(" dockerfile: {dockerfile_name}\n"));
compose.push_str(&format!(" image: {image_name}\n"));
compose.push_str(&format!(" container_name: {exclusive_exec_tag}-app\n"));
if !opts.app.port_bindings.is_empty() {
compose.push_str(" ports:\n");
for PortBinding { from, to } in &opts.app.port_bindings {
compose.push_str(&format!(" - \"{from}:{to}\"\n"));
}
}
if opts.app.allow_internal_host_bind && opts.effective_executor().is_docker() {
compose.push_str(" extra_hosts:\n");
compose.push_str(" - \"host.docker.internal:host-gateway\"\n");
}
compose.push_str(" volumes:\n");
compose.push_str(&format!(
" - {}:/app/artifacts\n",
env.artifacts_dir.join(&pipeline.title).to_string_lossy()
));
if !opts.services.is_empty() {
compose.push_str(" depends_on:\n");
for (service_name, service_opts) in &opts.services {
if service_opts.healthcheck_cmd.is_some() {
compose.push_str(&format!(" {service_name}:\n"));
compose.push_str(" condition: service_healthy\n");
} else {
compose.push_str(&format!(" {service_name}:\n"));
compose.push_str(" condition: service_started\n");
}
}
}
let mut app_env: BTreeMap<String, String> = BTreeMap::new();
app_env.insert("DEPLOYER_CONTAINERED_RUN".to_string(), "1".to_string());
for (placeholder, short_name) in &opts.with {
if let Some((_, variable)) = config
.variables
.iter()
.find(|(k, _): &(&ShortName, _)| k.as_str() == short_name.as_str())
&& let Ok(value) = variable.get_value(env).await
{
app_env.insert(placeholder.clone(), value);
}
}
if !app_env.is_empty() {
compose.push_str(" environment:\n");
for (key, value) in &app_env {
compose.push_str(&format!(" {key}: \"{}\"\n", escape_yaml_value(value)));
}
}
compose.push('\n');
for (service_name, service_opts) in &opts.services {
write_service(&mut compose, service_name, service_opts, env, &opts.with, config).await?;
}
let named_volumes = collect_named_volumes(opts);
if !named_volumes.is_empty() {
compose.push_str("volumes:\n");
for vol_name in &named_volumes {
compose.push_str(&format!(" {vol_name}:\n"));
}
}
let compose_filepath = env.run_dir.join(format!("docker-compose.{exclusive_exec_tag}.yaml"));
let mut compose_file = fs::File::options()
.create(true)
.write(true)
.truncate(true)
.open(&compose_filepath)
.map_err(|e| {
anyhow::anyhow!(
"Can't open `docker-compose.{}.yaml` due to: `{}`; filepath: {:?}",
exclusive_exec_tag,
e,
compose_filepath
)
})?;
compose_file.write_all(compose.as_bytes())?;
Ok(())
}
/// Appends one dependent service definition to `compose`.
///
/// Values in `opts.environment` may reference placeholders from `with`;
/// those placeholders are substituted with resolved project variables
/// before being written.
async fn write_service(
  compose: &mut String,
  name: &str,
  opts: &ComposeServiceOpts,
  env: &RunEnvironment<'_>,
  with: &BTreeMap<String, ShortName>,
  config: &DeployerProjectOptions,
) -> anyhow::Result<()> {
  compose.push_str(&format!("  {name}:\n"));
  compose.push_str(&format!("    image: {}\n", opts.image));
  if let Some(cmd) = &opts.command {
    compose.push_str(&format!("    command: {cmd}\n"));
  }
  if !opts.ports.is_empty() {
    compose.push_str("    ports:\n");
    for PortBinding { from, to } in &opts.ports {
      compose.push_str(&format!("      - \"{from}:{to}\"\n"));
    }
  }
  // FIX: only emit the `environment:` header when there is at least one entry.
  // The old condition (`|| !with.is_empty()`) could produce a bare header with
  // no entries beneath it — a null mapping that docker compose rejects — and
  // resolved the `with` variables even when nothing would use them.
  if !opts.environment.is_empty() {
    compose.push_str("    environment:\n");
    let resolved_with = resolve_with_vars(with, env, config).await;
    for (key, value) in &opts.environment {
      let resolved = resolve_placeholders(value, &resolved_with);
      compose.push_str(&format!("      {key}: \"{}\"\n", escape_yaml_value(&resolved)));
    }
  }
  if !opts.volumes.is_empty() {
    compose.push_str("    volumes:\n");
    for volume in &opts.volumes {
      compose.push_str(&format!("      - {volume}\n"));
    }
  }
  if let Some(healthcheck_cmd) = &opts.healthcheck_cmd {
    compose.push_str("    healthcheck:\n");
    compose.push_str(&format!(
      "      test: [\"CMD-SHELL\", \"{}\"]\n",
      escape_yaml_value(healthcheck_cmd)
    ));
    compose.push_str(&format!("      interval: {}s\n", opts.healthcheck_interval_secs));
    compose.push_str(&format!("      retries: {}\n", opts.healthcheck_retries));
  }
  compose.push('\n');
  Ok(())
}
async fn resolve_with_vars(
with: &BTreeMap<String, ShortName>,
env: &RunEnvironment<'_>,
config: &DeployerProjectOptions,
) -> BTreeMap<String, String> {
let mut resolved = BTreeMap::new();
for (placeholder, short_name) in with {
if let Some((_, variable)) = config
.variables
.iter()
.find(|(k, _): &(&ShortName, _)| k.as_str() == short_name.as_str())
&& let Ok(value) = variable.get_value(env).await
{
resolved.insert(placeholder.clone(), value);
}
}
resolved
}
/// Replaces every occurrence of each placeholder key in `input` with its
/// resolved value. Substitutions are applied in the map's (sorted) key order.
fn resolve_placeholders(input: &str, vars: &BTreeMap<String, String>) -> String {
  vars
    .iter()
    .fold(input.to_string(), |acc, (placeholder, value)| {
      acc.replace(placeholder.as_str(), value.as_str())
    })
}
fn collect_named_volumes(opts: &ComposeOpts) -> Vec<String> {
let mut volumes = Vec::new();
for service_opts in opts.services.values() {
for volume in &service_opts.volumes {
if let Some(vol_name) = volume.split(':').next()
&& !vol_name.starts_with('.')
&& !vol_name.starts_with('/')
&& volume.contains(':')
&& !volumes.contains(&vol_name.to_string())
{
volumes.push(vol_name.to_string());
}
}
}
volumes
}
/// Escapes backslashes and double quotes so `value` can be embedded inside a
/// double-quoted YAML scalar.
fn escape_yaml_value(value: &str) -> String {
  let mut escaped = String::with_capacity(value.len());
  for ch in value.chars() {
    match ch {
      '\\' => escaped.push_str("\\\\"),
      '"' => escaped.push_str("\\\""),
      other => escaped.push(other),
    }
  }
  escaped
}
/// Returns the executor command prefix for compose invocations
/// (`docker`, `sudo docker`, or `sudo podman`).
///
/// `in_docker_group` reports whether the current user belongs to the `docker`
/// group (see `is_user_in_group` at the call site): members can talk to the
/// Docker daemon socket directly, so no elevation is needed. The old parameter
/// name `sudo` was misleading — `true` meant sudo was NOT required. Podman is
/// always invoked via sudo (rootful mode). Behavior is unchanged.
fn choose_compose_executor(opts: &ComposeOpts, in_docker_group: bool) -> String {
  let executor = opts.effective_executor();
  if executor.is_docker() {
    if in_docker_group {
      "docker".to_string()
    } else {
      "sudo docker".to_string()
    }
  } else {
    "sudo podman".to_string()
  }
}
/// Builds and runs the compose stack for `pipeline` with docker or podman.
///
/// Flow: generate the compose file + dockerignore, `compose build`, then
/// `compose up`. On build or run failure the whole process exits with status 1
/// (after a best-effort `compose down` in the run case). Returns `Ok(true)`
/// on success.
///
/// # Panics
/// Panics if the pipeline has no `compose_opts` (caller is expected to
/// dispatch only compose-enabled pipelines here).
pub async fn execute_pipeline_compose(
config: &DeployerProjectOptions,
env: &RunEnvironment<'_>,
pipeline: &DescribedPipeline,
) -> anyhow::Result<bool> {
println!("Run path: {}", env.run_dir.canonicalize()?.to_string_lossy());
// NOTE(review): `sudo` is true when the user IS in the `docker` group — i.e.
// when elevation is NOT needed. `choose_compose_executor` interprets it that
// way; the name is historical.
let sudo = is_user_in_group("docker")?;
let opts = pipeline
.compose_opts
.as_ref()
.expect("No compose_opts for this pipeline!");
// Tag isolates concurrent runs; suffix distinguishes compose artifacts
// (Dockerfile.<tag>, docker-compose.<tag>.yaml, container names).
let exclusive_exec_tag = pipeline.exclusive_exec_tag.clone().unwrap_or(String::from("default")) + "-compose";
let executor = opts.effective_executor();
// Hard-fail early if the selected container engine is not on PATH.
if executor.is_docker() && Requirement::in_path("docker").satisfy(env).await.is_err() {
println!("{}", "Docker is not installed!".red());
exit(1);
} else if !executor.is_docker() && Requirement::in_path("podman").satisfy(env).await.is_err() {
println!("{}", "Podman is not installed!".red());
exit(1);
}
opts.app.sync_fake_content(env)?;
// Resolve the project variables the app service requests via `with`.
let app_vars = config.variables_for(&opts.app.with)?;
generate_compose_file(config, env, pipeline, opts, &exclusive_exec_tag, &app_vars).await?;
generate_dockerignore(env, config)?;
println!(
"Started `{}` compose build...",
format!("{}/{}", config.project_name, pipeline.title).green()
);
let compose_file = format!("docker-compose.{exclusive_exec_tag}.yaml");
// Compose project name: explicit option wins, else "<project>-<pipeline>".
let compose_project = opts
.project_name
.clone()
.unwrap_or_else(|| format!("{}-{}", config.project_name, pipeline.title));
let exec = choose_compose_executor(opts, sudo);
let build_cmd = format!(
"{exec} compose -p {compose_project} -f {compose_file} build{}",
if env.new_build { " --no-cache" } else { "" },
);
if CustomCommand::run_simple_observer(env, &build_cmd).await.is_err() {
println!("{}", "Compose build failed!".red());
exit(1);
}
println!("{}", "Compose images built successfully.".green());
// Ensure the artifacts bind-mount source exists before `up`.
let volume_path = env.artifacts_dir.join(&pipeline.title);
std::fs::create_dir_all(&volume_path)?;
// The abort/recreate flags only make sense for foreground (non-detached) runs.
let up_cmd = format!(
"{exec} compose -p {compose_project} -f {compose_file} up{}{}{}",
if opts.detach { " -d" } else { "" },
if opts.abort_on_container_exit && !opts.detach {
" --abort-on-container-exit"
} else {
""
},
if opts.remove_on_exit && !opts.detach {
" --force-recreate --remove-orphans"
} else {
""
},
);
if CustomCommand::run_simple_observer(env, &up_cmd).await.is_err() {
println!("{}", "Compose run failed!".red());
// Best-effort teardown; the error (if any) is deliberately ignored.
let _ = CustomCommand::run_simple_observer(
env,
format!("{exec} compose -p {compose_project} -f {compose_file} down --remove-orphans"),
)
.await;
exit(1);
}
// Foreground run with remove_on_exit: tear the stack down, volumes included.
if !opts.detach && opts.remove_on_exit {
let _ = CustomCommand::run_simple_observer(
env,
format!("{exec} compose -p {compose_project} -f {compose_file} down --remove-orphans -v"),
)
.await;
}
println!("{}", "Compose run is done.".green());
Ok(true)
}
fn is_user_in_group(group_name: &str) -> anyhow::Result<bool> {
let groups = match nix::unistd::getgroups() {
Ok(groups) => groups,
Err(e) => anyhow::bail!("Failed to get user groups: {}", e),
};
for gid in groups {
if let Ok(Some(group)) = nix::unistd::Group::from_gid(gid)
&& group.name.as_str().eq(group_name)
{
return Ok(true);
}
}
Ok(false)
}