use colored::Colorize;
use serde::{Deserialize, Serialize};
use std::collections::{BTreeMap, BTreeSet};
use std::fs;
use std::io::Write;
use std::path::PathBuf;
use std::process::exit;
use crate::bset;
use crate::entities::containered_opts::{ContaineredOpts, PortBinding};
use crate::entities::containered_opts::{copy_cmd, try_enplace};
use crate::entities::custom_command::CustomCommand;
use crate::entities::driver::PipelineDriver;
use crate::entities::environment::RunEnvironment;
use crate::entities::requirements::Requirement;
use crate::entities::variables::{VarValue, Variable};
use crate::pipelines::DescribedPipeline;
use crate::project::DeployerProjectOptions;
use crate::rw::log;
/// Fallback container image used for both builder and executor stages when the
/// pipeline's containered options don't pin one.
pub const BASE_IMAGE: &str = "ubuntu:latest";
/// Filename template for the minimal per-pipeline config; `{pipeline-name}` is
/// substituted with the pipeline title (see `generate_single_pipeline_config`).
const DEPL_CONFIG_FILE: &str = ".deploy-config.{pipeline-name}.yaml";
/// Substituted for `{preflight-commands}` when no preflight commands are configured.
const PREFLIGHT_DEFAULT: &str = "";
/// Default `{deployer-build-cmds}`: install a Rust toolchain and `depl` from crates.io.
/// NOTE(review): `cargo install` places the binary in `/root/.cargo/bin`, while the
/// `COPY --from=deployer-builder /app/deployer/target/release/depl` line in the template
/// below expects a target-dir build — confirm these defaults match the template.
pub const DEPL_DRIVER_INSTALL_CMDS: &str = r#"RUN apt-get update && apt-get install -y build-essential curl git && rm -rf /var/lib/apt/lists/*
RUN curl https://sh.rustup.rs -sSf | bash -s -- -y --profile minimal --default-toolchain stable
ENV PATH="/root/.cargo/bin:${PATH}"
RUN cargo install depl"#;
/// Two-stage Dockerfile template for the Deployer driver: stage 1 builds `depl`,
/// stage 2 copies it in and runs the pipeline from the generated minimal config.
/// All `{…}` placeholders are substituted by `generate_dockerfile`.
const GENERIC_DOCKERFILE_DEPL_DRIVER: &str = r#"# Generated by Deployer 2.X
FROM {deployer-base-image} AS deployer-builder
WORKDIR /app
{deployer-build-cmds}
FROM {base-image} AS deployer-executor
WORKDIR /app
{preflight-commands}
{user-setup}
COPY --from=deployer-builder /app/deployer/target/release/depl /depl
{cache-strategy}
ENV DEPLOYER_CONTAINERED_RUN=1
CMD ["/depl", "--config", "deploy-config.yaml", "run", "{pipeline-name}", "--current", "--no-clear"]
"#;
/// Single-stage Dockerfile template for the Shell driver: the pre-rendered run script
/// becomes the CMD; the build script is presumably executed via `{cache-strategy}` —
/// see `concat_strategies` usage in `generate_dockerfile`.
const GENERIC_DOCKERFILE_SHELL_DRIVER: &str = r#"# Generated by Deployer 2.X
FROM {base-image} AS deployer-executor
WORKDIR /app
ENV TERM=xterm
{preflight-commands}
{user-setup}
{cache-strategy}
{pre-run}
CMD ["/app/{run-pipeline-file}"]
"#;
fn is_user_in_group(group_name: &str) -> anyhow::Result<bool> {
let groups = match nix::unistd::getgroups() {
Ok(groups) => groups,
Err(e) => anyhow::bail!("Failed to get user groups: {}", e),
};
for gid in groups {
if let Ok(Some(group)) = nix::unistd::Group::from_gid(gid)
&& group.name.as_str().eq(group_name)
{
return Ok(true);
}
}
Ok(false)
}
/// Serializes a minimal, self-contained project config for `pipeline` so the Deployer
/// driver can run it inside the container, and returns the config filename.
///
/// The pipeline copy is stripped of host-only options, only the action definitions it
/// references are kept, and every variable is resolved to a plain value (resolution
/// happens on the host; secrets included, hence the `0o600` file mode). The file is
/// only rewritten when its content changed.
///
/// # Errors
/// Fails when a variable cannot be resolved, serialization fails, or file permissions
/// cannot be set.
pub async fn generate_single_pipeline_config(
  config: &DeployerProjectOptions,
  env: &RunEnvironment<'_>,
  pipeline: &DescribedPipeline,
) -> anyhow::Result<String> {
  use std::fs::Permissions;
  use std::os::unix::fs::PermissionsExt;
  // Strip options that only make sense on the host side.
  let mut pipeline = pipeline.to_owned();
  pipeline.default = None;
  pipeline.containered_opts = None;
  pipeline.compose_opts = None;
  pipeline.exclusive_exec_tag = None;
  pipeline.desc = None;
  pipeline.tags.clear();
  let config_filepath = DEPL_CONFIG_FILE.replace("{pipeline-name}", pipeline.title.as_str());
  log(format!("Prev config on {config_filepath:?}"));
  // Keep only the action definitions this pipeline actually references.
  let actions = pipeline
    .actions
    .iter()
    .map(|used| {
      used
        .definition(&config.actions)
        .expect("pipeline references an action that is not defined in the project config")
        .clone()
    })
    .collect::<BTreeSet<_>>();
  let mut minimal_project_config = DeployerProjectOptions {
    pipelines: vec![pipeline],
    actions,
    project_name: config.project_name.to_owned(),
    cache_files: Default::default(),
    ignore_files: Default::default(),
    ..(config.to_owned())
  };
  // Resolve every variable to a plain value so the container doesn't need access to
  // the original value sources.
  let mut variables = minimal_project_config.variables.clone();
  for v in variables.values_mut() {
    *v = Variable {
      is_secret: v.is_secret,
      value: VarValue::Plain {
        value: v.get_value(env).await?,
      },
    };
  }
  // BUG FIX: the resolved variables were previously computed and then dropped, so the
  // config was serialized with the unresolved originals. Write them back before dumping.
  minimal_project_config.variables = variables;
  let curr_config = serde_pretty_yaml::to_string_pretty(&minimal_project_config)?;
  let prev_config = std::fs::read_to_string(env.run_dir.join(config_filepath.as_str())).unwrap_or_default();
  if prev_config != curr_config {
    log(format!("Saving minimal pipeline config on {config_filepath:?}"));
    crate::rw::write(env.run_dir, config_filepath.as_str(), &minimal_project_config);
    // Owner-only: the resolved config can contain plain-text secrets.
    fs::set_permissions(
      env.run_dir.join(config_filepath.as_str()),
      Permissions::from_mode(0o600),
    )?;
  }
  Ok(config_filepath)
}
/// Renders the pipeline into two shell scripts — one for the image build stage, one for
/// container run time — writes them next to the project and makes them owner-executable.
///
/// Both scripts are rendered against a synthetic environment rooted at `.` (the
/// in-container working dir); the two environments differ only in the
/// `containered_build`/`containered_run` flags. Returns `(build_filename, run_filename)`.
pub async fn generate_shell_driver_scripts(
  config: &DeployerProjectOptions,
  env: &RunEnvironment<'_>,
  pipeline: &DescribedPipeline,
) -> anyhow::Result<(String, String)> {
  use std::fs::Permissions;
  use std::os::unix::fs::PermissionsExt;
  // Shared bindings for both synthetic environments.
  let root = PathBuf::from(".");
  let artifacts_root = root.join(crate::ARTIFACTS_DIR);
  let no_remotes = std::collections::BTreeMap::new();
  // Environment the script sees while the image is being built.
  let build_env = RunEnvironment {
    master_pipeline: &pipeline.title,
    run_dir: &root,
    cache_dir: &root,
    config_dir: &root,
    project_dir: Some(&root),
    storage_dir: &root,
    artifacts_dir: &artifacts_root,
    artifacts_placements: &pipeline.artifacts,
    remotes: &no_remotes,
    ignore: &config.ignore_files,
    log_file: None,
    new_build: false,
    containered_build: true,
    containered_run: false,
    ansible_run: false,
    daemons: Default::default(),
    observe_cli: &None,
    skipper: Default::default(),
    driver: PipelineDriver::Shell,
    restart_requested: None,
  };
  // Environment the script sees when the container actually runs.
  let run_env = RunEnvironment {
    master_pipeline: &pipeline.title,
    run_dir: &root,
    cache_dir: &root,
    config_dir: &root,
    project_dir: Some(&root),
    storage_dir: &root,
    artifacts_dir: &artifacts_root,
    artifacts_placements: &pipeline.artifacts,
    remotes: &no_remotes,
    ignore: &config.ignore_files,
    log_file: None,
    new_build: false,
    containered_build: false,
    containered_run: true,
    ansible_run: false,
    daemons: Default::default(),
    observe_cli: &None,
    skipper: Default::default(),
    driver: PipelineDriver::Shell,
    restart_requested: None,
  };
  let build_script_filename = format!(".pipe.{}.build.sh", env.master_pipeline);
  let run_script_filename = format!(".pipe.{}.run.sh", env.master_pipeline);
  let rendered = [
    (&build_script_filename, pipeline.to_shell_script(config, &build_env).await?),
    (&run_script_filename, pipeline.to_shell_script(config, &run_env).await?),
  ];
  for (filename, script) in rendered {
    let path = env.run_dir.join(filename.as_str());
    std::fs::write(&path, script)?;
    // Owner-only rwx (0o700) — presumably because resolved values can land in the
    // script; verify against `to_shell_script`.
    fs::set_permissions(&path, Permissions::from_mode(0o700))?;
  }
  Ok((build_script_filename, run_script_filename))
}
pub fn generate_dockerignore(env: &RunEnvironment, config: &DeployerProjectOptions) -> anyhow::Result<()> {
let mut ignore = bset!();
config.cache_files.iter().for_each(|i| {
ignore.insert(i.to_owned());
});
config.ignore_files.iter().for_each(|i| {
ignore.insert(i.to_owned());
});
ignore.insert(PathBuf::from("artifacts"));
let ignore = ignore
.iter()
.map(|el| el.to_str().unwrap().to_string())
.collect::<Vec<_>>();
let ignore = ignore.join("\n");
let mut dockerignore = fs::File::options()
.create(true)
.write(true)
.truncate(true)
.open(env.run_dir.join(".dockerignore"))
.map_err(|e| anyhow::anyhow!(format!("Can't open `.dockerignore` due to: {}", e)))?;
dockerignore.write_all(ignore.as_bytes())?;
Ok(())
}
/// Renders `Dockerfile.{exclusive_exec_tag}` in `env.run_dir`, choosing the template
/// that matches the active pipeline driver.
///
/// - `PipelineDriver::Deployer`: two-stage build — stage 1 builds/installs `depl`,
///   stage 2 runs the pipeline from the minimal config produced by
///   `generate_single_pipeline_config`.
/// - `PipelineDriver::Shell`: single stage that bakes the pre-rendered build/run shell
///   scripts (from `generate_shell_driver_scripts`) into the image.
///
/// # Errors
/// Fails when config/script generation, placeholder substitution (`try_enplace`),
/// cache-strategy rendering, or the Dockerfile write fail.
pub async fn generate_dockerfile(
  config: &DeployerProjectOptions,
  env: &RunEnvironment<'_>,
  pipeline: &DescribedPipeline,
  opts: &ContaineredOpts,
  exclusive_exec_tag: &str,
  vars: &BTreeMap<String, Variable>,
) -> anyhow::Result<()> {
  let resulting_image = match env.driver {
    PipelineDriver::Deployer => {
      let config_filepath = generate_single_pipeline_config(config, env, pipeline).await?;
      GENERIC_DOCKERFILE_DEPL_DRIVER
        .replace(
          "{deployer-base-image}",
          opts.build_deployer_base_image.as_deref().unwrap_or(BASE_IMAGE),
        )
        .replace(
          "{deployer-build-cmds}",
          // Fall back to the stock Rust-toolchain install when no custom build cmds given.
          &if opts.deployer_build_cmds.is_empty() {
            DEPL_DRIVER_INSTALL_CMDS.to_string()
          } else {
            try_enplace(&opts.deployer_build_cmds, env, vars).await?.join("\n")
          },
        )
        .replace("{base-image}", opts.base_image.as_deref().unwrap_or(BASE_IMAGE))
        .replace(
          "{preflight-commands}",
          &if opts.preflight_cmds.is_empty() {
            PREFLIGHT_DEFAULT.to_string()
          } else {
            try_enplace(&opts.preflight_cmds, env, vars).await?.join("\n")
          },
        )
        .replace(
          "{user-setup}",
          // Optional non-root user: created with passwordless sudo and ownership of /app.
          &if !opts.user.is_empty() {
            let user = opts.user.as_str();
            format!("RUN useradd -m {user} && echo \"{user} ALL=(ALL) NOPASSWD: ALL\" >> /etc/sudoers && chown -R {user}:{user} /app\nUSER {user}")
          } else {
            "".to_string()
          }
        )
        .replace(
          "{cache-strategy}",
          // No explicit strategies -> plain COPY of the whole build context.
          &opts
            .concat_strategies(env, &opts.user, config_filepath.as_str(), vars)
            .await?
            .unwrap_or_else(|| copy_cmd(&opts.user, ".", ".")),
        )
        // NOTE(review): the template contains no `{depl-config-file}` placeholder, so this
        // replacement is currently a no-op — and the template's CMD hard-codes
        // `deploy-config.yaml` while the generated file is `.deploy-config.<name>.yaml`.
        // Confirm which side is intended.
        .replace("{depl-config-file}", config_filepath.as_str())
        .replace("{pipeline-name}", &pipeline.title)
    }
    PipelineDriver::Shell => {
      let (build_config, run_config) = generate_shell_driver_scripts(config, env, pipeline).await?;
      GENERIC_DOCKERFILE_SHELL_DRIVER
        .replace("{base-image}", opts.base_image.as_deref().unwrap_or(BASE_IMAGE))
        .replace(
          "{preflight-commands}",
          &if opts.preflight_cmds.is_empty() {
            PREFLIGHT_DEFAULT.to_string()
          } else {
            try_enplace(&opts.preflight_cmds, env, vars).await?.join("\n")
          },
        )
        .replace(
          "{user-setup}",
          &if !opts.user.is_empty() {
            let user = opts.user.as_str();
            format!("RUN useradd -m {user}\nRUN echo \"{user} ALL=(ALL) NOPASSWD: ALL\" >> /etc/sudoers\nRUN chown -R {user}:{user} /app\nUSER {user}")
          } else {
            "".to_string()
          }
        )
        .replace(
          "{cache-strategy}",
          &opts
            .concat_strategies(env, &opts.user, &build_config, vars)
            .await?
            .unwrap_or_else(|| copy_cmd(&opts.user, ".", ".")),
        )
        // BUG FIX: this token was mojibake (`©_cmd`, i.e. a corrupted `&copy` entity)
        // and did not compile; restored to `&copy_cmd(...)`.
        .replace("{pre-run}", &copy_cmd(&opts.user, &run_config, "."))
        .replace("{run-pipeline-file}", &run_config)
    }
  };
  let filepath = env.run_dir.join(format!("Dockerfile.{exclusive_exec_tag}"));
  let mut dockerfile = fs::File::options()
    .create(true)
    .write(true)
    .truncate(true)
    .open(&filepath)
    .map_err(|e| {
      anyhow::anyhow!(format!(
        "Can't open `Dockerfile.{}` due to: `{}`; filepath: {:?}",
        exclusive_exec_tag, e, filepath
      ))
    })?;
  dockerfile.write_all(resulting_image.as_bytes())?;
  Ok(())
}
/// On-disk record (stored at `PREVENT_METADATA_LOCK`) of which upstream images were last
/// pulled and the stable local tags they were aliased to, so `check_and_pull_once` can
/// skip re-pulling when nothing changed. Field names are part of the serialized format.
#[derive(Deserialize, Serialize, Default, PartialEq)]
struct ImagesReplacement {
  // Upstream deployer-builder image the `depl_to` alias was created from.
  depl_from: String,
  // Local `<tag>_builder:latest` alias; empty when never tagged.
  depl_to: String,
  // Upstream executor base image the `base_to` alias was created from.
  base_from: String,
  // Local `<tag>_executor:latest` alias; empty when never tagged.
  base_to: String,
}
/// File in the run dir holding the serialized [`ImagesReplacement`] between runs.
static PREVENT_METADATA_LOCK: &str = ".deployer-prevent-metadata.lock.json";
/// Picks the container-engine command prefix for the configured executor.
///
/// Note on the `sudo` flag: per `execute_pipeline_containered`, `sudo == true` means the
/// current user IS in the `docker` group, so no `sudo` prefix is needed; podman is always
/// invoked through `sudo` regardless.
fn choose_executor(opts: &ContaineredOpts, sudo: bool) -> String {
  if !opts.executor.is_docker() {
    return "sudo podman".to_string();
  }
  let prefix = if sudo { "" } else { "sudo " };
  format!("{prefix}docker")
}
async fn check_and_pull_once(
env: &RunEnvironment<'_>,
opts: &mut ContaineredOpts,
exclusive_exec_tag: &str,
sudo: bool,
driver: PipelineDriver,
) -> anyhow::Result<()> {
let mut repls = crate::rw::read::<ImagesReplacement>(env.run_dir, PREVENT_METADATA_LOCK);
if repls.base_from.ne(opts.base_image.as_deref().unwrap_or(BASE_IMAGE)) {
if !repls.base_to.is_empty()
&& CustomCommand::run_simple_observer(env, format!("{} rmi {}", choose_executor(opts, sudo), repls.base_to))
.await
.is_err()
{
println!("{}", "Can't remove the old image!".red());
exit(1);
}
if CustomCommand::run_simple_observer(
env,
format!(
"{} pull {}",
choose_executor(opts, sudo),
opts.base_image.as_deref().unwrap_or(BASE_IMAGE)
),
)
.await
.is_err()
{
println!("{}", "Can't pull the image!".red());
exit(1);
}
if CustomCommand::run_simple_observer(
env,
format!(
"{} tag {} {}_executor:latest",
choose_executor(opts, sudo),
opts.base_image.as_deref().unwrap_or(BASE_IMAGE),
exclusive_exec_tag
),
)
.await
.is_err()
{
println!("{}", "Can't tag the image!".red());
exit(1);
}
repls.base_from = opts.base_image.as_deref().unwrap_or(BASE_IMAGE).to_owned();
repls.base_to = format!("{exclusive_exec_tag}_executor:latest");
}
if repls
.depl_from
.ne(opts.build_deployer_base_image.as_deref().unwrap_or(BASE_IMAGE))
&& driver.is_deployer()
{
if !repls.depl_to.is_empty()
&& CustomCommand::run_simple_observer(env, format!("{} rmi {}", choose_executor(opts, sudo), repls.depl_to))
.await
.is_err()
{
println!("{}", "Can't remove the old image!".red());
exit(1);
}
if CustomCommand::run_simple_observer(
env,
format!(
"{} pull {}",
choose_executor(opts, sudo),
opts.build_deployer_base_image.as_deref().unwrap_or(BASE_IMAGE)
),
)
.await
.is_err()
{
println!("{}", "Can't pull the image!".red());
exit(1);
}
if CustomCommand::run_simple_observer(
env,
format!(
"{} tag {} {}_builder:latest",
choose_executor(opts, sudo),
opts.build_deployer_base_image.as_deref().unwrap_or(BASE_IMAGE),
exclusive_exec_tag
),
)
.await
.is_err()
{
println!("{}", "Can't tag the image!".red());
exit(1);
}
repls.depl_from = opts
.build_deployer_base_image
.as_deref()
.unwrap_or(BASE_IMAGE)
.to_owned();
repls.depl_to = format!("{exclusive_exec_tag}_builder:latest");
}
opts.base_image = Some(repls.base_to.to_owned());
opts.build_deployer_base_image = Some(repls.depl_to.to_owned());
crate::rw::write(env.run_dir, PREVENT_METADATA_LOCK, &repls);
Ok(())
}
/// Builds the pipeline's container image and runs it with the artifacts directory
/// bind-mounted at `/app/artifacts`.
///
/// Returns `Ok(true)` when the containered run finished. Exits the process when the
/// container engine is missing or a build/run command fails (CLI-style error handling,
/// consistent with the rest of this module).
///
/// # Errors
/// Fails when the run dir can't be canonicalized, group membership can't be read, the
/// pipeline has no containered options, or variable/config generation fails.
pub async fn execute_pipeline_containered(
  config: &DeployerProjectOptions,
  env: &RunEnvironment<'_>,
  pipeline: &DescribedPipeline,
) -> anyhow::Result<bool> {
  println!("Run path: {}", env.run_dir.canonicalize()?.to_string_lossy());
  // `sudo == true` means the user IS in the `docker` group (no `sudo` prefix needed);
  // see `choose_executor`.
  let sudo = is_user_in_group("docker")?;
  let exclusive_exec_tag = format!(
    "{}-containered",
    pipeline.exclusive_exec_tag.as_deref().unwrap_or("default")
  );
  // Previously a bare `.unwrap()`: return a proper error instead of panicking when the
  // pipeline has no containered options.
  let mut opts = pipeline
    .containered_opts
    .clone()
    .ok_or_else(|| anyhow::anyhow!("Pipeline `{}` has no containered options!", pipeline.title))?;
  if opts.executor.is_docker() && Requirement::in_path("docker").satisfy(env).await.is_err() {
    println!("{}", "Docker is not installed!".red());
    exit(1);
  } else if !opts.executor.is_docker() && Requirement::in_path("podman").satisfy(env).await.is_err() {
    println!("{}", "Podman is not installed!".red());
    exit(1);
  }
  opts.sync_fake_content(env)?;
  if opts.prevent_metadata_loading {
    check_and_pull_once(env, &mut opts, &exclusive_exec_tag, sudo, pipeline.driver).await?;
  }
  let vars = config.variables_for(&opts.with)?;
  generate_dockerfile(config, env, pipeline, &opts, &exclusive_exec_tag, &vars).await?;
  generate_dockerignore(env, config)?;
  println!(
    "Started `{}` image build...",
    format!("{}/{}", config.project_name, pipeline.title).green()
  );
  let build_cmd = format!(
    "{} build {}-t {}/{} -f Dockerfile.{}{} .",
    choose_executor(&opts, sudo),
    if env.new_build { "--no-cache " } else { "" },
    config.project_name,
    pipeline.title,
    exclusive_exec_tag,
    // Local layer cache (docker-only) keyed by the exclusive exec tag.
    if opts.use_containerd_local_storage_cache && opts.executor.is_docker() {
      format!(
        " --cache-to type=local,dest=.docker-cache/{exclusive_exec_tag},compression=zstd --cache-from type=local,src=.docker-cache/{exclusive_exec_tag}"
      )
    } else {
      String::from("")
    },
  );
  if CustomCommand::run_simple_observer(env, &build_cmd).await.is_err() {
    println!("{}", "Image wasn't built!".red());
    exit(1);
  }
  println!("{}", "Image was built successfully.".green());
  let volume_path = env.artifacts_dir.join(&pipeline.title);
  std::fs::create_dir_all(&volume_path)?;
  if !opts.executor.is_docker() {
    // Podman: the per-tag artifacts dir must exist up front as well.
    std::fs::create_dir_all(
      env
        .artifacts_dir
        .join(pipeline.exclusive_exec_tag.as_deref().unwrap_or("default")),
    )?;
  }
  if CustomCommand::run_simple_observer(
    env,
    format!(
      "{} run{}{}{} --mount type=bind,src={:?},dst=/app/artifacts {}/{}",
      choose_executor(&opts, sudo),
      if opts.run_detached { " -d" } else { "" },
      opts
        .port_bindings
        .iter()
        .map(|PortBinding { from, to }| format!(" -p {from}:{to}"))
        .collect::<Vec<_>>()
        .join(""),
      if opts.allow_internal_host_bind && opts.executor.is_docker() {
        " --add-host=host.docker.internal:host-gateway"
      } else {
        ""
      },
      volume_path,
      config.project_name,
      pipeline.title
    ),
  )
  .await
  .is_err()
  {
    println!("{}", "Deployer didn't execute the pipeline!".red());
    exit(1);
  }
  println!("{}", "Containered run is done.".green());
  Ok(true)
}