use anyhow::bail;
use colored::Colorize;
use std::collections::BTreeMap;
use std::path::{Path, PathBuf};
use std::sync::{Arc, atomic::AtomicBool};
use crate::ansible::execute_pipeline_with_ansible;
use crate::cmd::RunArgs;
use crate::compose::execute_pipeline_compose;
use crate::containered::execute_pipeline_containered;
use crate::entities::daemons::Daemons;
use crate::entities::driver::PipelineDriver;
use crate::entities::environment::RunEnvironment;
use crate::entities::info::ShortName;
use crate::entities::observe_executor::ObserveClient;
use crate::entities::remote_host::RemoteHost;
use crate::entities::runs::Runs;
use crate::entities::skipper::Skipper;
use crate::globals::DeployerGlobalConfig;
use crate::pipelines::DescribedPipeline;
use crate::project::DeployerProjectOptions;
use crate::remote::{sync_artifacts_from_remote, sync_to_remote};
use crate::rw::{copy_all, log, symlink, write};
use crate::{ARTIFACTS_DIR, BUILD_CACHE_LIST};
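/// Ensures the project-local artifacts folder (`ARTIFACTS_DIR`) exists and returns its path.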
pub(crate) fn prepare_artifacts_folder(current_dir: &std::path::Path) -> anyhow::Result<PathBuf> {
let artifacts_dir = current_dir.join(ARTIFACTS_DIR);
std::fs::create_dir_all(artifacts_dir.as_path())
.unwrap_or_else(|_| panic!("Can't create `{artifacts_dir:?}` folder!"));
Ok(artifacts_dir)
}
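/// Resolves the run folder: an explicit `args.run_at` path if given, otherwise a cached
/// or newly created folder from `Runs`. Persists the updated runs list to the build cache
/// and returns the path together with a flag marking a fresh build.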
pub(crate) fn prepare_run_folder(
config: &DeployerProjectOptions,
runs: &mut Runs,
exclusive_exec_tag: &Option<String>,
cache_dir: &Path,
args: &RunArgs,
) -> anyhow::Result<(PathBuf, bool)> {
let run_path = if let Some(run_at) = args.run_at.as_ref() {
run_at.to_owned()
} else {
runs.fetch_or_create(&config.project_name, exclusive_exec_tag, cache_dir, args.fresh)
};
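// The build counts as fresh when the run folder doesn't exist yet or `args.fresh` forces it.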
let fresh = !run_path.exists() || args.fresh;
std::fs::create_dir_all(run_path.as_path()).unwrap_or_else(|_| panic!("Can't create `{run_path:?}` folder!"));
write(cache_dir, BUILD_CACHE_LIST, &runs);
Ok((run_path, fresh))
}
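/// Populates the run folder from the project sources. With an empty `copy_only` list the
/// whole project is copied (minus ignored and cache files), and cache entries are then
/// symlinked or copied in on request; otherwise only the listed `copy_only` files are copied.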
pub fn sync_run_folder(
config: &DeployerProjectOptions,
pipeline: &DescribedPipeline,
run_dir: &Path,
project_dir: &Path,
link_cache: bool,
copy_cache: bool,
) -> anyhow::Result<()> {
if pipeline.copy_only.is_empty() {
let mut ignore = vec![
PathBuf::from(ARTIFACTS_DIR),
PathBuf::from(run_dir.file_name().unwrap()),
];
ignore.extend(config.ignore_files.iter().cloned());
ignore.extend(config.cache_files.iter().cloned());
copy_all(project_dir, project_dir, run_dir, &ignore)?;
if link_cache {
for cache_item in &config.cache_files {
symlink(project_dir.join(cache_item), run_dir.join(cache_item));
log(format!("-> {cache_item:?}"));
}
}
if copy_cache {
for cache_item in &config.cache_files {
let cache_item_path = project_dir.join(cache_item);
copy_all(
&cache_item_path,
&cache_item_path,
run_dir.join(cache_item),
&[] as &[&str],
)?;
log(format!("-> {cache_item:?}"));
}
}
} else {
for each_file in pipeline.copy_only.iter() {
copy_all(
project_dir,
project_dir.join(each_file.as_str()),
run_dir.join(each_file.as_str()),
&[] as &[&str],
)?;
}
}
Ok(())
}
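/// Main `run` entry point. Dispatches to worker mode when `DEPLOYER_REMOTE_AS_WORKER` is
/// set, to controller mode when remote hosts are requested, and otherwise executes the
/// default or explicitly tagged pipelines locally. Returns `true` only if every executed
/// pipeline succeeded.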
#[allow(clippy::too_many_arguments)]
pub async fn run(
config: &DeployerProjectOptions,
globals: &DeployerGlobalConfig,
runs: &mut Runs,
cache_dir: &Path,
config_dir: &Path,
storage_dir: &Path,
args: &RunArgs,
observe_cli: Option<ObserveClient>,
skipper: Skipper,
restart_requested: Option<Arc<AtomicBool>>,
) -> anyhow::Result<bool> {
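// When `DEPLOYER_REMOTE_AS_WORKER` is set, it carries the path of the run folder that
// the controller has already synced to this host.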
let remote_folder = std::env::var("DEPLOYER_REMOTE_AS_WORKER").ok().map(PathBuf::from);
if *config == Default::default() && remote_folder.is_none() {
panic!("Config is invalid! Reinit the project.");
}
check_args_on_conflicts(args)?;
if let Some(run_dir) = &remote_folder
&& !args.pipeline_tags.is_empty()
{
return run_as_worker(
config,
run_dir,
cache_dir,
config_dir,
storage_dir,
&globals.remote_hosts,
args,
skipper,
)
.await;
}
let project_dir = std::env::current_dir().expect("Can't get current dir!");
let artifacts_dir = prepare_artifacts_folder(&project_dir)?;
let mut success = true;
if !args.remotes.is_empty() {
return run_as_controller(
config,
runs,
&artifacts_dir,
&project_dir,
cache_dir,
&globals.remote_hosts,
args,
);
}
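// Without explicit tags every pipeline marked as default is executed; with tags, only
// the requested pipelines run, in the order given.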
if args.pipeline_tags.is_empty() {
if config.pipelines.is_empty() {
panic!("The pipelines' list is empty! Check the config file for errors.");
}
let default_count = config.pipelines.iter().filter(|p| p.default.is_some_and(|v| v)).count();
if default_count == 0 {
panic!("There are no default Pipelines! Please specify at least one to execute.");
}
for pipeline in config.pipelines.iter().filter(|p| p.default.is_some_and(|v| v)) {
let (run_path, new_build) = if args.current {
(project_dir.clone(), false)
} else {
prepare_run_folder(config, runs, &pipeline.exclusive_exec_tag, cache_dir, args)?
};
sync_run_folder(
config,
pipeline,
&run_path,
&project_dir,
args.link_cache,
args.copy_cache,
)?;
if pipeline.driver.is_shell() && observe_cli.is_some() && !matches!(args.driver.as_deref(), Some("deployer")) {
panic!("`depl watch` can't be run with the shell driver!");
}
let mut env = RunEnvironment {
master_pipeline: &pipeline.title,
run_dir: &run_path,
cache_dir,
config_dir,
storage_dir,
project_dir: Some(&project_dir),
artifacts_dir: &artifacts_dir,
artifacts_placements: &pipeline.artifacts,
new_build,
remotes: &globals.remote_hosts,
ignore: &config.ignore_files,
log_file: None,
containered_build: matches!(std::env::var("DEPLOYER_CONTAINERED_BUILD"), Ok(val) if val.as_str().eq("1")),
containered_run: matches!(std::env::var("DEPLOYER_CONTAINERED_RUN"), Ok(val) if val.as_str().eq("1")),
ansible_run: matches!(std::env::var("DEPLOYER_ANSIBLE_RUN"), Ok(val) if val.as_str().eq("1")),
daemons: Daemons::default(),
observe_cli: &observe_cli,
skipper: skipper.clone(),
driver: pipeline.driver,
restart_requested: restart_requested.clone(),
};
let status = execute_pipeline(
config,
&mut env,
pipeline,
args.containerized,
args.compose,
args.ansible,
&args.driver,
)
.await?;
success &= status;
if !status {
break;
}
}
} else {
for pipeline_tag in &args.pipeline_tags {
if let Some(pipeline) = config.pipelines.iter().find(|p| p.title.as_str().eq(pipeline_tag)) {
let (run_path, new_build) = if args.current {
(project_dir.clone(), false)
} else {
prepare_run_folder(config, runs, &pipeline.exclusive_exec_tag, cache_dir, args)?
};
sync_run_folder(
config,
pipeline,
&run_path,
&project_dir,
args.link_cache,
args.copy_cache,
)?;
if pipeline.driver.is_shell() && observe_cli.is_some() && !matches!(args.driver.as_deref(), Some("deployer")) {
panic!("`depl watch` can't be run with shell driver!");
}
let mut env = RunEnvironment {
master_pipeline: &pipeline.title,
run_dir: &run_path,
cache_dir,
config_dir,
storage_dir,
project_dir: Some(&project_dir),
artifacts_dir: &artifacts_dir,
artifacts_placements: &pipeline.artifacts,
new_build,
remotes: &globals.remote_hosts,
ignore: &config.ignore_files,
log_file: None,
containered_build: matches!(std::env::var("DEPLOYER_CONTAINERED_BUILD"), Ok(val) if val.as_str().eq("1")),
containered_run: matches!(std::env::var("DEPLOYER_CONTAINERED_RUN"), Ok(val) if val.as_str().eq("1")),
ansible_run: matches!(std::env::var("DEPLOYER_ANSIBLE_RUN"), Ok(val) if val.as_str().eq("1")),
daemons: Daemons::default(),
observe_cli: &observe_cli,
skipper: skipper.clone(),
driver: pipeline.driver,
restart_requested: restart_requested.clone(),
};
let status = execute_pipeline(
config,
&mut env,
pipeline,
args.containerized,
args.compose,
args.ansible,
&args.driver,
)
.await?;
success &= status;
if !status {
break;
}
} else {
panic!(
"There is no such Pipeline `{}` set up for this project. Maybe you've forgotten to set it up via `{}`?",
pipeline_tag.green(),
"deployer with {pipeline-short-name-and-ver}".green(),
);
}
}
}
Ok(success)
}
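/// Worker-mode entry point: executes the requested pipelines directly inside the run
/// folder that the controller synced over; there is no project folder on this side, so
/// sources are used as delivered.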
#[allow(clippy::too_many_arguments)]
pub async fn run_as_worker(
config: &DeployerProjectOptions,
run_dir: &Path,
cache_dir: &Path,
config_dir: &Path,
storage_dir: &Path,
remotes: &BTreeMap<ShortName, RemoteHost>,
args: &RunArgs,
skipper: Skipper,
) -> anyhow::Result<bool> {
let artifacts_dir = prepare_artifacts_folder(run_dir)?;
let mut success = true;
for pipeline_tag in &args.pipeline_tags {
if let Some(pipeline) = config.pipelines.iter().find(|p| p.title.as_str().eq(pipeline_tag)) {
let mut env = RunEnvironment {
master_pipeline: &pipeline.title,
run_dir,
cache_dir,
config_dir,
storage_dir,
project_dir: None,
artifacts_dir: &artifacts_dir,
artifacts_placements: &pipeline.artifacts,
new_build: true,
remotes,
ignore: &config.ignore_files,
log_file: None,
containered_build: matches!(std::env::var("DEPLOYER_CONTAINERED_BUILD"), Ok(val) if val.as_str().eq("1")),
containered_run: matches!(std::env::var("DEPLOYER_CONTAINERED_RUN"), Ok(val) if val.as_str().eq("1")),
ansible_run: matches!(std::env::var("DEPLOYER_ANSIBLE_RUN"), Ok(val) if val.as_str().eq("1")),
daemons: Daemons::default(),
observe_cli: &None,
skipper: skipper.clone(),
driver: pipeline.driver,
restart_requested: None,
};
success &= execute_pipeline(
config,
&mut env,
pipeline,
args.containerized,
args.compose,
args.ansible,
&args.driver,
)
.await?;
} else {
panic!(
"There is no such Pipeline `{}` set up for this project. Maybe you've forgotten to set it up via `{}`?",
pipeline_tag.green(),
"deployer with {pipeline-short-name-and-ver}".green(),
);
}
}
Ok(success)
}
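/// Controller-mode entry point: prepares and syncs a run folder for each requested
/// pipeline, pushes it to every resolved remote host, triggers the build there via
/// `call_deployer_to_build`, and pulls the produced artifacts back into `artifacts_dir`.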
pub fn run_as_controller(
config: &DeployerProjectOptions,
runs: &mut Runs,
artifacts_dir: &Path,
project_dir: &Path,
cache_dir: &Path,
remotes: &BTreeMap<ShortName, RemoteHost>,
args: &RunArgs,
) -> anyhow::Result<bool> {
use std::collections::HashSet;
let mut remote = vec![];
for short_name in &args.remotes {
if let Some(host) = remotes.get(&ShortName::new(short_name)?) {
remote.push(host.clone());
}
}
if remote.is_empty() {
bail!("There is no such remote hosts in Registry.");
}
let mut success = true;
let mut ignore = HashSet::new();
config.cache_files.iter().for_each(|i| {
ignore.insert(i.to_owned());
});
config.ignore_files.iter().for_each(|i| {
ignore.insert(i.to_owned());
});
ignore.insert(PathBuf::from("artifacts"));
'outer: for pipeline_tag in &args.pipeline_tags {
if let Some(pipeline) = config.pipelines.iter().find(|p| p.title.as_str().eq(pipeline_tag)) {
let (run_path, _) = prepare_run_folder(config, runs, &pipeline.exclusive_exec_tag, cache_dir, args)?;
sync_run_folder(
config,
pipeline,
&run_path,
project_dir,
args.link_cache,
args.copy_cache,
)?;
for host in remote.iter() {
println!("Starting run on remote host `{}`...", host.short_name.as_str().green());
let now = std::time::Instant::now();
let generated_remote = sync_to_remote(&run_path, host, &ignore)?;
match host.call_deployer_to_build(&generated_remote, pipeline.title.as_str()) {
Err(e) => println!("{e}"),
Ok((status, out)) => {
for line in out {
println!("{line}")
}
success &= status;
if !status {
break 'outer;
}
}
}
sync_artifacts_from_remote(&generated_remote, artifacts_dir, host)?;
println!(
"Run at remote and got artifacts from host: `{}` ({}).",
host.short_name.as_str().green(),
format!("{:.2?}", now.elapsed()).green()
);
}
} else {
panic!(
"There is no such Pipeline `{}` set up for this project. Maybe you've forgotten to set it up via `{}`?",
pipeline_tag.green(),
"deployer with {pipeline-short-name-and-ver}".green(),
);
}
}
Ok(success)
}
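/// Executes a single pipeline with the requested backend. The containerized, Compose and
/// Ansible paths each validate their pipeline options and refuse to nest inside an
/// equivalent environment; otherwise the pipeline runs with the Deployer or shell driver,
/// honoring an explicit override from `with_driver_overriden`.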
pub async fn execute_pipeline(
config: &DeployerProjectOptions,
env: &mut RunEnvironment<'_>,
pipeline: &DescribedPipeline,
with_containerized: bool,
with_compose: bool,
with_ansible: bool,
with_driver_overriden: &Option<String>,
) -> anyhow::Result<bool> {
if with_containerized {
if pipeline.containered_opts.is_none() {
panic!("There is no `containered_opts` for this pipeline!");
}
if env.containered_build || env.containered_run {
panic!("Deployer can't build or run pipeline with Docker/Podman in already containerized environment!");
}
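// Rebuild the environment by value: the non-Copy fields (daemons, skipper,
// restart_requested) are cloned explicitly; the remaining references and flags are
// copied via struct update syntax.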
let env = RunEnvironment {
daemons: env.daemons.clone(),
skipper: env.skipper.clone(),
restart_requested: env.restart_requested.clone(),
..(*env)
};
return execute_pipeline_containered(config, &env, pipeline).await;
}
if with_compose {
if pipeline.compose_opts.is_none() {
panic!("There is no `compose_opts` for this pipeline!");
}
if env.containered_build || env.containered_run {
panic!("Deployer can't run Docker Compose in already containerized environment!");
}
let env = RunEnvironment {
daemons: env.daemons.clone(),
skipper: env.skipper.clone(),
restart_requested: env.restart_requested.clone(),
..(*env)
};
return execute_pipeline_compose(config, &env, pipeline).await;
}
if with_ansible {
if pipeline.ansible_opts.is_none() {
panic!("There is no `ansible_opts` for this pipeline!");
}
if env.ansible_run {
panic!("Deployer can't run Ansible pipeline at another Ansible host!");
}
return execute_pipeline_with_ansible(config, env, pipeline).await;
}
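// Any override value other than "deployer" or "shell" falls through to the pipeline's own driver.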
match (with_driver_overriden.as_deref(), env.driver) {
(Some("deployer"), _) => pipeline.exec_with_deployer_driver(config, env).await,
(Some("shell"), _) => pipeline.exec_with_shell(config, env).await,
(_, PipelineDriver::Deployer) => pipeline.exec_with_deployer_driver(config, env).await,
(_, PipelineDriver::Shell) => pipeline.exec_with_shell(config, env).await,
}
}
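/// Validates the CLI arguments up front, rejecting mutually exclusive flag combinations
/// and incomplete remote-run invocations.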
fn check_args_on_conflicts(args: &RunArgs) -> anyhow::Result<()> {
if args.link_cache && args.copy_cache {
panic!(
"Select only one option from `{}` and `{}`. See help via `{}`.",
"c".green(),
"C".green(),
"depl run -h".green()
);
}
if (args.fresh || args.link_cache || args.copy_cache || args.run_at.is_some()) && args.current {
panic!(
"Select either `{}` or `{}`/{}`/`{}`/`{}` options. See help via `{}`.",
"o".green(),
"j".green(),
"f".green(),
"c".green(),
"C".green(),
"depl run -h".green(),
);
}
if [args.containerized, args.compose, args.ansible]
.iter()
.filter(|v| **v)
.count()
> 1
{
panic!(
"Select only one option from `{}`, `{}` and `{}`. See help via `{}`.",
"d".green(),
"m".green(),
"a".green(),
"depl run -h".green()
);
}
if std::env::var("DEPLOYER_REMOTE_AS_WORKER").is_ok_and(|v| !v.is_empty()) && args.pipeline_tags.is_empty() {
panic!("You always should specify pipeline tags for executing while remote builds.")
}
if !args.remotes.is_empty()
&& (std::env::var("DEPLOYER_REMOTE_AS_WORKER").is_ok_and(|v| !v.is_empty())
|| args.pipeline_tags.is_empty()
|| args.link_cache
|| args.copy_cache
|| args.fresh
|| args.run_at.is_some()
|| args.current)
{
panic!("If you specify remote hosts to run on, you should specify only pipelines list and nothing more.")
}
Ok(())
}
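// The `DEPLOYER_CONTAINERED_BUILD` / `DEPLOYER_CONTAINERED_RUN` / `DEPLOYER_ANSIBLE_RUN`
// checks above are repeated verbatim in both branches of `run` and in `run_as_worker`.
// A small helper along the lines of the hypothetical `env_flag` below could collapse
// them; this is a sketch only and intentionally not wired into the code above.
#[allow(dead_code)]
fn env_flag(name: &str) -> bool {
matches!(std::env::var(name), Ok(val) if val.as_str().eq("1"))
}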