use crate::instance_spec::{InstanceSpec, ReviewChangeSet, TemplateUploader};
use crate::types::{ParameterKey, ParameterMap, ParameterValue};
use sha2::Digest;
/// Name of the binary produced by the cargo build.
pub struct BinaryName(pub String);

/// Cargo `--target` triple the lambda binary is built for.
pub struct BuildTarget(pub String);

/// Key under which the zipped lambda binary is stored in S3.
pub struct S3ObjectKey(pub String);

impl From<&S3ObjectKey> for String {
    /// Produce an owned copy of the underlying key string.
    fn from(value: &S3ObjectKey) -> String {
        value.0.to_owned()
    }
}
/// Name of the S3 bucket that lambda artifacts are uploaded to.
#[derive(Clone)]
pub struct S3BucketName(pub String);

impl From<&S3BucketName> for String {
    /// Produce an owned copy of the bucket name.
    fn from(value: &S3BucketName) -> String {
        let S3BucketName(name) = value;
        name.clone()
    }
}
/// Cargo build profile used for the lambda binary.
pub enum BuildType {
    Debug,
    Release,
}

impl BuildType {
    /// Name of the profile subdirectory under `target/<triple>/`.
    fn path(&self) -> &str {
        match self {
            BuildType::Release => "release",
            BuildType::Debug => "debug",
        }
    }

    /// Extra arguments passed to `cargo build` for this profile.
    fn args(&self) -> &[&str] {
        const RELEASE_ARGS: &[&str] = &["--release"];
        const DEBUG_ARGS: &[&str] = &[];
        match self {
            BuildType::Release => RELEASE_ARGS,
            BuildType::Debug => DEBUG_ARGS,
        }
    }
}
/// Where to obtain the name of the artifact bucket.
pub enum S3BucketSource {
    /// Bucket name known ahead of time.
    Static(S3BucketName),
    /// Bucket name read from a CloudFormation stack output at deploy time.
    StackOutput {
        stack_name: crate::types::StackName,
        output_key: crate::types::OutputKey,
    },
}
/// A zipped lambda binary ready for upload, keyed by its content hash.
pub struct ZipFile {
    /// Zip archive bytes, uploaded as the S3 object body.
    body: aws_sdk_s3::primitives::ByteStream,
    /// Content-addressed key (`<sha256>.zip`), so identical builds dedupe.
    s3_object_key: S3ObjectKey,
}
impl ZipFile {
    /// Upload the zip to `s3_bucket_name` unless an object with the same
    /// content-addressed key already exists, then return the object key as a
    /// CloudFormation parameter value.
    ///
    /// # Panics
    /// Panics if the upload fails or the existence check errors unexpectedly.
    pub async fn upload(
        self,
        s3: &aws_sdk_s3::client::Client,
        s3_bucket_name: &S3BucketName,
    ) -> ParameterValue {
        if !self.object_exists(s3, s3_bucket_name).await {
            s3.put_object()
                .bucket(s3_bucket_name)
                .key(&self.s3_object_key)
                .body(self.body)
                .send()
                .await
                .unwrap_or_else(|error| panic!("Failed to upload lambda zip: {error}"));
        }
        // `self` is consumed, so the key can be moved out without cloning.
        ParameterValue(self.s3_object_key.0)
    }

    /// Check via `HeadObject` whether the object already exists in the bucket.
    async fn object_exists(
        &self,
        s3: &aws_sdk_s3::client::Client,
        s3_bucket_name: &S3BucketName,
    ) -> bool {
        let result = s3
            .head_object()
            .bucket(s3_bucket_name)
            .key(&self.s3_object_key)
            .send()
            .await;
        match result {
            Err(error) => match error.into_service_error() {
                // A missing object is the expected "not uploaded yet" case.
                aws_sdk_s3::operation::head_object::HeadObjectError::NotFound { .. } => false,
                other => panic!("Unexpected head object error: {other:#?}"),
            },
            Ok(_) => true,
        }
    }
}
/// Description of a lambda binary to build, zip, and deploy.
pub struct Target {
    /// Name of the binary to read from the cargo target directory.
    pub binary_name: BinaryName,
    /// Cargo `--target` triple to build for.
    pub build_target: BuildTarget,
    /// Debug or release profile.
    pub build_type: BuildType,
    /// Extra files to include in the zip: host path -> path inside the archive.
    pub extra_files: std::collections::BTreeMap<std::path::PathBuf, std::path::PathBuf>,
}
impl Target {
#[must_use]
pub fn path(&self) -> std::path::PathBuf {
std::path::Path::new("./target")
.join(&self.build_target.0)
.join(self.build_type.path())
.join(&self.binary_name.0)
}
pub async fn build(&self) {
log::info!("Building lambda target");
cmd_proc::Command::new("cargo")
.arguments(["build", "--target", &self.build_target.0])
.arguments(self.build_type.args())
.status()
.await
.unwrap_or_else(|error| panic!("Failed to build lambda target: {error}"));
}
pub async fn build_zip(&self) -> ZipFile {
self.build().await;
self.generate_zip()
}
fn generate_zip(&self) -> ZipFile {
let path = self.path();
log::info!("Reading binary from: {}", path.display());
let binary = std::fs::read(path).unwrap();
log::info!("Compressing binary into zip");
let mut cursor: std::io::Cursor<Vec<u8>> = std::io::Cursor::new(vec![]);
let mut zip = zip::write::ZipWriter::new(&mut cursor);
zip.start_file::<&str, ()>("bootstrap", Self::zip_file_options(0o555))
.unwrap();
std::io::Write::write_all(&mut zip, binary.as_ref()).unwrap();
self.write_extra_files(&mut zip);
zip.finish().unwrap();
log::info!("Computing zip hash");
let body = cursor.into_inner();
let hash = hex::encode(sha2::Sha256::digest(&body).as_slice());
log::info!("Content hash: {hash}");
ZipFile {
body: aws_sdk_s3::primitives::ByteStream::from(body),
s3_object_key: S3ObjectKey(format!("{hash}.zip")),
}
}
fn write_extra_files<W: std::io::Write + std::io::Seek>(
&self,
zip: &mut zip::write::ZipWriter<W>,
) {
for (host, target) in &self.extra_files {
zip.start_file_from_path(target, Self::zip_file_options(0o444))
.unwrap();
std::io::copy(&mut std::fs::File::open(host).unwrap(), zip).unwrap();
}
}
fn zip_file_options(unix_permissions: u32) -> zip::write::FileOptions<'static, ()> {
zip::write::FileOptions::default()
.unix_permissions(unix_permissions)
.last_modified_time(zip::DateTime::default())
}
pub async fn deploy_parameter_update(
s3: &aws_sdk_s3::client::Client,
cloudformation: &aws_sdk_cloudformation::Client,
s3_bucket_name: &S3BucketName,
instance_spec: &InstanceSpec,
parameter_key: &ParameterKey,
template_uploader: &Option<TemplateUploader<'_>>,
zip_file: ZipFile,
) {
let parameter_value = zip_file.upload(s3, s3_bucket_name).await;
instance_spec
.context(cloudformation, template_uploader.as_ref())
.parameter_update(&ParameterMap(std::collections::BTreeMap::from([(
parameter_key.clone(),
parameter_value,
)])))
.await;
}
#[allow(clippy::too_many_arguments)]
pub async fn deploy_template_update(
s3: &aws_sdk_s3::client::Client,
cloudformation: &aws_sdk_cloudformation::Client,
s3_bucket_name: &S3BucketName,
instance_spec: &InstanceSpec,
review_change_set: &ReviewChangeSet,
parameter_key: &ParameterKey,
template_uploader: &Option<TemplateUploader<'_>>,
zip_file: ZipFile,
) {
let parameter_value = zip_file.upload(s3, s3_bucket_name).await;
instance_spec
.context(cloudformation, template_uploader.as_ref())
.update(
review_change_set,
&ParameterMap(std::collections::BTreeMap::from([(
parameter_key.clone(),
parameter_value,
)])),
)
.await;
}
}
pub mod cli {
use crate::instance_spec::{Registry, ReviewChangeSet, TemplateUploader};
use crate::lambda::deploy::S3BucketSource;
use crate::types::{ParameterKey, ParameterMap, ParameterValue, StackName};
// CLI entry point: parses a single deploy subcommand.
// NOTE(review): plain `//` comments are used deliberately — `///` doc comments
// on clap-derive items become `--help` text and would change CLI output.
#[derive(Clone, Debug, Eq, PartialEq, clap::Parser)]
pub struct App {
    #[clap(subcommand)]
    command: Command,
}
impl App {
    /// Dispatch to the parsed subcommand.
    pub async fn run(&self, config: &'_ Config<'_>) {
        let Self { command } = self;
        command.run(config).await
    }
}
/// Everything the CLI needs to build, upload, and deploy the lambda.
pub struct Config<'a> {
    pub cloudformation: &'a aws_sdk_cloudformation::client::Client,
    /// Stack parameter that receives the uploaded object key.
    pub parameter_key: ParameterKey,
    /// Registry used to look up instance specs by stack name.
    pub registry: Registry,
    pub s3: &'a aws_sdk_s3::client::Client,
    /// Static bucket name, or a stack output to resolve one from.
    pub s3_bucket_source: S3BucketSource,
    /// The lambda binary to build and package.
    pub target: crate::lambda::deploy::Target,
    /// Optional template uploader forwarded to the instance-spec context.
    pub template_uploader: Option<&'a TemplateUploader<'a>>,
}
impl Config<'_> {
pub(crate) async fn build(&self) {
self.target.build().await
}
pub(crate) async fn upload(&self) -> ParameterValue {
let s3_bucket_name = self.load_s3_bucket_name().await;
let parameter_value = self
.target
.build_zip()
.await
.upload(self.s3, &s3_bucket_name)
.await;
log::info!("Lambda object key: {}", parameter_value.0);
parameter_value
}
pub(crate) async fn deploy_template(
&self,
stack_name: &StackName,
review_change_set: &ReviewChangeSet,
) {
let instance_spec = self
.registry
.find(stack_name)
.expect("instance spec not registered");
let parameter_value = self.upload().await;
instance_spec
.context(self.cloudformation, self.template_uploader)
.sync(
review_change_set,
&ParameterMap(std::collections::BTreeMap::from([(
self.parameter_key.clone(),
parameter_value,
)])),
)
.await
}
pub(crate) async fn deploy_parameter(&self, stack_name: &StackName) {
let instance_spec = self
.registry
.find(stack_name)
.expect("instance spec not registered");
let parameter_value = self.upload().await;
instance_spec
.context(self.cloudformation, self.template_uploader)
.parameter_update(&ParameterMap(std::collections::BTreeMap::from([(
self.parameter_key.clone(),
parameter_value,
)])))
.await
}
async fn load_s3_bucket_name(&self) -> crate::lambda::deploy::S3BucketName {
match &self.s3_bucket_source {
S3BucketSource::StackOutput {
stack_name,
output_key,
} => crate::lambda::deploy::S3BucketName(
crate::stack::read_stack_output(self.cloudformation, stack_name, output_key)
.await,
),
S3BucketSource::Static(s3_bucket_name) => s3_bucket_name.clone(),
}
}
}
// Deploy subcommands. NOTE(review): `//` comments are intentional — `///` doc
// comments on clap-derive variants become `--help` text and would change the
// CLI's runtime output.
#[derive(Clone, Debug, Eq, PartialEq, clap::Subcommand)]
pub enum Command {
    // Build, upload, and sync template + parameters for one stack.
    DeployTemplate {
        #[arg(long = "stack-name")]
        name: StackName,
        // Defaults to interactive change-set review.
        #[arg(long, default_value = "interactive")]
        review_change_set: ReviewChangeSet,
    },
    // Build, upload, and update only the lambda parameter of one stack.
    DeployParameter {
        #[arg(long = "stack-name")]
        name: StackName,
    },
    // Build the lambda binary only.
    Build,
    // Build, zip, and upload without touching any stack.
    Upload,
}
impl Command {
    /// Execute the subcommand against the given configuration.
    pub async fn run(&self, config: &'_ Config<'_>) {
        match self {
            Self::DeployTemplate {
                name: stack_name,
                review_change_set,
            } => {
                config.deploy_template(stack_name, review_change_set).await;
            }
            Self::DeployParameter { name: stack_name } => {
                config.deploy_parameter(stack_name).await;
            }
            Self::Build => config.build().await,
            Self::Upload => {
                // The returned parameter value is only logged by `upload`.
                let _parameter_value = config.upload().await;
            }
        }
    }
}
}