use crate::ec2::Error;
use aws_config::BehaviorVersion;
pub use aws_config::Region;
use aws_sdk_s3::{
    config::retry::ReconnectMode,
    operation::head_object::HeadObjectError,
    presigning::PresigningConfig,
    primitives::ByteStream,
    types::{BucketLocationConstraint, CreateBucketConfiguration, Delete, ObjectIdentifier},
    Client as S3Client,
};
use commonware_cryptography::{Hasher, Sha256};
use std::{io::Read, path::Path, time::Duration};
use tracing::{debug, info};

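/// Name of the S3 bucket used to cache deployer artifacts.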
pub const S3_BUCKET_NAME: &str = "commonware-deployer-cache";

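/// Key prefix for cached tool binaries.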
pub const S3_TOOLS_BINARIES_PREFIX: &str = "tools/binaries";

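/// Key prefix for cached tool configuration files.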
pub const S3_TOOLS_CONFIGS_PREFIX: &str = "tools/configs";

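/// Key prefix for per-deployment artifacts.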
pub const S3_DEPLOYMENTS_PREFIX: &str = "deployments";

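/// How long pre-signed URLs remain valid (6 hours).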
pub const PRESIGN_DURATION: Duration = Duration::from_secs(6 * 60 * 60);

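/// Creates an S3 client for `region` configured with adaptive retries and exponential backoff.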
pub async fn create_s3_client(region: Region) -> S3Client {
    let retry = aws_config::retry::RetryConfig::adaptive()
        .with_max_attempts(u32::MAX)
        .with_initial_backoff(Duration::from_millis(500))
        .with_max_backoff(Duration::from_secs(30))
        .with_reconnect_mode(ReconnectMode::ReconnectOnTransientError);
    let config = aws_config::defaults(BehaviorVersion::v2025_08_07())
        .region(region)
        .retry_config(retry)
        .load()
        .await;
    S3Client::new(&config)
}

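/// Ensures `bucket_name` exists, creating it in `region` if it does not.
///
/// If the bucket already exists in another region, it is reused via cross-region
/// access; if access to the bucket is denied, an error is returned.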
pub async fn ensure_bucket_exists(
    client: &S3Client,
    bucket_name: &str,
    region: &str,
) -> Result<(), Error> {
    match client.head_bucket().bucket(bucket_name).send().await {
        Ok(_) => {
            info!(bucket = bucket_name, "bucket already exists");
            return Ok(());
        }
        Err(e) => {
            // Capture the bucket's actual region (if reported) before consuming the error.
            let bucket_region = e
                .raw_response()
                .and_then(|r| r.headers().get("x-amz-bucket-region"))
                .map(|s| s.to_string());

            let service_err = e.into_service_error();
            if service_err.is_not_found() {
                debug!(bucket = bucket_name, "bucket not found, will create");
            } else if let Some(bucket_region) = bucket_region {
                info!(
                    bucket = bucket_name,
                    bucket_region = bucket_region.as_str(),
                    client_region = region,
                    "bucket exists in different region, using cross-region access"
                );
                return Ok(());
            } else {
                return Err(Error::S3BucketForbidden {
                    bucket: bucket_name.to_string(),
                    reason: super::BucketForbiddenReason::AccessDenied,
                });
            }
        }
    }

    let mut request = client.create_bucket().bucket(bucket_name);
    if region != "us-east-1" {
        // us-east-1 rejects an explicit location constraint; all other regions require one.
        let location_constraint = BucketLocationConstraint::from(region);
        let bucket_config = CreateBucketConfiguration::builder()
            .location_constraint(location_constraint)
            .build();
        request = request.create_bucket_configuration(bucket_config);
    }

    match request.send().await {
        Ok(_) => {
            info!(bucket = bucket_name, region = region, "created bucket");
        }
        Err(e) => {
            let service_err = e.into_service_error();
            let s3_err = aws_sdk_s3::Error::from(service_err);
            match &s3_err {
                // The bucket may have been created concurrently; treat that as success.
                aws_sdk_s3::Error::BucketAlreadyExists(_)
                | aws_sdk_s3::Error::BucketAlreadyOwnedByYou(_) => {
                    info!(bucket = bucket_name, "bucket already exists");
                }
                _ => {
                    return Err(Error::AwsS3 {
                        bucket: bucket_name.to_string(),
                        operation: super::S3Operation::CreateBucket,
                        source: Box::new(s3_err),
                    });
                }
            }
        }
    }
    Ok(())
}

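/// Returns whether the object at `key` exists in `bucket`.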
pub async fn object_exists(client: &S3Client, bucket: &str, key: &str) -> Result<bool, Error> {
    match client.head_object().bucket(bucket).key(key).send().await {
        Ok(_) => Ok(true),
        Err(e) => {
            let service_err = e.into_service_error();
            if matches!(service_err, HeadObjectError::NotFound(_)) {
                Ok(false)
            } else {
                Err(Error::AwsS3 {
                    bucket: bucket.to_string(),
                    operation: super::S3Operation::HeadObject,
                    source: Box::new(aws_sdk_s3::Error::from(service_err)),
                })
            }
        }
    }
}

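/// Uploads the file at `path` to `bucket` under `key`.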
pub async fn upload_file(
    client: &S3Client,
    bucket: &str,
    key: &str,
    path: &Path,
) -> Result<(), Error> {
    let body = ByteStream::from_path(path)
        .await
        .map_err(std::io::Error::other)?;

    client
        .put_object()
        .bucket(bucket)
        .key(key)
        .body(body)
        .send()
        .await
        .map_err(|e| Error::AwsS3 {
            bucket: bucket.to_string(),
            operation: super::S3Operation::PutObject,
            source: Box::new(aws_sdk_s3::Error::from(e.into_service_error())),
        })?;

    debug!(bucket = bucket, key = key, "uploaded file to S3");
    Ok(())
}

/// Uploads the file at `path` to S3 and returns a pre-signed URL for downloading it.
#[must_use = "the pre-signed URL should be used to download the file"]
pub async fn upload_and_presign(
    client: &S3Client,
    bucket: &str,
    key: &str,
    path: &Path,
    expires_in: Duration,
) -> Result<String, Error> {
    upload_file(client, bucket, key, path).await?;
    presign_url(client, bucket, key, expires_in).await
}

/// Uploads static `content` to S3 if it is not already cached, then returns a pre-signed URL for it.
#[must_use = "the pre-signed URL should be used to download the content"]
pub async fn cache_content_and_presign(
    client: &S3Client,
    bucket: &str,
    key: &str,
    content: &'static [u8],
    expires_in: Duration,
) -> Result<String, Error> {
    if !object_exists(client, bucket, key).await? {
        debug!(key = key, "static content not in S3, uploading");
        let body = ByteStream::from_static(content);
        client
            .put_object()
            .bucket(bucket)
            .key(key)
            .body(body)
            .send()
            .await
            .map_err(|e| Error::AwsS3 {
                bucket: bucket.to_string(),
                operation: super::S3Operation::PutObject,
                source: Box::new(aws_sdk_s3::Error::from(e.into_service_error())),
            })?;
    }
    presign_url(client, bucket, key, expires_in).await
}

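/// Computes the SHA-256 digest of the file at `path` and returns it as a string.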
pub fn hash_file(path: &Path) -> Result<String, Error> {
    let mut file = std::fs::File::open(path)?;
    let mut hasher = Sha256::new();
    let mut buffer = [0u8; 8192];
    loop {
        let bytes_read = file.read(&mut buffer)?;
        if bytes_read == 0 {
            break;
        }
        hasher.update(&buffer[..bytes_read]);
    }
    Ok(hasher.finalize().to_string())
}

/// Uploads the file at `path` to S3 if `key` is not already cached, then returns a pre-signed URL for it.
#[must_use = "the pre-signed URL should be used to download the file"]
pub async fn cache_file_and_presign(
    client: &S3Client,
    bucket: &str,
    key: &str,
    path: &Path,
    expires_in: Duration,
) -> Result<String, Error> {
    if !object_exists(client, bucket, key).await? {
        debug!(key = key, "file not in S3, uploading");
        upload_file(client, bucket, key, path).await?;
    }
    presign_url(client, bucket, key, expires_in).await
}

/// Generates a pre-signed GET URL for `key` in `bucket` that expires after `expires_in`.
#[must_use = "the pre-signed URL should be used to download the object"]
pub async fn presign_url(
    client: &S3Client,
    bucket: &str,
    key: &str,
    expires_in: Duration,
) -> Result<String, Error> {
    let presigning_config = PresigningConfig::expires_in(expires_in)?;

    let presigned_request = client
        .get_object()
        .bucket(bucket)
        .key(key)
        .presigned(presigning_config)
        .await?;

    Ok(presigned_request.uri().to_string())
}

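/// Deletes every object in `bucket` whose key starts with `prefix`.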
pub async fn delete_prefix(client: &S3Client, bucket: &str, prefix: &str) -> Result<(), Error> {
    let mut continuation_token: Option<String> = None;
    let mut deleted_count = 0;

    loop {
        // List the next page of objects under the prefix.
        let mut request = client.list_objects_v2().bucket(bucket).prefix(prefix);

        if let Some(token) = continuation_token {
            request = request.continuation_token(token);
        }

        let response = request.send().await.map_err(|e| Error::AwsS3 {
            bucket: bucket.to_string(),
            operation: super::S3Operation::ListObjects,
            source: Box::new(aws_sdk_s3::Error::from(e.into_service_error())),
        })?;

        if let Some(objects) = response.contents {
            let identifiers: Vec<ObjectIdentifier> = objects
                .into_iter()
                .filter_map(|obj| obj.key)
                .map(|key| ObjectIdentifier::builder().key(key).build())
                .collect::<Result<Vec<_>, _>>()?;

            if !identifiers.is_empty() {
                let count = identifiers.len();
                let delete = Delete::builder().set_objects(Some(identifiers)).build()?;

                // Delete the listed objects in a single batch request.
                client
                    .delete_objects()
                    .bucket(bucket)
                    .delete(delete)
                    .send()
                    .await
                    .map_err(|e| Error::AwsS3 {
                        bucket: bucket.to_string(),
                        operation: super::S3Operation::DeleteObjects,
                        source: Box::new(aws_sdk_s3::Error::from(e.into_service_error())),
                    })?;

                deleted_count += count;
            }
        }

        // Keep paging until the listing is no longer truncated.
        if response.is_truncated == Some(true) {
            continuation_token = response.next_continuation_token;
        } else {
            break;
        }
    }

    info!(
        bucket = bucket,
        prefix = prefix,
        count = deleted_count,
        "deleted objects from S3"
    );
    Ok(())
}

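/// Deletes `bucket`. The bucket must be empty before it can be deleted.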
pub async fn delete_bucket(client: &S3Client, bucket: &str) -> Result<(), Error> {
    client
        .delete_bucket()
        .bucket(bucket)
        .send()
        .await
        .map_err(|e| Error::AwsS3 {
            bucket: bucket.to_string(),
            operation: super::S3Operation::DeleteBucket,
            source: Box::new(aws_sdk_s3::Error::from(e.into_service_error())),
        })?;
    info!(bucket = bucket, "deleted bucket");
    Ok(())
}

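/// Deletes all objects in `bucket` and then deletes the bucket itself.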
pub async fn delete_bucket_and_contents(client: &S3Client, bucket: &str) -> Result<(), Error> {
    // Remove every object first; S3 refuses to delete a non-empty bucket.
    delete_prefix(client, bucket, "").await?;

    delete_bucket(client, bucket).await?;

    Ok(())
}

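/// Returns whether `error` wraps an S3 `NoSuchBucket` error.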
pub fn is_no_such_bucket_error(error: &Error) -> bool {
    match error {
        Error::AwsS3 { source, .. } => {
            matches!(source.as_ref(), aws_sdk_s3::Error::NoSuchBucket(_))
        }
        _ => false,
    }
}