google_cloud_storage/storage/client.rs
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use super::request_options::RequestOptions;
use crate::Error;
use crate::builder::storage::ReadObject;
use crate::builder::storage::UploadObject;
use crate::download_resume_policy::DownloadResumePolicy;
use crate::storage::checksum::Crc32c;
use crate::upload_source::Payload;
use auth::credentials::CacheableResource;
use base64::Engine;
use base64::prelude::BASE64_STANDARD;
use http::Extensions;
use sha2::{Digest, Sha256};
use std::sync::Arc;

/// Implements a client for the Cloud Storage API.
///
/// # Example
/// ```
/// # tokio_test::block_on(async {
/// # use google_cloud_storage::client::Storage;
/// let client = Storage::builder().build().await?;
/// // use `client` to make requests to Cloud Storage.
/// # gax::client_builder::Result::<()>::Ok(()) });
/// ```
///
/// # Configuration
///
/// To configure `Storage` use the `with_*` methods in the type returned
/// by [builder()][Storage::builder]. The default configuration should
/// work for most applications. Common configuration changes include
///
/// * [with_endpoint()]: by default this client uses the global default endpoint
///   (`https://storage.googleapis.com`). Applications using regional
///   endpoints or running in restricted networks (e.g. a network configured
///   with [Private Google Access with VPC Service Controls]) may want to
///   override this default.
/// * [with_credentials()]: by default this client uses
///   [Application Default Credentials]. Applications using custom
///   authentication may need to override this default.
///
/// # Pooling and Cloning
///
/// `Storage` holds a connection pool internally; it is advised to create
/// one client and then reuse it. You do not need to wrap `Storage` in an
/// [Rc](std::rc::Rc) or [Arc] to reuse it, because it already uses an `Arc`
/// internally.
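///
/// For example (a minimal sketch), cloning the client is cheap and each
/// clone shares the same underlying connection pool:
/// ```
/// # use google_cloud_storage::client::Storage;
/// # async fn sample(client: &Storage) -> anyhow::Result<()> {
/// let shared = client.clone(); // both handles use the same pool
/// # Ok(()) }
/// ```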
///
/// # Service Description
///
/// The Cloud Storage API allows applications to read and write data through
/// the abstractions of buckets and objects. For a description of these
/// abstractions please see <https://cloud.google.com/storage/docs>.
///
/// Resources are named as follows:
///
/// - Projects are referred to as they are defined by the Resource Manager API,
///   using strings like `projects/123456` or `projects/my-string-id`.
///
/// - Buckets are named using string names of the form:
///   `projects/{project}/buckets/{bucket}`
///   For globally unique buckets, `_` may be substituted for the project.
///
/// - Objects are uniquely identified by their name along with the name of the
///   bucket they belong to, as separate strings in this API. For example:
///   ```no_rust
///   bucket = "projects/_/buckets/my-bucket"
///   object = "my-object/with/a/folder-like/name"
///   ```
///   Note that object names can contain `/` characters, which are treated as
///   any other character (no special directory semantics).
///
/// [with_endpoint()]: ClientBuilder::with_endpoint
/// [with_credentials()]: ClientBuilder::with_credentials
/// [Private Google Access with VPC Service Controls]: https://cloud.google.com/vpc-service-controls/docs/private-connectivity
/// [Application Default Credentials]: https://cloud.google.com/docs/authentication#adc
#[derive(Clone, Debug)]
pub struct Storage {
    inner: std::sync::Arc<StorageInner>,
}

#[derive(Clone, Debug)]
pub(crate) struct StorageInner {
    pub client: reqwest::Client,
    pub cred: auth::credentials::Credentials,
    pub endpoint: String,
    pub options: RequestOptions,
}

impl Storage {
    /// Returns a builder for [Storage].
    ///
    /// # Example
    /// ```
    /// # use google_cloud_storage::client::Storage;
    /// # async fn sample() -> anyhow::Result<()> {
    /// let client = Storage::builder().build().await?;
    /// # Ok(()) }
    /// ```
    pub fn builder() -> ClientBuilder {
        ClientBuilder::new()
    }

    /// Upload an object using a local buffer.
    ///
    /// If the data source does **not** implement [Seek], the client library
    /// must buffer uploaded data until it is persisted by the service. This
    /// requires more memory in the client and, when the buffer grows too
    /// large, may stall the upload until the service can persist the data.
    ///
    /// Use this function for data sources representing computations that are
    /// expensive or impossible to restart. This function is also useful when
    /// it is hard or impossible to predict the number of bytes emitted by a
    /// stream, even if restarting the stream is not too expensive.
    ///
    /// # Example
    /// ```
    /// # use google_cloud_storage::client::Storage;
    /// # async fn sample(client: &Storage) -> anyhow::Result<()> {
    /// let response = client
    ///     .upload_object("projects/_/buckets/my-bucket", "my-object", "hello world")
    ///     .send()
    ///     .await?;
    /// println!("response details={response:?}");
    /// # Ok(()) }
    /// ```
    ///
    /// # Example
    /// ```
    /// # use google_cloud_storage::client::Storage;
    /// # async fn sample(client: &Storage) -> anyhow::Result<()> {
    /// let response = client
    ///     .upload_object("projects/_/buckets/my-bucket", "my-object", "hello world")
    ///     .send_unbuffered()
    ///     .await?;
    /// println!("response details={response:?}");
    /// # Ok(()) }
    /// ```
    ///
    /// # Parameters
    /// * `bucket` - the name of the bucket containing the object, in
    ///   `projects/_/buckets/{bucket_id}` format.
    /// * `object` - the object name.
    /// * `payload` - the object data.
    ///
    /// [Seek]: crate::upload_source::Seek
    pub fn upload_object<B, O, T, P>(
        &self,
        bucket: B,
        object: O,
        payload: T,
    ) -> UploadObject<P, Crc32c>
    where
        B: Into<String>,
        O: Into<String>,
        T: Into<Payload<P>>,
    {
        UploadObject::new(self.inner.clone(), bucket, object, payload)
    }

    /// A simple download into a buffer.
    ///
    /// # Parameters
    /// * `bucket` - the name of the bucket containing the object, in
    ///   `projects/_/buckets/{bucket_id}` format.
    /// * `object` - the object name.
    ///
    /// # Example
    /// ```
    /// # use google_cloud_storage::client::Storage;
    /// # async fn sample(client: &Storage) -> anyhow::Result<()> {
    /// let mut resp = client
    ///     .read_object("projects/_/buckets/my-bucket", "my-object")
    ///     .send()
    ///     .await?;
    /// let mut contents = Vec::new();
    /// while let Some(chunk) = resp.next().await.transpose()? {
    ///     contents.extend_from_slice(&chunk);
    /// }
    /// println!("object contents={:?}", bytes::Bytes::from_owner(contents));
    /// # Ok(()) }
    /// ```
    pub fn read_object<B, O>(&self, bucket: B, object: O) -> ReadObject
    where
        B: Into<String>,
        O: Into<String>,
    {
        ReadObject::new(self.inner.clone(), bucket, object)
    }

    pub(crate) fn new(builder: ClientBuilder) -> gax::client_builder::Result<Self> {
        use gax::client_builder::Error;
        let client = reqwest::Client::builder()
            // Disable all automatic decompression. Users could enable these by
            // turning on the corresponding feature flags, but we would not be
            // able to tell whether that has happened.
            .no_brotli()
            .no_deflate()
            .no_gzip()
            .no_zstd()
            .build()
            .map_err(Error::transport)?;
        let mut builder = builder;
        let cred = if let Some(c) = builder.credentials {
            c
        } else {
            auth::credentials::Builder::default()
                .build()
                .map_err(Error::cred)?
        };
        let endpoint = builder
            .endpoint
            .unwrap_or_else(|| self::DEFAULT_HOST.to_string());
        builder.credentials = Some(cred);
        builder.endpoint = Some(endpoint);
        let inner = Arc::new(StorageInner::new(client, builder));
        Ok(Self { inner })
    }
}

impl StorageInner {
    /// Builds a client, assuming `builder.credentials` and `builder.endpoint`
    /// are initialized; panics otherwise.
    pub(self) fn new(client: reqwest::Client, builder: ClientBuilder) -> Self {
        Self {
            client,
            cred: builder
                .credentials
                .expect("StorageInner assumes the credentials are initialized"),
            endpoint: builder
                .endpoint
                .expect("StorageInner assumes the endpoint is initialized"),
            options: builder.default_options,
        }
    }

    // Helper method to apply authentication headers to the request builder.
    pub async fn apply_auth_headers(
        &self,
        builder: reqwest::RequestBuilder,
    ) -> crate::Result<reqwest::RequestBuilder> {
        let cached_auth_headers = self
            .cred
            .headers(Extensions::new())
            .await
            .map_err(Error::authentication)?;

        let auth_headers = match cached_auth_headers {
            CacheableResource::New { data, .. } => data,
            // We pass a fresh `Extensions` (no cached entity tag), so the
            // credentials cannot report `NotModified` here.
            CacheableResource::NotModified => {
                unreachable!("headers are not cached");
            }
        };

        let builder = builder.headers(auth_headers);
        Ok(builder)
    }
}

/// A builder for [Storage].
///
/// ```
/// # use google_cloud_storage::client::Storage;
/// # async fn sample() -> anyhow::Result<()> {
/// let builder = Storage::builder();
/// let client = builder
///     .with_endpoint("https://storage.googleapis.com")
///     .build()
///     .await?;
/// # Ok(()) }
/// ```
pub struct ClientBuilder {
    pub(crate) endpoint: Option<String>,
    pub(crate) credentials: Option<auth::credentials::Credentials>,
    // Default options for requests.
    pub(crate) default_options: RequestOptions,
}

impl ClientBuilder {
    pub(crate) fn new() -> Self {
        Self {
            endpoint: None,
            credentials: None,
            default_options: RequestOptions::new(),
        }
    }

    /// Creates a new client.
    ///
    /// # Example
    /// ```
    /// # use google_cloud_storage::client::Storage;
    /// # async fn sample() -> anyhow::Result<()> {
    /// let client = Storage::builder().build().await?;
    /// # Ok(()) }
    /// ```
    pub async fn build(self) -> gax::client_builder::Result<Storage> {
        Storage::new(self)
    }

    /// Sets the endpoint.
    ///
    /// # Example
    /// ```
    /// # use google_cloud_storage::client::Storage;
    /// # async fn sample() -> anyhow::Result<()> {
    /// let client = Storage::builder()
    ///     .with_endpoint("https://private.googleapis.com")
    ///     .build()
    ///     .await?;
    /// # Ok(()) }
    /// ```
    pub fn with_endpoint<V: Into<String>>(mut self, v: V) -> Self {
        self.endpoint = Some(v.into());
        self
    }

    /// Configures the authentication credentials.
    ///
    /// Google Cloud Storage requires authentication for most buckets. Use this
    /// method to change the credentials used by the client. More information
    /// about valid credential types can be found in the [google-cloud-auth]
    /// crate documentation.
    ///
    /// # Example
    /// ```
    /// # use google_cloud_storage::client::Storage;
    /// # async fn sample() -> anyhow::Result<()> {
    /// use auth::credentials::mds;
    /// let client = Storage::builder()
    ///     .with_credentials(
    ///         mds::Builder::default()
    ///             .with_scopes(["https://www.googleapis.com/auth/cloud-platform.read-only"])
    ///             .build()?)
    ///     .build()
    ///     .await?;
    /// # Ok(()) }
    /// ```
    ///
    /// [google-cloud-auth]: https://docs.rs/google-cloud-auth
    pub fn with_credentials<V: Into<auth::credentials::Credentials>>(mut self, v: V) -> Self {
        self.credentials = Some(v.into());
        self
    }

    /// Configure the retry policy.
    ///
    /// The client libraries can automatically retry operations that fail. The
    /// retry policy controls which errors are considered retryable and sets
    /// limits on the number of attempts or the total time spent retrying.
    ///
    /// # Example
    /// ```
    /// # use google_cloud_storage::client::Storage;
    /// # async fn sample() -> anyhow::Result<()> {
    /// use gax::retry_policy::{AlwaysRetry, RetryPolicyExt};
    /// let client = Storage::builder()
    ///     .with_retry_policy(AlwaysRetry.with_attempt_limit(3))
    ///     .build()
    ///     .await?;
    /// # Ok(()) }
    /// ```
    pub fn with_retry_policy<V: Into<gax::retry_policy::RetryPolicyArg>>(mut self, v: V) -> Self {
        self.default_options.retry_policy = v.into().into();
        self
    }

    /// Configure the retry backoff policy.
    ///
    /// The client libraries can automatically retry operations that fail. The
    /// backoff policy controls how long to wait between retry attempts.
    ///
    /// # Example
    /// ```
    /// # use google_cloud_storage::client::Storage;
    /// # async fn sample() -> anyhow::Result<()> {
    /// use gax::exponential_backoff::ExponentialBackoff;
    /// use std::time::Duration;
    /// let policy = ExponentialBackoff::default();
    /// let client = Storage::builder()
    ///     .with_backoff_policy(policy)
    ///     .build()
    ///     .await?;
    /// # Ok(()) }
    /// ```
    pub fn with_backoff_policy<V: Into<gax::backoff_policy::BackoffPolicyArg>>(
        mut self,
        v: V,
    ) -> Self {
        self.default_options.backoff_policy = v.into().into();
        self
    }

    /// Configure the retry throttler.
    ///
    /// Advanced applications may want to configure a retry throttler to help
    /// with [Addressing Cascading Failures] and [Handling Overload]
    /// conditions. The client libraries throttle their retry loop, using a
    /// policy to control the throttling algorithm. Use this method to
    /// fine-tune or customize the default retry throttler.
    ///
    /// [Handling Overload]: https://sre.google/sre-book/handling-overload/
    /// [Addressing Cascading Failures]: https://sre.google/sre-book/addressing-cascading-failures/
    ///
    /// # Example
    /// ```
    /// # use google_cloud_storage::client::Storage;
    /// # async fn sample() -> anyhow::Result<()> {
    /// use gax::retry_throttler::AdaptiveThrottler;
    /// let client = Storage::builder()
    ///     .with_retry_throttler(AdaptiveThrottler::default())
    ///     .build()
    ///     .await?;
    /// # Ok(()) }
    /// ```
    pub fn with_retry_throttler<V: Into<gax::retry_throttler::RetryThrottlerArg>>(
        mut self,
        v: V,
    ) -> Self {
        self.default_options.retry_throttler = v.into().into();
        self
    }

    /// Sets the payload size threshold to switch from single-shot to resumable uploads.
    ///
    /// # Example
    /// ```
    /// # use google_cloud_storage::client::Storage;
    /// # async fn sample() -> anyhow::Result<()> {
    /// let client = Storage::builder()
    ///     .with_resumable_upload_threshold(0_usize) // Forces a resumable upload.
    ///     .build()
    ///     .await?;
    /// let response = client
    ///     .upload_object("projects/_/buckets/my-bucket", "my-object", "hello world")
    ///     .send()
    ///     .await?;
    /// println!("response details={response:?}");
    /// # Ok(()) }
    /// ```
    ///
    /// The client library can perform uploads using [single-shot] or
    /// [resumable] uploads. For small objects, single-shot uploads offer better
    /// performance, as they require a single HTTP transfer. For larger objects,
    /// the additional request latency is not significant, and resumable uploads
    /// offer better recovery on errors.
    ///
    /// The library automatically selects resumable uploads when the payload is
    /// equal to or larger than this threshold. For smaller uploads the client
    /// library uses single-shot uploads.
    ///
    /// The optimal threshold depends on where the application is deployed and
    /// on the location of the destination bucket relative to the application.
    /// The library defaults should work well in most cases, but some
    /// applications may benefit from fine-tuning.
    ///
    /// [single-shot]: https://cloud.google.com/storage/docs/uploading-objects
    /// [resumable]: https://cloud.google.com/storage/docs/resumable-uploads
    pub fn with_resumable_upload_threshold<V: Into<usize>>(mut self, v: V) -> Self {
        self.default_options.resumable_upload_threshold = v.into();
        self
    }

    /// Changes the buffer size for some resumable uploads.
    ///
    /// # Example
    /// ```
    /// # use google_cloud_storage::client::Storage;
    /// # async fn sample() -> anyhow::Result<()> {
    /// let client = Storage::builder()
    ///     .with_resumable_upload_buffer_size(32 * 1024 * 1024_usize)
    ///     .build()
    ///     .await?;
    /// let response = client
    ///     .upload_object("projects/_/buckets/my-bucket", "my-object", "hello world")
    ///     .send()
    ///     .await?;
    /// println!("response details={response:?}");
    /// # Ok(()) }
    /// ```
    ///
    /// When performing [resumable uploads] from sources without [Seek], the
    /// client library needs to buffer data in memory until it is persisted by
    /// the service. Otherwise the data would be lost if the upload fails.
    /// Applications may want to tune this buffer size:
    ///
    /// - Use smaller buffer sizes to support more concurrent uploads in the
    ///   same application.
    /// - Use larger buffer sizes for better throughput: sending many small
    ///   buffers stalls the upload while the client waits for a successful
    ///   response from the service.
    ///
    /// Keep in mind that there are diminishing returns on using larger buffers.
    ///
    /// [resumable uploads]: https://cloud.google.com/storage/docs/resumable-uploads
    /// [Seek]: crate::upload_source::Seek
    pub fn with_resumable_upload_buffer_size<V: Into<usize>>(mut self, v: V) -> Self {
        self.default_options.resumable_upload_buffer_size = v.into();
        self
    }

    /// Configure the resume policy for downloads.
    ///
    /// The Cloud Storage client library can automatically resume a download
    /// that is interrupted by a transient error. Applications may want to
    /// limit the number of download attempts, or may wish to expand the types
    /// of errors treated as retryable.
    ///
    /// # Example
    /// ```
    /// # use google_cloud_storage::client::Storage;
    /// # async fn sample() -> anyhow::Result<()> {
    /// use google_cloud_storage::download_resume_policy::{AlwaysResume, DownloadResumePolicyExt};
    /// let client = Storage::builder()
    ///     .with_download_resume_policy(AlwaysResume.with_attempt_limit(3))
    ///     .build()
    ///     .await?;
    /// # Ok(()) }
    /// ```
    pub fn with_download_resume_policy<V>(mut self, v: V) -> Self
    where
        V: DownloadResumePolicy + 'static,
    {
        self.default_options.download_resume_policy = Arc::new(v);
        self
    }
}

/// The default host used by the service.
const DEFAULT_HOST: &str = "https://storage.googleapis.com";

pub(crate) mod info {
    const NAME: &str = env!("CARGO_PKG_NAME");
    const VERSION: &str = env!("CARGO_PKG_VERSION");
    lazy_static::lazy_static! {
        pub(crate) static ref X_GOOG_API_CLIENT_HEADER: String = {
            let ac = gaxi::api_header::XGoogApiClient {
                name: NAME,
                version: VERSION,
                library_type: gaxi::api_header::GCCL,
            };
            ac.grpc_header_value()
        };
    }
}

/// The set of characters that are percent-encoded.
///
/// This set is defined at
/// <https://cloud.google.com/storage/docs/request-endpoints#encoding>:
///
/// Encode the following characters when they appear in either the object name
/// or query string of a request URL:
/// !, #, $, &, ', (, ), *, +, ,, /, :, ;, =, ?, @, [, ], and space characters.
const ENCODED_CHARS: percent_encoding::AsciiSet = percent_encoding::CONTROLS
    .add(b'!')
    .add(b'#')
    .add(b'$')
    .add(b'&')
    .add(b'\'')
    .add(b'(')
    .add(b')')
    .add(b'*')
    .add(b'+')
    .add(b',')
    .add(b'/')
    .add(b':')
    .add(b';')
    .add(b'=')
    .add(b'?')
    .add(b'@')
    .add(b'[')
    .add(b']')
    .add(b' ');

/// Percent-encode a string.
///
/// To ensure compatibility, certain characters need to be encoded when they
/// appear in either the object name or the query string of a request URL.
pub(crate) fn enc(value: &str) -> String {
    percent_encoding::utf8_percent_encode(value, &ENCODED_CHARS).to_string()
}

/// Represents an error that can occur when an invalid range is specified.
#[derive(thiserror::Error, Debug, PartialEq)]
#[non_exhaustive]
pub(crate) enum RangeError {
    /// The provided read limit was negative.
    #[error("read limit was negative, expected non-negative value.")]
    NegativeLimit,
    /// A negative offset was provided with a read limit.
    #[error("negative read offsets cannot be used with read limits.")]
    NegativeOffsetWithLimit,
}

/// KeyAes256 represents an AES-256 encryption key used with the
/// Customer-Supplied Encryption Keys (CSEK) feature.
///
/// This key must be exactly 32 bytes in length and should be provided in its
/// raw (unencoded) byte format.
///
/// # Examples
///
/// Creating a `KeyAes256` instance from a valid byte slice:
/// ```
/// # use google_cloud_storage::client::{KeyAes256, KeyAes256Error};
/// let raw_key_bytes: [u8; 32] = [0x42; 32]; // Example 32-byte key
/// let key_aes_256 = KeyAes256::new(&raw_key_bytes)?;
/// # Ok::<(), KeyAes256Error>(())
/// ```
///
/// Handling an error for an invalid key length:
/// ```
/// # use google_cloud_storage::client::{KeyAes256, KeyAes256Error};
/// let invalid_key_bytes: &[u8] = b"too_short_key"; // Less than 32 bytes
/// let result = KeyAes256::new(invalid_key_bytes);
///
/// assert!(matches!(result, Err(KeyAes256Error::InvalidLength)));
/// ```
#[derive(Debug)]
pub struct KeyAes256 {
    key: [u8; 32],
}

/// Represents errors that can occur when converting to [`KeyAes256`] instances.
///
/// # Example
/// ```
/// # use google_cloud_storage::client::{KeyAes256, KeyAes256Error};
/// let invalid_key_bytes: &[u8] = b"too_short_key"; // Less than 32 bytes
/// let result = KeyAes256::new(invalid_key_bytes);
///
/// assert!(matches!(result, Err(KeyAes256Error::InvalidLength)));
/// ```
#[derive(thiserror::Error, Debug)]
#[non_exhaustive]
pub enum KeyAes256Error {
    /// The provided key's length was not exactly 32 bytes.
    #[error("Key has an invalid length: expected 32 bytes.")]
    InvalidLength,
}

impl KeyAes256 {
    /// Attempts to create a new [KeyAes256].
    ///
    /// This conversion will succeed only if the input slice is exactly 32 bytes long.
    ///
    /// # Example
    /// ```
    /// # use google_cloud_storage::client::{KeyAes256, KeyAes256Error};
    /// let raw_key_bytes: [u8; 32] = [0x42; 32]; // Example 32-byte key
    /// let key_aes_256 = KeyAes256::new(&raw_key_bytes)?;
    /// # Ok::<(), KeyAes256Error>(())
    /// ```
    pub fn new(key: &[u8]) -> std::result::Result<Self, KeyAes256Error> {
        match key.len() {
            32 => Ok(Self {
                key: key[..32].try_into().unwrap(),
            }),
            _ => Err(KeyAes256Error::InvalidLength),
        }
    }
}

impl std::convert::From<KeyAes256> for crate::model::CommonObjectRequestParams {
    fn from(value: KeyAes256) -> Self {
        crate::model::CommonObjectRequestParams::new()
            .set_encryption_algorithm("AES256")
            .set_encryption_key_bytes(value.key.to_vec())
            .set_encryption_key_sha256_bytes(Sha256::digest(value.key).as_slice().to_owned())
    }
}

/// Applies the customer-supplied encryption key (CSEK) headers, if any, to a
/// request builder. The key and its SHA-256 digest are sent base64-encoded.
pub(crate) fn apply_customer_supplied_encryption_headers(
    builder: reqwest::RequestBuilder,
    common_object_request_params: &Option<crate::model::CommonObjectRequestParams>,
) -> reqwest::RequestBuilder {
    common_object_request_params.iter().fold(builder, |b, v| {
        b.header(
            "x-goog-encryption-algorithm",
            v.encryption_algorithm.clone(),
        )
        .header(
            "x-goog-encryption-key",
            BASE64_STANDARD.encode(v.encryption_key_bytes.clone()),
        )
        .header(
            "x-goog-encryption-key-sha256",
            BASE64_STANDARD.encode(v.encryption_key_sha256_bytes.clone()),
        )
    })
}

#[cfg(test)]
pub(crate) mod tests {
    use super::*;
    use gax::retry_result::RetryResult;
    use std::{sync::Arc, time::Duration};
    use test_case::test_case;

    type Result = anyhow::Result<()>;

    pub(crate) fn test_builder() -> ClientBuilder {
        ClientBuilder::new()
            .with_credentials(auth::credentials::testing::test_credentials())
            .with_endpoint("http://private.googleapis.com")
            .with_backoff_policy(
                gax::exponential_backoff::ExponentialBackoffBuilder::new()
                    .with_initial_delay(Duration::from_millis(1))
                    .with_maximum_delay(Duration::from_millis(2))
                    .build()
                    .expect("hard coded policy should build correctly"),
            )
    }

    /// This is used by the request builder tests.
    pub(crate) fn test_inner_client(builder: ClientBuilder) -> Arc<StorageInner> {
        let client = reqwest::Client::new();
        Arc::new(StorageInner::new(client, builder))
    }

    /// This is used by the request builder tests.
    pub(crate) fn create_key_helper() -> (Vec<u8>, String, Vec<u8>, String) {
        // Make a 32-byte key.
        let key = vec![b'a'; 32];
        let key_base64 = BASE64_STANDARD.encode(key.clone());

        let key_sha256 = Sha256::digest(key.clone());
        let key_sha256_base64 = BASE64_STANDARD.encode(key_sha256);
        (key, key_base64, key_sha256.to_vec(), key_sha256_base64)
    }

    #[test]
    // This tests converting to KeyAes256 from several different types
    // that can be converted to &[u8].
    fn test_key_aes_256() -> Result {
        let v_slice: &[u8] = &[b'c'; 32];
        KeyAes256::new(v_slice)?;

        let v_vec: Vec<u8> = vec![b'a'; 32];
        KeyAes256::new(&v_vec)?;

        let v_array: [u8; 32] = [b'a'; 32];
        KeyAes256::new(&v_array)?;

        let v_bytes: bytes::Bytes = bytes::Bytes::copy_from_slice(&v_array);
        KeyAes256::new(&v_bytes)?;

        Ok(())
    }

    #[test_case(&[b'a'; 0]; "no bytes")]
    #[test_case(&[b'a'; 1]; "not enough bytes")]
    #[test_case(&[b'a'; 33]; "too many bytes")]
    fn test_key_aes_256_err(input: &[u8]) {
        KeyAes256::new(input).unwrap_err();
    }
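
    // Added illustration: a small sanity check of the percent-encoding
    // helper. The expected values follow from `ENCODED_CHARS`: space, `/`,
    // and `?` are encoded, while unreserved characters pass through.
    #[test]
    fn test_enc_percent_encodes_reserved_characters() {
        assert_eq!(enc("my-object"), "my-object");
        assert_eq!(enc("my object/with?chars"), "my%20object%2Fwith%3Fchars");
    }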

    #[test]
    fn test_key_aes_256_to_control_model_object() -> Result {
        let (key, _, key_sha256, _) = create_key_helper();
        let key_aes_256 = KeyAes256::new(&key)?;
        let params = crate::model::CommonObjectRequestParams::from(key_aes_256);
        assert_eq!(params.encryption_algorithm, "AES256");
        assert_eq!(params.encryption_key_bytes, key);
        assert_eq!(params.encryption_key_sha256_bytes, key_sha256);
        Ok(())
    }
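
    // Added illustration: a sketch verifying that the CSEK helper attaches
    // the three `x-goog-encryption-*` headers, with the key and its SHA-256
    // digest base64-encoded.
    #[test]
    fn test_apply_customer_supplied_encryption_headers() -> Result {
        let (key, key_base64, key_sha256, key_sha256_base64) = create_key_helper();
        let params = crate::model::CommonObjectRequestParams::new()
            .set_encryption_algorithm("AES256")
            .set_encryption_key_bytes(key)
            .set_encryption_key_sha256_bytes(key_sha256);
        let builder = reqwest::Client::new().get("http://example.com");
        let request =
            apply_customer_supplied_encryption_headers(builder, &Some(params)).build()?;
        let headers = request.headers();
        assert_eq!(headers.get("x-goog-encryption-algorithm").unwrap(), "AES256");
        assert_eq!(
            headers.get("x-goog-encryption-key").unwrap(),
            key_base64.as_str()
        );
        assert_eq!(
            headers.get("x-goog-encryption-key-sha256").unwrap(),
            key_sha256_base64.as_str()
        );
        Ok(())
    }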

    mockall::mock! {
        #[derive(Debug)]
        pub RetryThrottler {}

        impl gax::retry_throttler::RetryThrottler for RetryThrottler {
            fn throttle_retry_attempt(&self) -> bool;
            fn on_retry_failure(&mut self, flow: &RetryResult);
            fn on_success(&mut self);
        }
    }

    mockall::mock! {
        #[derive(Debug)]
        pub RetryPolicy {}

        impl gax::retry_policy::RetryPolicy for RetryPolicy {
            fn on_error(&self, loop_start: std::time::Instant, attempt_count: u32, idempotent: bool, error: gax::error::Error) -> RetryResult;
        }
    }

    mockall::mock! {
        #[derive(Debug)]
        pub BackoffPolicy {}

        impl gax::backoff_policy::BackoffPolicy for BackoffPolicy {
            fn on_failure(&self, loop_start: std::time::Instant, attempt_count: u32) -> std::time::Duration;
        }
    }

    mockall::mock! {
        #[derive(Debug)]
        pub DownloadResumePolicy {}

        impl crate::download_resume_policy::DownloadResumePolicy for DownloadResumePolicy {
            fn on_error(&self, query: &crate::download_resume_policy::ResumeQuery, error: gax::error::Error) -> crate::download_resume_policy::ResumeResult;
        }
    }
819}