// google_cloud_storage/storage/client.rs
1// Copyright 2025 Google LLC
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7// https://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15use super::request_options::RequestOptions;
16use crate::builder::storage::ReadObject;
17use crate::builder::storage::WriteObject;
18use crate::read_resume_policy::ReadResumePolicy;
19use crate::storage::bidi::OpenObject;
20use crate::storage::common_options::CommonOptions;
21use crate::streaming_source::Payload;
22use base64::Engine;
23use base64::prelude::BASE64_STANDARD;
24use gaxi::http::HttpRequestBuilder;
25use gaxi::options::{ClientConfig, Credentials};
26use google_cloud_auth::credentials::Builder as CredentialsBuilder;
27use google_cloud_gax::client_builder::{Error as BuilderError, Result as BuilderResult};
28use std::sync::Arc;
29
30/// Implements a client for the Cloud Storage API.
31///
32/// # Example
33/// ```
34/// # async fn sample() -> anyhow::Result<()> {
35/// # use google_cloud_storage::client::Storage;
36/// let client = Storage::builder().build().await?;
37/// // use `client` to make requests to Cloud Storage.
38/// # Ok(()) }
39/// ```
40///
41/// # Configuration
42///
43/// To configure `Storage` use the `with_*` methods in the type returned
44/// by [builder()][Storage::builder]. The default configuration should
45/// work for most applications. Common configuration changes include
46///
47/// * [with_endpoint()]: by default this client uses the global default endpoint
48/// (`https://storage.googleapis.com`). Applications using regional
49/// endpoints or running in restricted networks (e.g. a network configured
50/// with [Private Google Access with VPC Service Controls]) may want to
51/// override this default.
52/// * [with_credentials()]: by default this client uses
53/// [Application Default Credentials]. Applications using custom
54/// authentication may need to override this default.
55///
56/// # Pooling and Cloning
57///
58/// `Storage` holds a connection pool internally, it is advised to
59/// create one and then reuse it. You do not need to wrap `Storage` in
60/// an [Rc](std::rc::Rc) or [Arc] to reuse it, because it already uses an `Arc`
61/// internally.
62///
63/// # Service Description
64///
65/// The Cloud Storage API allows applications to read and write data through
66/// the abstractions of buckets and objects. For a description of these
67/// abstractions please see <https://cloud.google.com/storage/docs>.
68///
69/// Resources are named as follows:
70///
71/// - Projects are referred to as they are defined by the Resource Manager API,
72/// using strings like `projects/123456` or `projects/my-string-id`.
73///
74/// - Buckets are named using string names of the form:
75/// `projects/{project}/buckets/{bucket}`
76/// For globally unique buckets, `_` may be substituted for the project.
77///
78/// - Objects are uniquely identified by their name along with the name of the
79/// bucket they belong to, as separate strings in this API. For example:
80/// ```no_rust
81/// bucket = "projects/_/buckets/my-bucket"
82/// object = "my-object/with/a/folder-like/name"
83/// ```
84/// Note that object names can contain `/` characters, which are treated as
85/// any other character (no special directory semantics).
86///
87/// [with_endpoint()]: ClientBuilder::with_endpoint
88/// [with_credentials()]: ClientBuilder::with_credentials
89/// [Private Google Access with VPC Service Controls]: https://cloud.google.com/vpc-service-controls/docs/private-connectivity
90/// [Application Default Credentials]: https://cloud.google.com/docs/authentication#adc
#[derive(Clone, Debug)]
pub struct Storage<S = crate::stub::DefaultStorage>
where
    S: crate::stub::Storage + 'static,
{
    // The transport stub implementing the RPCs. Held in an `Arc` so cloning
    // the client is cheap and all clones share one connection pool.
    stub: std::sync::Arc<S>,
    // Per-client default request options, cloned into each request builder.
    options: RequestOptions,
}
99
/// Shared state used by the transport implementation of the client.
///
/// Holds both an HTTP and a gRPC transport plus the request options derived
/// from the builder configuration.
#[derive(Clone, Debug)]
pub(crate) struct StorageInner {
    // HTTP (JSON API) transport.
    pub client: gaxi::http::ReqwestClient,
    // Default request options applied to each request.
    pub options: RequestOptions,
    // gRPC transport.
    pub grpc: gaxi::grpc::Client,
}
106
impl Storage {
    /// Returns a builder for [Storage].
    ///
    /// # Example
    /// ```
    /// # use google_cloud_storage::client::Storage;
    /// # async fn sample() -> anyhow::Result<()> {
    /// let client = Storage::builder().build().await?;
    /// # Ok(()) }
    /// ```
    pub fn builder() -> ClientBuilder {
        // The builder seeds storage-specific retry/backoff defaults; see
        // `ClientBuilder::new()`.
        ClientBuilder::new()
    }
}
121
122impl<S> Storage<S>
123where
124 S: crate::storage::stub::Storage + 'static,
125{
126 /// Creates a new client from the provided stub.
127 ///
128 /// The most common case for calling this function is in tests mocking the
129 /// client's behavior.
130 pub fn from_stub(stub: S) -> Self
131 where
132 S: super::stub::Storage + 'static,
133 {
134 Self {
135 stub: std::sync::Arc::new(stub),
136 options: RequestOptions::new(),
137 }
138 }
139
140 /// Write an object with data from any data source.
141 ///
142 /// # Example
143 /// ```
144 /// # use google_cloud_storage::client::Storage;
145 /// # async fn sample(client: &Storage) -> anyhow::Result<()> {
146 /// let response = client
147 /// .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
148 /// .send_buffered()
149 /// .await?;
150 /// println!("response details={response:?}");
151 /// # Ok(()) }
152 /// ```
153 ///
154 /// # Example
155 /// ```
156 /// # use google_cloud_storage::client::Storage;
157 /// # async fn sample(client: &Storage) -> anyhow::Result<()> {
158 /// let response = client
159 /// .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
160 /// .send_unbuffered()
161 /// .await?;
162 /// println!("response details={response:?}");
163 /// # Ok(()) }
164 /// ```
165 ///
166 /// You can use many different types as the payload. For example, a string,
167 /// a [bytes::Bytes], a [tokio::fs::File], or a custom type that implements
168 /// the [StreamingSource] trait.
169 ///
170 /// If your data source also implements [Seek], prefer [send_unbuffered()]
171 /// to start the write. Otherwise use [send_buffered()].
172 ///
173 /// # Parameters
174 /// * `bucket` - the bucket name containing the object. In
175 /// `projects/_/buckets/{bucket_id}` format.
176 /// * `object` - the object name.
177 /// * `payload` - the object data.
178 ///
179 /// [Seek]: crate::streaming_source::Seek
180 /// [StreamingSource]: crate::streaming_source::StreamingSource
181 /// [send_buffered()]: crate::builder::storage::WriteObject::send_buffered
182 /// [send_unbuffered()]: crate::builder::storage::WriteObject::send_unbuffered
183 pub fn write_object<B, O, T, P>(&self, bucket: B, object: O, payload: T) -> WriteObject<P, S>
184 where
185 B: Into<String>,
186 O: Into<String>,
187 T: Into<Payload<P>>,
188 {
189 WriteObject::new(
190 self.stub.clone(),
191 bucket,
192 object,
193 payload,
194 self.options.clone(),
195 )
196 }
197
198 /// Reads the contents of an object.
199 ///
200 /// # Example
201 /// ```
202 /// # use google_cloud_storage::client::Storage;
203 /// # async fn sample(client: &Storage) -> anyhow::Result<()> {
204 /// let mut resp = client
205 /// .read_object("projects/_/buckets/my-bucket", "my-object")
206 /// .send()
207 /// .await?;
208 /// let mut contents = Vec::new();
209 /// while let Some(chunk) = resp.next().await.transpose()? {
210 /// contents.extend_from_slice(&chunk);
211 /// }
212 /// println!("object contents={:?}", bytes::Bytes::from_owner(contents));
213 /// # Ok(()) }
214 /// ```
215 ///
216 /// # Parameters
217 /// * `bucket` - the bucket name containing the object. In
218 /// `projects/_/buckets/{bucket_id}` format.
219 /// * `object` - the object name.
220 pub fn read_object<B, O>(&self, bucket: B, object: O) -> ReadObject<S>
221 where
222 B: Into<String>,
223 O: Into<String>,
224 {
225 ReadObject::new(self.stub.clone(), bucket, object, self.options.clone())
226 }
227
228 /// Opens an object to read its contents using concurrent ranged reads.
229 ///
230 /// # Example
231 /// ```
232 /// # use google_cloud_storage::client::Storage;
233 /// # async fn sample(client: &Storage) -> anyhow::Result<()> {
234 /// use google_cloud_storage::model_ext::ReadRange;
235 /// let descriptor = client
236 /// .open_object("projects/_/buckets/my-bucket", "my-object")
237 /// .send()
238 /// .await?;
239 /// // Print the object metadata
240 /// println!("metadata = {:?}", descriptor.object());
241 /// // Read 2000 bytes starting at offset 1000.
242 /// let mut reader = descriptor.read_range(ReadRange::segment(1000, 2000)).await;
243 /// let mut contents = Vec::new();
244 /// while let Some(chunk) = reader.next().await.transpose()? {
245 /// contents.extend_from_slice(&chunk);
246 /// }
247 /// println!("range contents={:?}", bytes::Bytes::from_owner(contents));
248 /// // `descriptor` can be used to read more ranges, concurrently if needed.
249 /// # Ok(()) }
250 /// ```
251 ///
252 /// # Example: open and read in a single RPC
253 /// ```
254 /// # use google_cloud_storage::client::Storage;
255 /// # async fn sample(client: &Storage) -> anyhow::Result<()> {
256 /// use google_cloud_storage::model_ext::ReadRange;
257 /// let (descriptor, mut reader) = client
258 /// .open_object("projects/_/buckets/my-bucket", "my-object")
259 /// .send_and_read(ReadRange::segment(1000, 2000))
260 /// .await?;
261 /// // `descriptor` can be used to read more ranges.
262 /// # Ok(()) }
263 /// ```
264 ///
265 /// <div class="warning">
266 /// The APIs used by this method are only enabled for some projects and
267 /// buckets. Contact your account team to enable this API.
268 /// </div>
269 ///
270 /// # Parameters
271 /// * `bucket` - the bucket name containing the object. In
272 /// `projects/_/buckets/{bucket_id}` format.
273 /// * `object` - the object name.
274 pub fn open_object<B, O>(&self, bucket: B, object: O) -> OpenObject<S>
275 where
276 B: Into<String>,
277 O: Into<String>,
278 {
279 OpenObject::new(self.stub.clone(), bucket, object, self.options.clone())
280 }
281}
282
283impl Storage {
284 pub(crate) async fn new(builder: ClientBuilder) -> BuilderResult<Self> {
285 let tracing = builder.config.tracing;
286 let inner = StorageInner::from_parts(builder).await?;
287 let options = inner.options.clone();
288 let stub = crate::storage::transport::Storage::new(Arc::new(inner), tracing);
289 Ok(Self { stub, options })
290 }
291}
292
impl StorageInner {
    /// Assembles a `StorageInner` from its pre-built components.
    ///
    /// NOTE(review): a previous version of this comment claimed the function
    /// panics when `config.cred` or `config.endpoint` are uninitialized; this
    /// constructor only moves the given parts into place and cannot panic.
    /// Callers are expected to go through `from_parts()`, which applies the
    /// credential and endpoint defaults first.
    pub(self) fn new(
        client: gaxi::http::ReqwestClient,
        options: RequestOptions,
        grpc: gaxi::grpc::Client,
    ) -> Self {
        Self {
            client,
            options,
            grpc,
        }
    }

    /// Builds the shared client state from a `ClientBuilder`, applying the
    /// default credentials and endpoint.
    pub(self) async fn from_parts(builder: ClientBuilder) -> BuilderResult<Self> {
        let (mut config, options) = builder.into_parts()?;
        // Opt out of reqwest's automatic decompression and redirect handling;
        // presumably the storage client needs the raw responses — TODO confirm.
        config.disable_automatic_decompression = true;
        config.disable_follow_redirects = true;

        let client = gaxi::http::ReqwestClient::new(config.clone(), super::DEFAULT_HOST).await?;
        // Only compiled when the unstable tracing feature is configured: wrap
        // the HTTP client with instrumentation if tracing is enabled.
        #[cfg(google_cloud_unstable_tracing)]
        let client = if gaxi::options::tracing_enabled(&config) {
            client.with_instrumentation(&super::info::INSTRUMENTATION)
        } else {
            client
        };
        let inner = StorageInner::new(
            client,
            options,
            gaxi::grpc::Client::new(config, super::DEFAULT_HOST).await?,
        );
        Ok(inner)
    }
}
327
/// A builder for [Storage].
///
/// ```
/// # use google_cloud_storage::client::Storage;
/// # async fn sample() -> anyhow::Result<()> {
/// let builder = Storage::builder();
/// let client = builder
///     .with_endpoint("https://storage.googleapis.com")
///     .build()
///     .await?;
/// # Ok(()) }
/// ```
pub struct ClientBuilder {
    // Common options shared by all clients (generated or not).
    pub(crate) config: ClientConfig,
    // Options specific to the storage client. `RequestOptions` also needs
    // these, so they are kept separately and shared with it.
    common_options: CommonOptions,
}
347
impl ClientBuilder {
    pub(crate) fn new() -> Self {
        let mut config = ClientConfig::default();
        // Storage-specific retry and backoff defaults.
        config.retry_policy = Some(Arc::new(crate::retry_policy::storage_default()));
        config.backoff_policy = Some(Arc::new(crate::backoff_policy::default()));
        {
            // Default the gRPC subchannel count to the available parallelism,
            // falling back to 1 if it cannot be determined.
            let count = std::thread::available_parallelism().ok();
            config.grpc_subchannel_count = Some(count.map(|x| x.get()).unwrap_or(1));
        }
        let common_options = CommonOptions::new();
        Self {
            config,
            common_options,
        }
    }

    /// Creates a new client.
    ///
    /// # Example
    /// ```
    /// # use google_cloud_storage::client::Storage;
    /// # async fn sample() -> anyhow::Result<()> {
    /// let client = Storage::builder().build().await?;
    /// # Ok(()) }
    /// ```
    pub async fn build(self) -> BuilderResult<Storage> {
        Storage::new(self).await
    }

    /// Sets the endpoint.
    ///
    /// # Example
    /// ```
    /// # use google_cloud_storage::client::Storage;
    /// # async fn sample() -> anyhow::Result<()> {
    /// let client = Storage::builder()
    ///     .with_endpoint("https://private.googleapis.com")
    ///     .build()
    ///     .await?;
    /// # Ok(()) }
    /// ```
    pub fn with_endpoint<V: Into<String>>(mut self, v: V) -> Self {
        self.config.endpoint = Some(v.into());
        self
    }

    /// Configures the authentication credentials.
    ///
    /// Google Cloud Storage requires authentication for most buckets. Use this
    /// method to change the credentials used by the client. More information
    /// about valid credentials types can be found in the [google-cloud-auth]
    /// crate documentation.
    ///
    /// # Example
    /// ```
    /// # use google_cloud_storage::client::Storage;
    /// # async fn sample() -> anyhow::Result<()> {
    /// use google_cloud_auth::credentials::mds;
    /// let client = Storage::builder()
    ///     .with_credentials(
    ///         mds::Builder::default()
    ///             .with_scopes(["https://www.googleapis.com/auth/cloud-platform.read-only"])
    ///             .build()?)
    ///     .build()
    ///     .await?;
    /// # Ok(()) }
    /// ```
    ///
    /// [google-cloud-auth]: https://docs.rs/google-cloud-auth
    pub fn with_credentials<V: Into<Credentials>>(mut self, v: V) -> Self {
        self.config.cred = Some(v.into());
        self
    }

    /// Configure the retry policy.
    ///
    /// The client libraries can automatically retry operations that fail. The
    /// retry policy controls what errors are considered retryable, sets limits
    /// on the number of attempts or the time trying to make attempts.
    ///
    /// # Example
    /// ```
    /// # use google_cloud_storage::client::Storage;
    /// # async fn sample() -> anyhow::Result<()> {
    /// use google_cloud_gax::retry_policy::{AlwaysRetry, RetryPolicyExt};
    /// let client = Storage::builder()
    ///     .with_retry_policy(AlwaysRetry.with_attempt_limit(3))
    ///     .build()
    ///     .await?;
    /// # Ok(()) }
    /// ```
    pub fn with_retry_policy<V: Into<google_cloud_gax::retry_policy::RetryPolicyArg>>(
        mut self,
        v: V,
    ) -> Self {
        self.config.retry_policy = Some(v.into().into());
        self
    }

    /// Configure the retry backoff policy.
    ///
    /// The client libraries can automatically retry operations that fail. The
    /// backoff policy controls how long to wait in between retry attempts.
    ///
    /// # Example
    /// ```
    /// # use google_cloud_storage::client::Storage;
    /// # async fn sample() -> anyhow::Result<()> {
    /// use google_cloud_gax::exponential_backoff::ExponentialBackoff;
    /// use std::time::Duration;
    /// let policy = ExponentialBackoff::default();
    /// let client = Storage::builder()
    ///     .with_backoff_policy(policy)
    ///     .build()
    ///     .await?;
    /// # Ok(()) }
    /// ```
    pub fn with_backoff_policy<V: Into<google_cloud_gax::backoff_policy::BackoffPolicyArg>>(
        mut self,
        v: V,
    ) -> Self {
        self.config.backoff_policy = Some(v.into().into());
        self
    }

    /// Configure the retry throttler.
    ///
    /// Advanced applications may want to configure a retry throttler to
    /// [Address Cascading Failures] and when [Handling Overload] conditions.
    /// The client libraries throttle their retry loop, using a policy to
    /// control the throttling algorithm. Use this method to fine tune or
    /// customize the default retry throttler.
    ///
    /// [Handling Overload]: https://sre.google/sre-book/handling-overload/
    /// [Address Cascading Failures]: https://sre.google/sre-book/addressing-cascading-failures/
    ///
    /// # Example
    /// ```
    /// # use google_cloud_storage::client::Storage;
    /// # async fn sample() -> anyhow::Result<()> {
    /// use google_cloud_gax::retry_throttler::AdaptiveThrottler;
    /// let client = Storage::builder()
    ///     .with_retry_throttler(AdaptiveThrottler::default())
    ///     .build()
    ///     .await?;
    /// # Ok(()) }
    /// ```
    pub fn with_retry_throttler<V: Into<google_cloud_gax::retry_throttler::RetryThrottlerArg>>(
        mut self,
        v: V,
    ) -> Self {
        self.config.retry_throttler = v.into().into();
        self
    }

    /// Sets the payload size threshold to switch from single-shot to resumable uploads.
    ///
    /// # Example
    /// ```
    /// # use google_cloud_storage::client::Storage;
    /// # async fn sample() -> anyhow::Result<()> {
    /// let client = Storage::builder()
    ///     .with_resumable_upload_threshold(0_usize) // Forces a resumable upload.
    ///     .build()
    ///     .await?;
    /// let response = client
    ///     .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
    ///     .send_buffered()
    ///     .await?;
    /// println!("response details={response:?}");
    /// # Ok(()) }
    /// ```
    ///
    /// The client library can write objects using [single-shot] or [resumable]
    /// uploads. For small objects, single-shot uploads offer better
    /// performance, as they require a single HTTP transfer. For larger objects,
    /// the additional request latency is not significant, and resumable uploads
    /// offer better recovery on errors.
    ///
    /// The library automatically selects resumable uploads when the payload is
    /// equal to or larger than this option. For smaller writes the client
    /// library uses single-shot uploads.
    ///
    /// The exact threshold depends on where the application is deployed and
    /// destination bucket location with respect to where the application is
    /// running. The library defaults should work well in most cases, but some
    /// applications may benefit from fine-tuning.
    ///
    /// [single-shot]: https://cloud.google.com/storage/docs/uploading-objects
    /// [resumable]: https://cloud.google.com/storage/docs/resumable-uploads
    pub fn with_resumable_upload_threshold<V: Into<usize>>(mut self, v: V) -> Self {
        self.common_options.resumable_upload_threshold = v.into();
        self
    }

    /// Changes the buffer size for some resumable uploads.
    ///
    /// # Example
    /// ```
    /// # use google_cloud_storage::client::Storage;
    /// # async fn sample() -> anyhow::Result<()> {
    /// let client = Storage::builder()
    ///     .with_resumable_upload_buffer_size(32 * 1024 * 1024_usize)
    ///     .build()
    ///     .await?;
    /// let response = client
    ///     .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
    ///     .send_buffered()
    ///     .await?;
    /// println!("response details={response:?}");
    /// # Ok(()) }
    /// ```
    ///
    /// When performing [resumable uploads] from sources without [Seek] the
    /// client library needs to buffer data in memory until it is persisted by
    /// the service. Otherwise the data would be lost if the upload is
    /// interrupted. Applications may want to tune this buffer size:
    ///
    /// - Use smaller buffer sizes to support more concurrent writes in the
    ///   same application.
    /// - Use larger buffer sizes for better throughput. Sending many small
    ///   buffers stalls the writer until the client receives a successful
    ///   response from the service.
    ///
    /// Keep in mind that there are diminishing returns on using larger buffers.
    ///
    /// [resumable uploads]: https://cloud.google.com/storage/docs/resumable-uploads
    /// [Seek]: crate::streaming_source::Seek
    pub fn with_resumable_upload_buffer_size<V: Into<usize>>(mut self, v: V) -> Self {
        self.common_options.resumable_upload_buffer_size = v.into();
        self
    }

    /// Configure the resume policy for object reads.
    ///
    /// The Cloud Storage client library can automatically resume a read request
    /// that is interrupted by a transient error. Applications may want to
    /// limit the number of read attempts, or may wish to expand the type
    /// of errors treated as retryable.
    ///
    /// # Example
    /// ```
    /// # use google_cloud_storage::client::Storage;
    /// # async fn sample() -> anyhow::Result<()> {
    /// use google_cloud_storage::read_resume_policy::{AlwaysResume, ReadResumePolicyExt};
    /// let client = Storage::builder()
    ///     .with_read_resume_policy(AlwaysResume.with_attempt_limit(3))
    ///     .build()
    ///     .await?;
    /// # Ok(()) }
    /// ```
    pub fn with_read_resume_policy<V>(mut self, v: V) -> Self
    where
        V: ReadResumePolicy + 'static,
    {
        self.common_options.read_resume_policy = Arc::new(v);
        self
    }

    /// Configure the number of subchannels used by the client.
    ///
    /// # Example
    /// ```
    /// # use google_cloud_storage::client::Storage;
    /// # async fn sample() -> anyhow::Result<()> {
    /// // By default the client uses `count` subchannels.
    /// let count = std::thread::available_parallelism()?.get();
    /// let client = Storage::builder()
    ///     .with_grpc_subchannel_count(std::cmp::max(1, count / 2))
    ///     .build()
    ///     .await?;
    /// # Ok(()) }
    /// ```
    ///
    /// gRPC-based clients may exhibit high latency if many requests need to be
    /// demuxed over a single HTTP/2 connection (often called a *subchannel* in gRPC).
    /// Consider using more subchannels if your application makes many
    /// concurrent requests. Consider using fewer subchannels if your
    /// application needs the file descriptors for other purposes.
    ///
    /// Keep in mind that Google Cloud limits the number of concurrent RPCs in
    /// a single connection to about 100.
    pub fn with_grpc_subchannel_count(mut self, v: usize) -> Self {
        self.config.grpc_subchannel_count = Some(v);
        self
    }

    /// Enable debug logs and traces using the [tracing] framework.
    ///
    /// <div class="warning">
    ///
    /// Traces at any level may contain sensitive data like bucket names, object
    /// names, full URLs and error messages. Traces at the `INFO` level follow
    /// [OpenTelemetry Semantic Conventions] with additional Storage attributes,
    /// and are intended to be suitable for production monitoring. Traces at
    /// the `DEBUG` level or lower are meant for detailed debugging and include
    /// the full content of requests, responses, and the debug information for
    /// the client.
    ///
    /// Review the contents of the traces and consult the [tracing]
    /// framework documentation to set up filters and formatters to prevent
    /// leaking sensitive information, depending on your intended use case.
    ///
    /// [OpenTelemetry Semantic Conventions]: https://opentelemetry.io/docs/concepts/semantic-conventions/
    /// </div>
    ///
    /// # Example
    /// ```
    /// // In your `main` function enable a tracing subscriber, for example:
    /// // tracing_subscriber::fmt::init();
    /// # use google_cloud_storage::client::Storage;
    /// # async fn sample() -> anyhow::Result<()> {
    /// let client = Storage::builder()
    ///     .with_tracing()
    ///     .build()
    ///     .await?;
    /// # Ok(()) }
    /// ```
    ///
    /// # More information
    ///
    /// The [Enable logging] guide shows you how to initialize a subscriber to
    /// log events to the console.
    ///
    /// [Enable logging]: https://docs.cloud.google.com/rust/enable-logging
    /// [tracing]: https://docs.rs/tracing
    pub fn with_tracing(mut self) -> Self {
        self.config.tracing = true;
        self
    }

    /// Installs [Application Default Credentials] when none were configured.
    ///
    /// [Application Default Credentials]: https://cloud.google.com/docs/authentication#adc
    pub(crate) fn apply_default_credentials(&mut self) -> BuilderResult<()> {
        if self.config.cred.is_some() {
            return Ok(());
        };
        let default = CredentialsBuilder::default()
            .build()
            .map_err(BuilderError::cred)?;
        self.config.cred = Some(default);
        Ok(())
    }

    /// Installs the default endpoint when none was configured.
    pub(crate) fn apply_default_endpoint(&mut self) -> BuilderResult<()> {
        let _ = self
            .config
            .endpoint
            .get_or_insert_with(|| super::DEFAULT_HOST.to_string());
        Ok(())
    }

    // Breaks the builder into its parts, with defaults applied.
    pub(crate) fn into_parts(
        mut self,
    ) -> google_cloud_gax::client_builder::Result<(ClientConfig, RequestOptions)> {
        self.apply_default_credentials()?;
        self.apply_default_endpoint()?;
        let request_options =
            RequestOptions::new_with_client_config(&self.config, self.common_options);
        Ok((self.config, request_options))
    }
}
709
/// The set of characters that are percent encoded.
///
/// This set is defined at <https://cloud.google.com/storage/docs/request-endpoints#encoding>:
///
/// Encode the following characters when they appear in either the object name
/// or query string of a request URL:
/// !, #, $, &, ', (, ), *, +, ,, /, :, ;, =, ?, @, [, ], and space characters.
pub(crate) const ENCODED_CHARS: percent_encoding::AsciiSet = percent_encoding::CONTROLS
    .add(b'!')
    .add(b'#')
    .add(b'$')
    .add(b'&')
    .add(b'\'')
    .add(b'(')
    .add(b')')
    .add(b'*')
    .add(b'+')
    .add(b',')
    .add(b'/')
    .add(b':')
    .add(b';')
    .add(b'=')
    .add(b'?')
    .add(b'@')
    .add(b'[')
    .add(b']')
    .add(b' ');
737
738/// Percent encode a string.
739///
740/// To ensure compatibility certain characters need to be encoded when they appear
741/// in either the object name or query string of a request URL.
742pub(crate) fn enc(value: &str) -> String {
743 percent_encoding::utf8_percent_encode(value, &ENCODED_CHARS).to_string()
744}
745
746pub(crate) fn apply_customer_supplied_encryption_headers(
747 builder: HttpRequestBuilder,
748 common_object_request_params: &Option<crate::model::CommonObjectRequestParams>,
749) -> HttpRequestBuilder {
750 common_object_request_params.iter().fold(builder, |b, v| {
751 b.header(
752 "x-goog-encryption-algorithm",
753 v.encryption_algorithm.clone(),
754 )
755 .header(
756 "x-goog-encryption-key",
757 BASE64_STANDARD.encode(v.encryption_key_bytes.clone()),
758 )
759 .header(
760 "x-goog-encryption-key-sha256",
761 BASE64_STANDARD.encode(v.encryption_key_sha256_bytes.clone()),
762 )
763 })
764}
765
#[cfg(test)]
pub(crate) mod tests {
    use super::*;
    use google_cloud_auth::credentials::anonymous::Builder as Anonymous;
    use google_cloud_gax::retry_result::RetryResult;
    use google_cloud_gax::retry_state::RetryState;
    use std::{sync::Arc, time::Duration};

    // Verify the builder seeds the storage-specific defaults.
    #[test]
    fn default_settings() {
        let builder = ClientBuilder::new().with_credentials(Anonymous::new().build());
        let config = builder.config;
        assert!(config.retry_policy.is_some(), "{config:?}");
        assert!(config.backoff_policy.is_some(), "{config:?}");
        {
            assert!(
                config.grpc_subchannel_count.is_some_and(|v| v >= 1),
                "{config:?}"
            );
        }
    }

    #[test]
    fn subchannel_count() {
        let builder = ClientBuilder::new()
            .with_credentials(Anonymous::new().build())
            .with_grpc_subchannel_count(42);
        let config = builder.config;
        assert!(
            config.grpc_subchannel_count.is_some_and(|v| v == 42),
            "{config:?}"
        );
    }

    // A builder with anonymous credentials and fast backoff, suitable for tests.
    pub(crate) fn test_builder() -> ClientBuilder {
        ClientBuilder::new()
            .with_credentials(Anonymous::new().build())
            .with_endpoint("http://private.googleapis.com")
            .with_backoff_policy(
                google_cloud_gax::exponential_backoff::ExponentialBackoffBuilder::new()
                    .with_initial_delay(Duration::from_millis(1))
                    .with_maximum_delay(Duration::from_millis(2))
                    .build()
                    .expect("hard coded policy should build correctly"),
            )
    }

    /// This is used by the request builder tests.
    pub(crate) async fn test_inner_client(builder: ClientBuilder) -> Arc<StorageInner> {
        let inner = StorageInner::from_parts(builder)
            .await
            // Fixed grammar in this expectation message ("an test" -> "a test").
            .expect("creating a test inner client succeeds");
        Arc::new(inner)
    }

    mockall::mock! {
        #[derive(Debug)]
        pub RetryThrottler {}

        impl google_cloud_gax::retry_throttler::RetryThrottler for RetryThrottler {
            fn throttle_retry_attempt(&self) -> bool;
            fn on_retry_failure(&mut self, flow: &RetryResult);
            fn on_success(&mut self);
        }
    }

    mockall::mock! {
        #[derive(Debug)]
        pub RetryPolicy {}

        impl google_cloud_gax::retry_policy::RetryPolicy for RetryPolicy {
            fn on_error(&self, state: &RetryState, error: google_cloud_gax::error::Error) -> RetryResult;
        }
    }

    mockall::mock! {
        #[derive(Debug)]
        pub BackoffPolicy {}

        impl google_cloud_gax::backoff_policy::BackoffPolicy for BackoffPolicy {
            fn on_failure(&self, state: &RetryState) -> std::time::Duration;
        }
    }

    mockall::mock! {
        #[derive(Debug)]
        pub ReadResumePolicy {}

        impl crate::read_resume_policy::ReadResumePolicy for ReadResumePolicy {
            fn on_error(&self, query: &crate::read_resume_policy::ResumeQuery, error: google_cloud_gax::error::Error) -> crate::read_resume_policy::ResumeResult;
        }
    }
}