Skip to main content

google_cloud_storage/storage/
client.rs

1// Copyright 2025 Google LLC
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7//     https://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15use super::request_options::RequestOptions;
16use crate::builder::storage::ReadObject;
17use crate::builder::storage::WriteObject;
18use crate::read_resume_policy::ReadResumePolicy;
19use crate::storage::bidi::OpenObject;
20use crate::storage::common_options::CommonOptions;
21use crate::streaming_source::Payload;
22use base64::Engine;
23use base64::prelude::BASE64_STANDARD;
24use gaxi::http::HttpRequestBuilder;
25use gaxi::options::{ClientConfig, Credentials};
26use google_cloud_auth::credentials::Builder as CredentialsBuilder;
27use google_cloud_gax::client_builder::{Error as BuilderError, Result as BuilderResult};
28use std::sync::Arc;
29
/// Implements a client for the Cloud Storage API.
///
/// # Example
/// ```
/// # async fn sample() -> anyhow::Result<()> {
/// # use google_cloud_storage::client::Storage;
/// let client = Storage::builder().build().await?;
/// // use `client` to make requests to Cloud Storage.
/// # Ok(()) }
/// ```
///
/// # Configuration
///
/// To configure `Storage` use the `with_*` methods in the type returned
/// by [builder()][Storage::builder]. The default configuration should
/// work for most applications. Common configuration changes include
///
/// * [with_endpoint()]: by default this client uses the global default endpoint
///   (`https://storage.googleapis.com`). Applications using regional
///   endpoints or running in restricted networks (e.g. a network configured
///   with [Private Google Access with VPC Service Controls]) may want to
///   override this default.
/// * [with_credentials()]: by default this client uses
///   [Application Default Credentials]. Applications using custom
///   authentication may need to override this default.
///
/// # Pooling and Cloning
///
/// `Storage` holds a connection pool internally, it is advised to
/// create one and then reuse it.  You do not need to wrap `Storage` in
/// an [Rc](std::rc::Rc) or [Arc] to reuse it, because it already uses an `Arc`
/// internally.
///
/// # Service Description
///
/// The Cloud Storage API allows applications to read and write data through
/// the abstractions of buckets and objects. For a description of these
/// abstractions please see <https://cloud.google.com/storage/docs>.
///
/// Resources are named as follows:
///
/// - Projects are referred to as they are defined by the Resource Manager API,
///   using strings like `projects/123456` or `projects/my-string-id`.
///
/// - Buckets are named using string names of the form:
///   `projects/{project}/buckets/{bucket}`
///   For globally unique buckets, `_` may be substituted for the project.
///
/// - Objects are uniquely identified by their name along with the name of the
///   bucket they belong to, as separate strings in this API. For example:
///   ```no_rust
///   bucket = "projects/_/buckets/my-bucket"
///   object = "my-object/with/a/folder-like/name"
///   ```
///   Note that object names can contain `/` characters, which are treated as
///   any other character (no special directory semantics).
///
/// [with_endpoint()]: ClientBuilder::with_endpoint
/// [with_credentials()]: ClientBuilder::with_credentials
/// [Private Google Access with VPC Service Controls]: https://cloud.google.com/vpc-service-controls/docs/private-connectivity
/// [Application Default Credentials]: https://cloud.google.com/docs/authentication#adc
#[derive(Clone, Debug)]
pub struct Storage<S = crate::stub::DefaultStorage>
where
    S: crate::stub::Storage + 'static,
{
    // The transport stub that performs each RPC. Shared via `Arc` so cloning
    // the client is cheap; tests can inject a mock through `from_stub()`.
    stub: std::sync::Arc<S>,
    // Default request options, cloned into every request builder created by
    // this client (see `write_object`, `read_object`, `open_object`).
    options: RequestOptions,
}
99
/// Shared transport state for the default (non-mocked) client.
///
/// Built by [StorageInner::from_parts] and wrapped in an `Arc` by
/// [Storage::new] before being handed to the transport stub.
#[derive(Clone, Debug)]
pub(crate) struct StorageInner {
    // Reqwest-based HTTP transport.
    pub client: gaxi::http::ReqwestClient,
    // Default options applied to requests unless overridden per call.
    pub options: RequestOptions,
    // gRPC transport.
    pub grpc: gaxi::grpc::Client,
}
106
impl Storage {
    /// Returns a builder for [Storage].
    ///
    /// The returned builder is pre-configured with this crate's default retry
    /// and backoff policies (see [ClientBuilder]).
    ///
    /// # Example
    /// ```
    /// # use google_cloud_storage::client::Storage;
    /// # async fn sample() -> anyhow::Result<()> {
    /// let client = Storage::builder().build().await?;
    /// # Ok(()) }
    /// ```
    pub fn builder() -> ClientBuilder {
        ClientBuilder::new()
    }
}
121
122impl<S> Storage<S>
123where
124    S: crate::storage::stub::Storage + 'static,
125{
126    /// Creates a new client from the provided stub.
127    ///
128    /// The most common case for calling this function is in tests mocking the
129    /// client's behavior.
130    pub fn from_stub(stub: impl Into<std::sync::Arc<S>>) -> Self {
131        Self {
132            stub: stub.into(),
133            options: RequestOptions::new(),
134        }
135    }
136
137    /// Write an object with data from any data source.
138    ///
139    /// # Example
140    /// ```
141    /// # use google_cloud_storage::client::Storage;
142    /// # async fn sample(client: &Storage) -> anyhow::Result<()> {
143    /// let response = client
144    ///     .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
145    ///     .send_buffered()
146    ///     .await?;
147    /// println!("response details={response:?}");
148    /// # Ok(()) }
149    /// ```
150    ///
151    /// # Example
152    /// ```
153    /// # use google_cloud_storage::client::Storage;
154    /// # async fn sample(client: &Storage) -> anyhow::Result<()> {
155    /// let response = client
156    ///     .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
157    ///     .send_unbuffered()
158    ///     .await?;
159    /// println!("response details={response:?}");
160    /// # Ok(()) }
161    /// ```
162    ///
163    /// You can use many different types as the payload. For example, a string,
164    /// a [bytes::Bytes], a [tokio::fs::File], or a custom type that implements
165    /// the [StreamingSource] trait.
166    ///
167    /// If your data source also implements [Seek], prefer [send_unbuffered()]
168    /// to start the write. Otherwise use [send_buffered()].
169    ///
170    /// # Parameters
171    /// * `bucket` - the bucket name containing the object. In
172    ///   `projects/_/buckets/{bucket_id}` format.
173    /// * `object` - the object name.
174    /// * `payload` - the object data.
175    ///
176    /// [Seek]: crate::streaming_source::Seek
177    /// [StreamingSource]: crate::streaming_source::StreamingSource
178    /// [send_buffered()]: crate::builder::storage::WriteObject::send_buffered
179    /// [send_unbuffered()]: crate::builder::storage::WriteObject::send_unbuffered
180    pub fn write_object<B, O, T, P>(&self, bucket: B, object: O, payload: T) -> WriteObject<P, S>
181    where
182        B: Into<String>,
183        O: Into<String>,
184        T: Into<Payload<P>>,
185    {
186        WriteObject::new(
187            self.stub.clone(),
188            bucket,
189            object,
190            payload,
191            self.options.clone(),
192        )
193    }
194
195    /// Reads the contents of an object.
196    ///
197    /// # Example
198    /// ```
199    /// # use google_cloud_storage::client::Storage;
200    /// # async fn sample(client: &Storage) -> anyhow::Result<()> {
201    /// let mut resp = client
202    ///     .read_object("projects/_/buckets/my-bucket", "my-object")
203    ///     .send()
204    ///     .await?;
205    /// let mut contents = Vec::new();
206    /// while let Some(chunk) = resp.next().await.transpose()? {
207    ///   contents.extend_from_slice(&chunk);
208    /// }
209    /// println!("object contents={:?}", bytes::Bytes::from_owner(contents));
210    /// # Ok(()) }
211    /// ```
212    ///
213    /// # Parameters
214    /// * `bucket` - the bucket name containing the object. In
215    ///   `projects/_/buckets/{bucket_id}` format.
216    /// * `object` - the object name.
217    pub fn read_object<B, O>(&self, bucket: B, object: O) -> ReadObject<S>
218    where
219        B: Into<String>,
220        O: Into<String>,
221    {
222        ReadObject::new(self.stub.clone(), bucket, object, self.options.clone())
223    }
224
225    /// Opens an object to read its contents using concurrent ranged reads.
226    ///
227    /// # Example
228    /// ```
229    /// # use google_cloud_storage::client::Storage;
230    /// # async fn sample(client: &Storage) -> anyhow::Result<()> {
231    /// use google_cloud_storage::model_ext::ReadRange;
232    /// let descriptor = client
233    ///     .open_object("projects/_/buckets/my-bucket", "my-object")
234    ///     .send()
235    ///     .await?;
236    /// // Print the object metadata
237    /// println!("metadata = {:?}", descriptor.object());
238    /// // Read 2000 bytes starting at offset 1000.
239    /// let mut reader = descriptor.read_range(ReadRange::segment(1000, 2000)).await;
240    /// let mut contents = Vec::new();
241    /// while let Some(chunk) = reader.next().await.transpose()? {
242    ///   contents.extend_from_slice(&chunk);
243    /// }
244    /// println!("range contents={:?}", bytes::Bytes::from_owner(contents));
245    /// // `descriptor` can be used to read more ranges, concurrently if needed.
246    /// # Ok(()) }
247    /// ```
248    ///
249    /// # Example: open and read in a single RPC
250    /// ```
251    /// # use google_cloud_storage::client::Storage;
252    /// # async fn sample(client: &Storage) -> anyhow::Result<()> {
253    /// use google_cloud_storage::model_ext::ReadRange;
254    /// let (descriptor, mut reader) = client
255    ///     .open_object("projects/_/buckets/my-bucket", "my-object")
256    ///     .send_and_read(ReadRange::segment(1000, 2000))
257    ///     .await?;
258    /// // `descriptor` can be used to read more ranges.
259    /// # Ok(()) }
260    /// ```
261    ///
262    /// <div class="warning">
263    /// The APIs used by this method are only enabled for some projects and
264    /// buckets. Contact your account team to enable this API.
265    /// </div>
266    ///
267    /// # Parameters
268    /// * `bucket` - the bucket name containing the object. In
269    ///   `projects/_/buckets/{bucket_id}` format.
270    /// * `object` - the object name.
271    pub fn open_object<B, O>(&self, bucket: B, object: O) -> OpenObject<S>
272    where
273        B: Into<String>,
274        O: Into<String>,
275    {
276        OpenObject::new(self.stub.clone(), bucket, object, self.options.clone())
277    }
278}
279
280impl Storage {
281    pub(crate) async fn new(builder: ClientBuilder) -> BuilderResult<Self> {
282        let tracing = builder.config.tracing;
283        let inner = StorageInner::from_parts(builder).await?;
284        let options = inner.options.clone();
285        let stub = crate::storage::transport::Storage::new(Arc::new(inner), tracing);
286        Ok(Self { stub, options })
287    }
288}
289
290impl StorageInner {
291    /// Builds a client assuming `config.cred` and `config.endpoint` are initialized, panics otherwise.
292    pub(self) fn new(
293        client: gaxi::http::ReqwestClient,
294        options: RequestOptions,
295        grpc: gaxi::grpc::Client,
296    ) -> Self {
297        Self {
298            client,
299            options,
300            grpc,
301        }
302    }
303
304    pub(self) async fn from_parts(builder: ClientBuilder) -> BuilderResult<Self> {
305        let (mut config, options) = builder.into_parts()?;
306        config.disable_automatic_decompression = true;
307        config.disable_follow_redirects = true;
308
309        let client = gaxi::http::ReqwestClient::new(config.clone(), super::DEFAULT_HOST).await?;
310        let client = if gaxi::options::tracing_enabled(&config) {
311            client.with_instrumentation(&super::info::INSTRUMENTATION)
312        } else {
313            client
314        };
315        let grpc = if gaxi::options::tracing_enabled(&config) {
316            gaxi::grpc::Client::new_with_instrumentation(
317                config,
318                super::DEFAULT_HOST,
319                &super::info::INSTRUMENTATION,
320            )
321            .await?
322        } else {
323            gaxi::grpc::Client::new(config, super::DEFAULT_HOST).await?
324        };
325
326        let inner = StorageInner::new(client, options, grpc);
327        Ok(inner)
328    }
329}
330
/// A builder for [Storage].
///
/// ```
/// # use google_cloud_storage::client::Storage;
/// # async fn sample() -> anyhow::Result<()> {
/// let builder = Storage::builder();
/// let client = builder
///     .with_endpoint("https://storage.googleapis.com")
///     .build()
///     .await?;
/// # Ok(()) }
/// ```
pub struct ClientBuilder {
    // Common options for all clients (generated or not).
    pub(crate) config: ClientConfig,
    // Specific options for the storage client. `RequestOptions` also requires
    // these, so it makes sense to share them.
    common_options: CommonOptions,
}
350
impl ClientBuilder {
    /// Creates a builder with the storage-specific defaults: this crate's
    /// retry and backoff policies, and one gRPC subchannel per available
    /// CPU (falling back to 1 when parallelism cannot be determined).
    pub(crate) fn new() -> Self {
        let mut config = ClientConfig::default();
        config.retry_policy = Some(Arc::new(crate::retry_policy::storage_default()));
        config.backoff_policy = Some(Arc::new(crate::backoff_policy::default()));
        {
            let count = std::thread::available_parallelism().ok();
            config.grpc_subchannel_count = Some(count.map(|x| x.get()).unwrap_or(1));
        }
        let common_options = CommonOptions::new();
        Self {
            config,
            common_options,
        }
    }

    /// Creates a new client.
    ///
    /// # Example
    /// ```
    /// # use google_cloud_storage::client::Storage;
    /// # async fn sample() -> anyhow::Result<()> {
    /// let client = Storage::builder().build().await?;
    /// # Ok(()) }
    /// ```
    pub async fn build(self) -> BuilderResult<Storage> {
        Storage::new(self).await
    }

    /// Sets the endpoint.
    ///
    /// # Example
    /// ```
    /// # use google_cloud_storage::client::Storage;
    /// # async fn sample() -> anyhow::Result<()> {
    /// let client = Storage::builder()
    ///     .with_endpoint("https://private.googleapis.com")
    ///     .build()
    ///     .await?;
    /// # Ok(()) }
    /// ```
    pub fn with_endpoint<V: Into<String>>(mut self, v: V) -> Self {
        self.config.endpoint = Some(v.into());
        self
    }

    /// Configures the authentication credentials.
    ///
    /// Google Cloud Storage requires authentication for most buckets. Use this
    /// method to change the credentials used by the client. More information
    /// about valid credentials types can be found in the [google-cloud-auth]
    /// crate documentation.
    ///
    /// # Example
    /// ```
    /// # use google_cloud_storage::client::Storage;
    /// # async fn sample() -> anyhow::Result<()> {
    /// use google_cloud_auth::credentials::mds;
    /// let client = Storage::builder()
    ///     .with_credentials(
    ///         mds::Builder::default()
    ///             .with_scopes(["https://www.googleapis.com/auth/cloud-platform.read-only"])
    ///             .build()?)
    ///     .build()
    ///     .await?;
    /// # Ok(()) }
    /// ```
    ///
    /// [google-cloud-auth]: https://docs.rs/google-cloud-auth
    pub fn with_credentials<V: Into<Credentials>>(mut self, v: V) -> Self {
        self.config.cred = Some(v.into());
        self
    }

    /// Configure the retry policy.
    ///
    /// The client libraries can automatically retry operations that fail. The
    /// retry policy controls what errors are considered retryable, sets limits
    /// on the number of attempts or the time trying to make attempts.
    ///
    /// # Example
    /// ```
    /// # use google_cloud_storage::client::Storage;
    /// # async fn sample() -> anyhow::Result<()> {
    /// use google_cloud_gax::retry_policy::{AlwaysRetry, RetryPolicyExt};
    /// let client = Storage::builder()
    ///     .with_retry_policy(AlwaysRetry.with_attempt_limit(3))
    ///     .build()
    ///     .await?;
    /// # Ok(()) }
    /// ```
    pub fn with_retry_policy<V: Into<google_cloud_gax::retry_policy::RetryPolicyArg>>(
        mut self,
        v: V,
    ) -> Self {
        self.config.retry_policy = Some(v.into().into());
        self
    }

    /// Configure the retry backoff policy.
    ///
    /// The client libraries can automatically retry operations that fail. The
    /// backoff policy controls how long to wait in between retry attempts.
    ///
    /// # Example
    /// ```
    /// # use google_cloud_storage::client::Storage;
    /// # async fn sample() -> anyhow::Result<()> {
    /// use google_cloud_gax::exponential_backoff::ExponentialBackoff;
    /// use std::time::Duration;
    /// let policy = ExponentialBackoff::default();
    /// let client = Storage::builder()
    ///     .with_backoff_policy(policy)
    ///     .build()
    ///     .await?;
    /// # Ok(()) }
    /// ```
    pub fn with_backoff_policy<V: Into<google_cloud_gax::backoff_policy::BackoffPolicyArg>>(
        mut self,
        v: V,
    ) -> Self {
        self.config.backoff_policy = Some(v.into().into());
        self
    }

    /// Configure the retry throttler.
    ///
    /// Advanced applications may want to configure a retry throttler to
    /// [Address Cascading Failures] and when [Handling Overload] conditions.
    /// The client libraries throttle their retry loop, using a policy to
    /// control the throttling algorithm. Use this method to fine tune or
    /// customize the default retry throttler.
    ///
    /// [Handling Overload]: https://sre.google/sre-book/handling-overload/
    /// [Address Cascading Failures]: https://sre.google/sre-book/addressing-cascading-failures/
    ///
    /// # Example
    /// ```
    /// # use google_cloud_storage::client::Storage;
    /// # async fn sample() -> anyhow::Result<()> {
    /// use google_cloud_gax::retry_throttler::AdaptiveThrottler;
    /// let client = Storage::builder()
    ///     .with_retry_throttler(AdaptiveThrottler::default())
    ///     .build()
    ///     .await?;
    /// # Ok(()) }
    /// ```
    pub fn with_retry_throttler<V: Into<google_cloud_gax::retry_throttler::RetryThrottlerArg>>(
        mut self,
        v: V,
    ) -> Self {
        self.config.retry_throttler = v.into().into();
        self
    }

    /// Sets the payload size threshold to switch from single-shot to resumable uploads.
    ///
    /// # Example
    /// ```
    /// # use google_cloud_storage::client::Storage;
    /// # async fn sample() -> anyhow::Result<()> {
    /// let client = Storage::builder()
    ///     .with_resumable_upload_threshold(0_usize) // Forces a resumable upload.
    ///     .build()
    ///     .await?;
    /// let response = client
    ///     .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
    ///     .send_buffered()
    ///     .await?;
    /// println!("response details={response:?}");
    /// # Ok(()) }
    /// ```
    ///
    /// The client library can write objects using [single-shot] or [resumable]
    /// uploads. For small objects, single-shot uploads offer better
    /// performance, as they require a single HTTP transfer. For larger objects,
    /// the additional request latency is not significant, and resumable uploads
    /// offer better recovery on errors.
    ///
    /// The library automatically selects resumable uploads when the payload is
    /// equal to or larger than this option. For smaller writes the client
    /// library uses single-shot uploads.
    ///
    /// The exact threshold depends on where the application is deployed and
    /// destination bucket location with respect to where the application is
    /// running. The library defaults should work well in most cases, but some
    /// applications may benefit from fine-tuning.
    ///
    /// [single-shot]: https://cloud.google.com/storage/docs/uploading-objects
    /// [resumable]: https://cloud.google.com/storage/docs/resumable-uploads
    pub fn with_resumable_upload_threshold<V: Into<usize>>(mut self, v: V) -> Self {
        self.common_options.resumable_upload_threshold = v.into();
        self
    }

    /// Changes the buffer size for some resumable uploads.
    ///
    /// # Example
    /// ```
    /// # use google_cloud_storage::client::Storage;
    /// # async fn sample() -> anyhow::Result<()> {
    /// let client = Storage::builder()
    ///     .with_resumable_upload_buffer_size(32 * 1024 * 1024_usize)
    ///     .build()
    ///     .await?;
    /// let response = client
    ///     .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
    ///     .send_buffered()
    ///     .await?;
    /// println!("response details={response:?}");
    /// # Ok(()) }
    /// ```
    ///
    /// When performing [resumable uploads] from sources without [Seek] the
    /// client library needs to buffer data in memory until it is persisted by
    /// the service. Otherwise the data would be lost if the upload is
    /// interrupted. Applications may want to tune this buffer size:
    ///
    /// - Use smaller buffer sizes to support more concurrent writes in the
    ///   same application.
    /// - Use larger buffer sizes for better throughput. Sending many small
    ///   buffers stalls the writer until the client receives a successful
    ///   response from the service.
    ///
    /// Keep in mind that there are diminishing returns on using larger buffers.
    ///
    /// [resumable uploads]: https://cloud.google.com/storage/docs/resumable-uploads
    /// [Seek]: crate::streaming_source::Seek
    pub fn with_resumable_upload_buffer_size<V: Into<usize>>(mut self, v: V) -> Self {
        self.common_options.resumable_upload_buffer_size = v.into();
        self
    }

    /// Configure the resume policy for object reads.
    ///
    /// The Cloud Storage client library can automatically resume a read request
    /// that is interrupted by a transient error. Applications may want to
    /// limit the number of read attempts, or may wish to expand the type
    /// of errors treated as retryable.
    ///
    /// # Example
    /// ```
    /// # use google_cloud_storage::client::Storage;
    /// # async fn sample() -> anyhow::Result<()> {
    /// use google_cloud_storage::read_resume_policy::{AlwaysResume, ReadResumePolicyExt};
    /// let client = Storage::builder()
    ///     .with_read_resume_policy(AlwaysResume.with_attempt_limit(3))
    ///     .build()
    ///     .await?;
    /// # Ok(()) }
    /// ```
    pub fn with_read_resume_policy<V>(mut self, v: V) -> Self
    where
        V: ReadResumePolicy + 'static,
    {
        self.common_options.read_resume_policy = Arc::new(v);
        self
    }

    /// Configure the number of subchannels used by the client.
    ///
    /// # Example
    /// ```
    /// # use google_cloud_storage::client::Storage;
    /// # async fn sample() -> anyhow::Result<()> {
    /// // By default the client uses `count` subchannels.
    /// let count = std::thread::available_parallelism()?.get();
    /// let client = Storage::builder()
    ///     .with_grpc_subchannel_count(std::cmp::max(1, count / 2))
    ///     .build()
    ///     .await?;
    /// # Ok(()) }
    /// ```
    ///
    /// gRPC-based clients may exhibit high latency if many requests need to be
    /// demuxed over a single HTTP/2 connection (often called a *subchannel* in gRPC).
    /// Consider using more subchannels if your application makes many
    /// concurrent requests. Consider using fewer subchannels if your
    /// application needs the file descriptors for other purposes.
    ///
    /// Keep in mind that Google Cloud limits the number of concurrent RPCs in
    /// a single connection to about 100.
    pub fn with_grpc_subchannel_count(mut self, v: usize) -> Self {
        self.config.grpc_subchannel_count = Some(v);
        self
    }

    /// Enables observability signals for the client.
    ///
    /// # Example
    /// ```
    /// # use google_cloud_storage::client::Storage;
    /// # async fn sample() -> anyhow::Result<()> {
    /// let client = Storage::builder()
    ///     .with_tracing()
    ///     .build()
    ///     .await?;
    /// // For observing traces and logs, you must also enable a tracing subscriber in your `main` function,
    /// // for example:
    /// //     tracing_subscriber::fmt::init();
    /// // For observing metrics, you must also install an OpenTelemetry meter provider in your `main` function,
    /// // for example:
    /// //     opentelemetry::global::set_meter_provider(provider.clone());
    /// # Ok(()) }
    /// ```
    ///
    /// <div class="warning">
    ///
    /// Observability signals at any level may contain sensitive data such as resource names (bucket
    /// and object names), full URLs, and error messages.
    ///
    /// Before configuring subscribers or exporters for traces and logs, review the contents of the
    /// spans and consult the [tracing] framework documentation to set up filters and formatters to
    /// prevent leaking sensitive information, depending on your intended use case.
    ///
    /// [OpenTelemetry Semantic Conventions]: https://opentelemetry.io/docs/concepts/semantic-conventions/
    /// [tracing]: https://docs.rs/tracing/latest/tracing/
    ///
    /// </div>
    ///
    /// The libraries are instrumented to generate the following signals:
    ///
    /// 1. `INFO` spans for each logical client request. Typically a single method call in the client
    ///    struct gets such a span.
    /// 1. A histogram metric measuring the elapsed time for each logical client request.
    /// 1. `WARN` logs for each logical client request that fails.
    /// 1. `INFO` spans for each low-level RPC attempt. Typically a single method in the client
    ///    struct gets one such span, but there may be more if the library had to retry the RPC.
    /// 1. `DEBUG` logs for each low-level attempt that fails.
    ///
    /// These spans and logs follow [OpenTelemetry Semantic Conventions] with additional Google
    /// Cloud attributes. Both the spans and logs should be suitable for production
    /// monitoring.
    ///
    /// The libraries also have `DEBUG` spans for each request, these include the full request body,
    /// and the full response body for successful requests, and the full error message, with
    /// details, for failed requests. Consider the contents of these requests and responses before
    /// enabling them in production environments, as the request or responses may include sensitive
    /// data. These `DEBUG` spans use `google_cloud_storage::tracing` as their target and the
    /// method name as the span name. You can use the name and/or target to set up your filters.
    ///
    /// # More information
    ///
    /// The [Enable logging] guide shows you how to initialize a subscriber to
    /// log events to the console.
    ///
    /// [Enable logging]: https://docs.cloud.google.com/rust/enable-logging
    pub fn with_tracing(mut self) -> Self {
        self.config.tracing = true;
        self
    }

    /// Installs default credentials when the application did not provide any.
    ///
    /// No-op if credentials were already configured via [with_credentials].
    ///
    /// [with_credentials]: ClientBuilder::with_credentials
    pub(crate) fn apply_default_credentials(&mut self) -> BuilderResult<()> {
        if self.config.cred.is_some() {
            return Ok(());
        };
        let default = CredentialsBuilder::default()
            .build()
            .map_err(BuilderError::cred)?;
        self.config.cred = Some(default);
        Ok(())
    }

    /// Installs the default endpoint when the application did not provide one.
    pub(crate) fn apply_default_endpoint(&mut self) -> BuilderResult<()> {
        let _ = self
            .config
            .endpoint
            .get_or_insert_with(|| super::DEFAULT_HOST.to_string());
        Ok(())
    }

    // Breaks the builder into its parts, with defaults applied.
    pub(crate) fn into_parts(
        mut self,
    ) -> google_cloud_gax::client_builder::Result<(ClientConfig, RequestOptions)> {
        self.apply_default_credentials()?;
        self.apply_default_endpoint()?;
        let request_options =
            RequestOptions::new_with_client_config(&self.config, self.common_options);
        Ok((self.config, request_options))
    }
}
734
/// The set of characters that are percent encoded.
///
/// This set is defined at
/// <https://cloud.google.com/storage/docs/request-endpoints#encoding>:
///
/// Encode the following characters when they appear in either the object name
/// or query string of a request URL:
///     !, #, $, &, ', (, ), *, +, ,, /, :, ;, =, ?, @, [, ], and space characters.
pub(crate) const ENCODED_CHARS: percent_encoding::AsciiSet = percent_encoding::CONTROLS
    .add(b'!')
    .add(b'#')
    .add(b'$')
    .add(b'&')
    .add(b'\'')
    .add(b'(')
    .add(b')')
    .add(b'*')
    .add(b'+')
    .add(b',')
    .add(b'/')
    .add(b':')
    .add(b';')
    .add(b'=')
    .add(b'?')
    .add(b'@')
    .add(b'[')
    .add(b']')
    .add(b' ');
762
763/// Percent encode a string.
764///
765/// To ensure compatibility certain characters need to be encoded when they appear
766/// in either the object name or query string of a request URL.
767pub(crate) fn enc(value: &str) -> String {
768    percent_encoding::utf8_percent_encode(value, &ENCODED_CHARS).to_string()
769}
770
771pub(crate) fn apply_customer_supplied_encryption_headers(
772    builder: HttpRequestBuilder,
773    common_object_request_params: &Option<crate::model::CommonObjectRequestParams>,
774) -> HttpRequestBuilder {
775    common_object_request_params.iter().fold(builder, |b, v| {
776        b.header(
777            "x-goog-encryption-algorithm",
778            v.encryption_algorithm.clone(),
779        )
780        .header(
781            "x-goog-encryption-key",
782            BASE64_STANDARD.encode(v.encryption_key_bytes.clone()),
783        )
784        .header(
785            "x-goog-encryption-key-sha256",
786            BASE64_STANDARD.encode(v.encryption_key_sha256_bytes.clone()),
787        )
788    })
789}
790
/// Unit tests and shared test helpers for the storage client.
///
/// This module is `pub(crate)` (not private) because `test_builder()` and
/// `test_inner_client()` are reused by the request builder tests elsewhere
/// in the crate.
#[cfg(test)]
pub(crate) mod tests {
    use super::*;
    use google_cloud_auth::credentials::anonymous::Builder as Anonymous;
    use google_cloud_gax::retry_result::RetryResult;
    use google_cloud_gax::retry_state::RetryState;
    use std::{sync::Arc, time::Duration};

    // Verify the builder's out-of-the-box configuration: retry and backoff
    // policies are pre-populated, and at least one gRPC subchannel is set.
    #[test]
    fn default_settings() {
        let builder = ClientBuilder::new().with_credentials(Anonymous::new().build());
        let config = builder.config;
        assert!(config.retry_policy.is_some(), "{config:?}");
        assert!(config.backoff_policy.is_some(), "{config:?}");
        // NOTE(review): this inner block is redundant scoping; it does not
        // affect the assertion.
        {
            assert!(
                config.grpc_subchannel_count.is_some_and(|v| v >= 1),
                "{config:?}"
            );
        }
    }

    // Verify that an explicit subchannel count overrides the default.
    #[test]
    fn subchannel_count() {
        let builder = ClientBuilder::new()
            .with_credentials(Anonymous::new().build())
            .with_grpc_subchannel_count(42);
        let config = builder.config;
        assert!(
            config.grpc_subchannel_count.is_some_and(|v| v == 42),
            "{config:?}"
        );
    }

    // Minimal stub implementation; relies entirely on the trait's default
    // methods.
    #[derive(Debug)]
    struct DummyStorage;

    impl crate::storage::stub::Storage for DummyStorage {}

    // `from_stub` should accept both an owned stub and an `Arc`-wrapped one.
    #[test]
    fn from_stub_accepts_both_raw_and_arc() {
        let stub = DummyStorage;
        let _client = Storage::<DummyStorage>::from_stub(stub);

        let stub_arc = std::sync::Arc::new(DummyStorage);
        let _client_arc = Storage::<DummyStorage>::from_stub(stub_arc);
    }

    // A single `Arc`-wrapped stub can back multiple clients.
    #[test]
    fn from_stub_allows_sharing_stub() {
        let stub_arc = std::sync::Arc::new(DummyStorage);

        let _client1 = Storage::<DummyStorage>::from_stub(stub_arc.clone());
        let _client2 = Storage::<DummyStorage>::from_stub(stub_arc);
    }

    // Shared helper: a builder with anonymous credentials, a private endpoint,
    // and a near-zero backoff so retry tests run fast.
    pub(crate) fn test_builder() -> ClientBuilder {
        ClientBuilder::new()
            .with_credentials(Anonymous::new().build())
            .with_endpoint("http://private.googleapis.com")
            .with_backoff_policy(
                google_cloud_gax::exponential_backoff::ExponentialBackoffBuilder::new()
                    .with_initial_delay(Duration::from_millis(1))
                    .with_maximum_delay(Duration::from_millis(2))
                    .build()
                    .expect("hard coded policy should build correctly"),
            )
    }

    /// This is used by the request builder tests.
    pub(crate) async fn test_inner_client(builder: ClientBuilder) -> Arc<StorageInner> {
        let inner = StorageInner::from_parts(builder)
            .await
            .expect("creating an test inner client succeeds")
        Arc::new(inner)
    }

    // Mock retry throttler for injecting throttling decisions into tests.
    mockall::mock! {
        #[derive(Debug)]
        pub RetryThrottler {}

        impl google_cloud_gax::retry_throttler::RetryThrottler for RetryThrottler {
            fn throttle_retry_attempt(&self) -> bool;
            fn on_retry_failure(&mut self, flow: &RetryResult);
            fn on_success(&mut self);
        }
    }

    // Mock retry policy for controlling retry decisions in tests.
    mockall::mock! {
        #[derive(Debug)]
        pub RetryPolicy {}

        impl google_cloud_gax::retry_policy::RetryPolicy for RetryPolicy {
            fn on_error(&self, state: &RetryState, error: google_cloud_gax::error::Error) -> RetryResult;
        }
    }

    // Mock backoff policy for controlling retry delays in tests.
    mockall::mock! {
        #[derive(Debug)]
        pub BackoffPolicy {}

        impl google_cloud_gax::backoff_policy::BackoffPolicy for BackoffPolicy {
            fn on_failure(&self, state: &RetryState) -> std::time::Duration;
        }
    }

    // Mock resume policy for controlling read-resumption decisions in tests.
    mockall::mock! {
        #[derive(Debug)]
        pub ReadResumePolicy {}

        impl crate::read_resume_policy::ReadResumePolicy for ReadResumePolicy {
            fn on_error(&self, query: &crate::read_resume_policy::ResumeQuery, error: google_cloud_gax::error::Error) -> crate::read_resume_policy::ResumeResult;
        }
    }
}