google_cloud_storage/storage/client.rs

// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use super::request_options::RequestOptions;
use crate::Error;
use crate::builder::storage::ReadObject;
use crate::builder::storage::WriteObject;
use crate::read_resume_policy::ReadResumePolicy;
use crate::storage::common_options::CommonOptions;
use crate::streaming_source::Payload;
use auth::credentials::CacheableResource;
use base64::Engine;
use base64::prelude::BASE64_STANDARD;
use gaxi::options::ClientConfig;
use gaxi::options::Credentials;
use http::Extensions;
use std::sync::Arc;

/// Implements a client for the Cloud Storage API.
///
/// # Example
/// ```
/// # async fn sample() -> anyhow::Result<()> {
/// # use google_cloud_storage::client::Storage;
/// let client = Storage::builder().build().await?;
/// // use `client` to make requests to Cloud Storage.
/// # Ok(()) }
/// ```
///
/// # Configuration
///
/// To configure `Storage`, use the `with_*` methods in the type returned
/// by [builder()][Storage::builder]. The default configuration should
/// work for most applications. Common configuration changes include
///
/// * [with_endpoint()]: by default this client uses the global default endpoint
///   (`https://storage.googleapis.com`). Applications using regional
///   endpoints or running in restricted networks (e.g. a network configured
///   with [Private Google Access with VPC Service Controls]) may want to
///   override this default.
/// * [with_credentials()]: by default this client uses
///   [Application Default Credentials]. Applications using custom
///   authentication may need to override this default.
///
/// # Pooling and Cloning
///
/// `Storage` holds a connection pool internally, so it is advised to
/// create one client and then reuse it. You do not need to wrap `Storage`
/// in an [Rc](std::rc::Rc) or [Arc] to reuse it, because it already uses
/// an `Arc` internally.
///
/// # Service Description
///
/// The Cloud Storage API allows applications to read and write data through
/// the abstractions of buckets and objects. For a description of these
/// abstractions please see <https://cloud.google.com/storage/docs>.
///
/// Resources are named as follows:
///
/// - Projects are referred to as they are defined by the Resource Manager API,
///   using strings like `projects/123456` or `projects/my-string-id`.
///
/// - Buckets are named using string names of the form:
///   `projects/{project}/buckets/{bucket}`
///   For globally unique buckets, `_` may be substituted for the project.
///
/// - Objects are uniquely identified by their name along with the name of the
///   bucket they belong to, as separate strings in this API. For example:
///   ```no_rust
///   bucket = "projects/_/buckets/my-bucket"
///   object = "my-object/with/a/folder-like/name"
///   ```
///   Note that object names can contain `/` characters, which are treated as
///   any other character (no special directory semantics).
///
/// [with_endpoint()]: ClientBuilder::with_endpoint
/// [with_credentials()]: ClientBuilder::with_credentials
/// [Private Google Access with VPC Service Controls]: https://cloud.google.com/vpc-service-controls/docs/private-connectivity
/// [Application Default Credentials]: https://cloud.google.com/docs/authentication#adc
#[derive(Clone, Debug)]
pub struct Storage<S = crate::stub::DefaultStorage>
where
    S: crate::stub::Storage + 'static,
{
    stub: std::sync::Arc<S>,
    options: RequestOptions,
}

#[derive(Clone, Debug)]
pub(crate) struct StorageInner {
    pub client: reqwest::Client,
    pub cred: auth::credentials::Credentials,
    pub endpoint: String,
    pub options: RequestOptions,
}

impl Storage {
    /// Returns a builder for [Storage].
    ///
    /// # Example
    /// ```
    /// # use google_cloud_storage::client::Storage;
    /// # async fn sample() -> anyhow::Result<()> {
    /// let client = Storage::builder().build().await?;
    /// # Ok(()) }
    /// ```
    pub fn builder() -> ClientBuilder {
        ClientBuilder::new()
    }
}

impl<S> Storage<S>
where
    S: crate::storage::stub::Storage + 'static,
{
    /// Creates a new client from the provided stub.
    ///
    /// The most common case for calling this function is in tests mocking the
    /// client's behavior.
    pub fn from_stub(stub: S) -> Self
    where
        S: super::stub::Storage + 'static,
    {
        Self {
            stub: std::sync::Arc::new(stub),
            options: RequestOptions::new(),
        }
    }

    /// Write an object with data from any data source.
    ///
    /// # Example: buffered write
    /// ```
    /// # use google_cloud_storage::client::Storage;
    /// # async fn sample(client: &Storage) -> anyhow::Result<()> {
    /// let response = client
    ///     .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
    ///     .send_buffered()
    ///     .await?;
    /// println!("response details={response:?}");
    /// # Ok(()) }
    /// ```
    ///
    /// # Example: unbuffered write
    /// ```
    /// # use google_cloud_storage::client::Storage;
    /// # async fn sample(client: &Storage) -> anyhow::Result<()> {
    /// let response = client
    ///     .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
    ///     .send_unbuffered()
    ///     .await?;
    /// println!("response details={response:?}");
    /// # Ok(()) }
    /// ```
    ///
    /// You can use many different types as the payload. For example, a string,
    /// a [bytes::Bytes], a [tokio::fs::File], or a custom type that implements
    /// the [StreamingSource] trait.
    ///
    /// If your data source also implements [Seek], prefer [send_unbuffered()]
    /// to start the write. Otherwise use [send_buffered()].
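    ///
    /// # Example: write from a file
    /// A minimal sketch; it assumes a local file exists at the (hypothetical)
    /// path below and, per the note above, that [tokio::fs::File] can be used
    /// directly as the payload and supports unbuffered writes.
    /// ```
    /// # use google_cloud_storage::client::Storage;
    /// # async fn sample(client: &Storage) -> anyhow::Result<()> {
    /// let file = tokio::fs::File::open("my-data.bin").await?;
    /// let response = client
    ///     .write_object("projects/_/buckets/my-bucket", "my-object", file)
    ///     .send_unbuffered()
    ///     .await?;
    /// println!("response details={response:?}");
    /// # Ok(()) }
    /// ```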
    ///
    /// # Parameters
    /// * `bucket` - the bucket name containing the object. In
    ///   `projects/_/buckets/{bucket_id}` format.
    /// * `object` - the object name.
    /// * `payload` - the object data.
    ///
    /// [Seek]: crate::streaming_source::Seek
    /// [StreamingSource]: crate::streaming_source::StreamingSource
    /// [send_buffered()]: crate::builder::storage::WriteObject::send_buffered
    /// [send_unbuffered()]: crate::builder::storage::WriteObject::send_unbuffered
    pub fn write_object<B, O, T, P>(&self, bucket: B, object: O, payload: T) -> WriteObject<P, S>
    where
        B: Into<String>,
        O: Into<String>,
        T: Into<Payload<P>>,
    {
        WriteObject::new(
            self.stub.clone(),
            bucket,
            object,
            payload,
            self.options.clone(),
        )
    }

    /// Reads the contents of an object.
    ///
    /// # Example
    /// ```
    /// # use google_cloud_storage::client::Storage;
    /// # async fn sample(client: &Storage) -> anyhow::Result<()> {
    /// let mut resp = client
    ///     .read_object("projects/_/buckets/my-bucket", "my-object")
    ///     .send()
    ///     .await?;
    /// let mut contents = Vec::new();
    /// while let Some(chunk) = resp.next().await.transpose()? {
    ///     contents.extend_from_slice(&chunk);
    /// }
    /// println!("object contents={:?}", bytes::Bytes::from_owner(contents));
    /// # Ok(()) }
    /// ```
    ///
    /// # Parameters
    /// * `bucket` - the bucket name containing the object. In
    ///   `projects/_/buckets/{bucket_id}` format.
    /// * `object` - the object name.
    pub fn read_object<B, O>(&self, bucket: B, object: O) -> ReadObject<S>
    where
        B: Into<String>,
        O: Into<String>,
    {
        ReadObject::new(self.stub.clone(), bucket, object, self.options.clone())
    }
}

impl Storage {
    pub(crate) fn new(builder: ClientBuilder) -> gax::client_builder::Result<Self> {
        use gax::client_builder::Error;
        let client = reqwest::Client::builder()
            // Disable all automatic decompression. Users could enable these via
            // the corresponding feature flags, but we would not be able to tell
            // whether that has happened.
            .no_brotli()
            .no_deflate()
            .no_gzip()
            .no_zstd()
            .build()
            .map_err(Error::transport)?;
        let (cred, endpoint, options) = builder.into_parts();
        let cred = if let Some(c) = cred {
            c
        } else {
            auth::credentials::Builder::default()
                .build()
                .map_err(Error::cred)?
        };
        let endpoint = endpoint.unwrap_or_else(|| super::DEFAULT_HOST.to_string());
        let inner = Arc::new(StorageInner::new(client, cred, endpoint, options));
        let options = inner.options.clone();
        let stub = crate::storage::transport::Storage::new(inner);
        Ok(Self { stub, options })
    }
}

impl StorageInner {
    /// Builds the inner client from already-resolved credentials, endpoint, and options.
    pub(self) fn new(
        client: reqwest::Client,
        cred: Credentials,
        endpoint: String,
        options: RequestOptions,
    ) -> Self {
        Self {
            client,
            cred,
            endpoint,
            options,
        }
    }

    // Helper method to apply authentication headers to the request builder.
    pub async fn apply_auth_headers(
        &self,
        builder: reqwest::RequestBuilder,
    ) -> crate::Result<reqwest::RequestBuilder> {
        let cached_auth_headers = self
            .cred
            .headers(Extensions::new())
            .await
            .map_err(Error::authentication)?;

        // We pass empty `Extensions` above, so the credentials cannot report a
        // cached (`NotModified`) result.
        let auth_headers = match cached_auth_headers {
            CacheableResource::New { data, .. } => data,
            CacheableResource::NotModified => {
                unreachable!("headers are not cached");
            }
        };

        let builder = builder.headers(auth_headers);
        Ok(builder)
    }
}

/// A builder for [Storage].
///
/// ```
/// # use google_cloud_storage::client::Storage;
/// # async fn sample() -> anyhow::Result<()> {
/// let builder = Storage::builder();
/// let client = builder
///     .with_endpoint("https://storage.googleapis.com")
///     .build()
///     .await?;
/// # Ok(()) }
/// ```
pub struct ClientBuilder {
    // Common options for all clients (generated or not).
    pub(crate) config: ClientConfig,
    // Specific options for the storage client. `RequestOptions` also requires
    // these, so it makes sense to share them.
    common_options: CommonOptions,
}

impl ClientBuilder {
    pub(crate) fn new() -> Self {
        let mut config = ClientConfig::default();
        config.retry_policy = Some(Arc::new(crate::retry_policy::storage_default()));
        config.backoff_policy = Some(Arc::new(crate::backoff_policy::default()));
        let common_options = CommonOptions::new();
        Self {
            config,
            common_options,
        }
    }

    /// Creates a new client.
    ///
    /// # Example
    /// ```
    /// # use google_cloud_storage::client::Storage;
    /// # async fn sample() -> anyhow::Result<()> {
    /// let client = Storage::builder().build().await?;
    /// # Ok(()) }
    /// ```
    pub async fn build(self) -> gax::client_builder::Result<Storage> {
        Storage::new(self)
    }

    /// Sets the endpoint.
    ///
    /// # Example
    /// ```
    /// # use google_cloud_storage::client::Storage;
    /// # async fn sample() -> anyhow::Result<()> {
    /// let client = Storage::builder()
    ///     .with_endpoint("https://private.googleapis.com")
    ///     .build()
    ///     .await?;
    /// # Ok(()) }
    /// ```
    pub fn with_endpoint<V: Into<String>>(mut self, v: V) -> Self {
        self.config.endpoint = Some(v.into());
        self
    }

    /// Configures the authentication credentials.
    ///
    /// Google Cloud Storage requires authentication for most buckets. Use this
    /// method to change the credentials used by the client. More information
    /// about valid credential types can be found in the [google-cloud-auth]
    /// crate documentation.
    ///
    /// # Example
    /// ```
    /// # use google_cloud_storage::client::Storage;
    /// # async fn sample() -> anyhow::Result<()> {
    /// use auth::credentials::mds;
    /// let client = Storage::builder()
    ///     .with_credentials(
    ///         mds::Builder::default()
    ///             .with_scopes(["https://www.googleapis.com/auth/cloud-platform.read-only"])
    ///             .build()?)
    ///     .build()
    ///     .await?;
    /// # Ok(()) }
    /// ```
    ///
    /// [google-cloud-auth]: https://docs.rs/google-cloud-auth
    pub fn with_credentials<V: Into<auth::credentials::Credentials>>(mut self, v: V) -> Self {
        self.config.cred = Some(v.into());
        self
    }

    /// Configure the retry policy.
    ///
    /// The client libraries can automatically retry operations that fail. The
    /// retry policy controls which errors are considered retryable and sets
    /// limits on the number of attempts or the total time spent retrying.
    ///
    /// # Example
    /// ```
    /// # use google_cloud_storage::client::Storage;
    /// # async fn sample() -> anyhow::Result<()> {
    /// use gax::retry_policy::{AlwaysRetry, RetryPolicyExt};
    /// let client = Storage::builder()
    ///     .with_retry_policy(AlwaysRetry.with_attempt_limit(3))
    ///     .build()
    ///     .await?;
    /// # Ok(()) }
    /// ```
    pub fn with_retry_policy<V: Into<gax::retry_policy::RetryPolicyArg>>(mut self, v: V) -> Self {
        self.config.retry_policy = Some(v.into().into());
        self
    }

    /// Configure the retry backoff policy.
    ///
    /// The client libraries can automatically retry operations that fail. The
    /// backoff policy controls how long to wait in between retry attempts.
    ///
    /// # Example
    /// ```
    /// # use google_cloud_storage::client::Storage;
    /// # async fn sample() -> anyhow::Result<()> {
    /// use gax::exponential_backoff::ExponentialBackoff;
    /// use std::time::Duration;
    /// let policy = ExponentialBackoff::default();
    /// let client = Storage::builder()
    ///     .with_backoff_policy(policy)
    ///     .build()
    ///     .await?;
    /// # Ok(()) }
    /// ```
    pub fn with_backoff_policy<V: Into<gax::backoff_policy::BackoffPolicyArg>>(
        mut self,
        v: V,
    ) -> Self {
        self.config.backoff_policy = Some(v.into().into());
        self
    }

    /// Configure the retry throttler.
    ///
    /// Advanced applications may want to configure a retry throttler, as
    /// described in [Addressing Cascading Failures] and [Handling Overload].
    /// The client libraries throttle their retry loop, using a policy to
    /// control the throttling algorithm. Use this method to fine-tune or
    /// customize the default retry throttler.
    ///
    /// [Handling Overload]: https://sre.google/sre-book/handling-overload/
    /// [Addressing Cascading Failures]: https://sre.google/sre-book/addressing-cascading-failures/
    ///
    /// # Example
    /// ```
    /// # use google_cloud_storage::client::Storage;
    /// # async fn sample() -> anyhow::Result<()> {
    /// use gax::retry_throttler::AdaptiveThrottler;
    /// let client = Storage::builder()
    ///     .with_retry_throttler(AdaptiveThrottler::default())
    ///     .build()
    ///     .await?;
    /// # Ok(()) }
    /// ```
    pub fn with_retry_throttler<V: Into<gax::retry_throttler::RetryThrottlerArg>>(
        mut self,
        v: V,
    ) -> Self {
        self.config.retry_throttler = v.into().into();
        self
    }

    /// Sets the payload size threshold to switch from single-shot to resumable uploads.
    ///
    /// # Example
    /// ```
    /// # use google_cloud_storage::client::Storage;
    /// # async fn sample() -> anyhow::Result<()> {
    /// let client = Storage::builder()
    ///     .with_resumable_upload_threshold(0_usize) // Forces a resumable upload.
    ///     .build()
    ///     .await?;
    /// let response = client
    ///     .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
    ///     .send_buffered()
    ///     .await?;
    /// println!("response details={response:?}");
    /// # Ok(()) }
    /// ```
    ///
    /// The client library can write objects using [single-shot] or [resumable]
    /// uploads. For small objects, single-shot uploads offer better
    /// performance, as they require a single HTTP transfer. For larger objects,
    /// the additional request latency is not significant, and resumable uploads
    /// offer better recovery on errors.
    ///
    /// The library automatically selects resumable uploads when the payload is
    /// equal to or larger than this option. For smaller writes the client
    /// library uses single-shot uploads.
    ///
    /// The best threshold depends on where the application is deployed and on
    /// the location of the destination bucket relative to the application. The
    /// library defaults should work well in most cases, but some applications
    /// may benefit from fine-tuning.
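    ///
    /// As a second, illustrative sketch, raising the threshold keeps payloads
    /// below 8 MiB (an arbitrary value) on the single-shot path:
    /// ```
    /// # use google_cloud_storage::client::Storage;
    /// # async fn sample() -> anyhow::Result<()> {
    /// let client = Storage::builder()
    ///     .with_resumable_upload_threshold(8 * 1024 * 1024_usize)
    ///     .build()
    ///     .await?;
    /// # Ok(()) }
    /// ```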
    ///
    /// [single-shot]: https://cloud.google.com/storage/docs/uploading-objects
    /// [resumable]: https://cloud.google.com/storage/docs/resumable-uploads
    pub fn with_resumable_upload_threshold<V: Into<usize>>(mut self, v: V) -> Self {
        self.common_options.resumable_upload_threshold = v.into();
        self
    }

    /// Changes the buffer size for some resumable uploads.
    ///
    /// # Example
    /// ```
    /// # use google_cloud_storage::client::Storage;
    /// # async fn sample() -> anyhow::Result<()> {
    /// let client = Storage::builder()
    ///     .with_resumable_upload_buffer_size(32 * 1024 * 1024_usize)
    ///     .build()
    ///     .await?;
    /// let response = client
    ///     .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
    ///     .send_buffered()
    ///     .await?;
    /// println!("response details={response:?}");
    /// # Ok(()) }
    /// ```
    ///
    /// When performing [resumable uploads] from sources without [Seek], the
    /// client library needs to buffer data in memory until it is persisted by
    /// the service. Otherwise, the data would be lost if the upload is
    /// interrupted. Applications may want to tune this buffer size:
    ///
    /// - Use smaller buffer sizes to support more concurrent writes in the
    ///   same application.
    /// - Use larger buffer sizes for better throughput. Sending many small
    ///   buffers stalls the writer until the client receives a successful
    ///   response from the service.
    ///
    /// Keep in mind that there are diminishing returns on using larger buffers.
    ///
    /// [resumable uploads]: https://cloud.google.com/storage/docs/resumable-uploads
    /// [Seek]: crate::streaming_source::Seek
    pub fn with_resumable_upload_buffer_size<V: Into<usize>>(mut self, v: V) -> Self {
        self.common_options.resumable_upload_buffer_size = v.into();
        self
    }

    /// Configure the resume policy for object reads.
    ///
    /// The Cloud Storage client library can automatically resume a read request
    /// that is interrupted by a transient error. Applications may want to
    /// limit the number of read attempts, or may wish to expand the types
    /// of errors treated as retryable.
    ///
    /// # Example
    /// ```
    /// # use google_cloud_storage::client::Storage;
    /// # async fn sample() -> anyhow::Result<()> {
    /// use google_cloud_storage::read_resume_policy::{AlwaysResume, ReadResumePolicyExt};
    /// let client = Storage::builder()
    ///     .with_read_resume_policy(AlwaysResume.with_attempt_limit(3))
    ///     .build()
    ///     .await?;
    /// # Ok(()) }
    /// ```
    pub fn with_read_resume_policy<V>(mut self, v: V) -> Self
    where
        V: ReadResumePolicy + 'static,
    {
        self.common_options.read_resume_policy = Arc::new(v);
        self
    }

    pub(crate) fn into_parts(self) -> (Option<Credentials>, Option<String>, RequestOptions) {
        let request_options =
            RequestOptions::new_with_client_config(&self.config, self.common_options);
        (self.config.cred, self.config.endpoint, request_options)
    }
}

/// The set of characters that are percent encoded.
///
/// This set is defined at <https://cloud.google.com/storage/docs/request-endpoints#encoding>:
///
/// Encode the following characters when they appear in either the object name
/// or query string of a request URL:
///     !, #, $, &, ', (, ), *, +, ,, /, :, ;, =, ?, @, [, ], and space characters.
const ENCODED_CHARS: percent_encoding::AsciiSet = percent_encoding::CONTROLS
    .add(b'!')
    .add(b'#')
    .add(b'$')
    .add(b'&')
    .add(b'\'')
    .add(b'(')
    .add(b')')
    .add(b'*')
    .add(b'+')
    .add(b',')
    .add(b'/')
    .add(b':')
    .add(b';')
    .add(b'=')
    .add(b'?')
    .add(b'@')
    .add(b'[')
    .add(b']')
    .add(b' ');

/// Percent encode a string.
///
/// To ensure compatibility, certain characters need to be encoded when they
/// appear in either the object name or the query string of a request URL.
pub(crate) fn enc(value: &str) -> String {
    percent_encoding::utf8_percent_encode(value, &ENCODED_CHARS).to_string()
}

// Adds the `x-goog-encryption-*` headers used for customer-supplied encryption
// keys (CSEK) when the request includes `CommonObjectRequestParams`.
pub(crate) fn apply_customer_supplied_encryption_headers(
    builder: reqwest::RequestBuilder,
    common_object_request_params: &Option<crate::model::CommonObjectRequestParams>,
) -> reqwest::RequestBuilder {
    common_object_request_params.iter().fold(builder, |b, v| {
        b.header(
            "x-goog-encryption-algorithm",
            v.encryption_algorithm.clone(),
        )
        .header(
            "x-goog-encryption-key",
            BASE64_STANDARD.encode(v.encryption_key_bytes.clone()),
        )
        .header(
            "x-goog-encryption-key-sha256",
            BASE64_STANDARD.encode(v.encryption_key_sha256_bytes.clone()),
        )
    })
}

#[cfg(test)]
pub(crate) mod tests {
    use super::*;
    use gax::retry_result::RetryResult;
    use gax::retry_state::RetryState;
    use std::{sync::Arc, time::Duration};

    pub(crate) fn test_builder() -> ClientBuilder {
        ClientBuilder::new()
            .with_credentials(auth::credentials::anonymous::Builder::new().build())
            .with_endpoint("http://private.googleapis.com")
            .with_backoff_policy(
                gax::exponential_backoff::ExponentialBackoffBuilder::new()
                    .with_initial_delay(Duration::from_millis(1))
                    .with_maximum_delay(Duration::from_millis(2))
                    .build()
                    .expect("hard coded policy should build correctly"),
            )
    }

    /// This is used by the request builder tests.
    pub(crate) fn test_inner_client(builder: ClientBuilder) -> Arc<StorageInner> {
        let client = reqwest::Client::new();
        let (cred, endpoint, options) = builder.into_parts();
        Arc::new(StorageInner::new(
            client,
            cred.unwrap(),
            endpoint.unwrap(),
            options,
        ))
    }
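
    // A minimal sketch of the behavior documented for `enc()`: characters in
    // `ENCODED_CHARS` (such as `/` and space) are percent encoded, everything
    // else passes through unchanged. The object names below are illustrative only.
    #[test]
    fn enc_percent_encodes_reserved_characters() {
        assert_eq!(enc("my object/name"), "my%20object%2Fname");
        assert_eq!(enc("plain-name.txt"), "plain-name.txt");
    }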

    mockall::mock! {
        #[derive(Debug)]
        pub RetryThrottler {}

        impl gax::retry_throttler::RetryThrottler for RetryThrottler {
            fn throttle_retry_attempt(&self) -> bool;
            fn on_retry_failure(&mut self, flow: &RetryResult);
            fn on_success(&mut self);
        }
    }

    mockall::mock! {
        #[derive(Debug)]
        pub RetryPolicy {}

        impl gax::retry_policy::RetryPolicy for RetryPolicy {
            fn on_error(&self, state: &RetryState, error: gax::error::Error) -> RetryResult;
        }
    }

    mockall::mock! {
        #[derive(Debug)]
        pub BackoffPolicy {}

        impl gax::backoff_policy::BackoffPolicy for BackoffPolicy {
            fn on_failure(&self, state: &RetryState) -> std::time::Duration;
        }
    }

    mockall::mock! {
        #[derive(Debug)]
        pub ReadResumePolicy {}

        impl crate::read_resume_policy::ReadResumePolicy for ReadResumePolicy {
            fn on_error(&self, query: &crate::read_resume_policy::ResumeQuery, error: gax::error::Error) -> crate::read_resume_policy::ResumeResult;
        }
    }
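
    // A minimal smoke-test sketch for `apply_auth_headers`, assuming `tokio`
    // test support and `anyhow` are available as dev dependencies. It builds a
    // `StorageInner` with anonymous credentials (via `test_builder()`) and only
    // checks that applying the headers succeeds; the exact headers produced are
    // owned by the auth crate, so they are not asserted here.
    #[tokio::test]
    async fn apply_auth_headers_smoke() -> anyhow::Result<()> {
        let inner = test_inner_client(test_builder());
        let builder = inner.client.get("http://private.googleapis.com/object");
        let builder = inner.apply_auth_headers(builder).await?;
        let request = builder.build()?;
        assert!(request.url().as_str().starts_with("http://private.googleapis.com"));
        Ok(())
    }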
}