google_cloud_storage/storage/
write_object.rs

1// Copyright 2025 Google LLC
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7//     https://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15//! Contains the request builder for [write_object()] and related types.
16//!
17//! [write_object()]: crate::storage::client::Storage::write_object()
18
19use super::streaming_source::{Seek, StreamingSource};
20use super::*;
21use crate::model_ext::KeyAes256;
22use crate::storage::checksum::details::update as checksum_update;
23use crate::storage::checksum::details::{Checksum, Md5};
24use crate::storage::request_options::RequestOptions;
25
26/// A request builder for object writes.
27///
28/// # Example: hello world
29/// ```
30/// use google_cloud_storage::client::Storage;
31/// async fn sample(client: &Storage) -> anyhow::Result<()> {
32///     let response = client
33///         .write_object("projects/_/buckets/my-bucket", "hello", "Hello World!")
34///         .send_unbuffered()
35///         .await?;
36///     println!("response details={response:?}");
37///     Ok(())
38/// }
39/// ```
40///
41/// # Example: upload a file
42/// ```
43/// use google_cloud_storage::client::Storage;
44/// async fn sample(client: &Storage) -> anyhow::Result<()> {
45///     let payload = tokio::fs::File::open("my-data").await?;
46///     let response = client
47///         .write_object("projects/_/buckets/my-bucket", "my-object", payload)
48///         .send_unbuffered()
49///         .await?;
50///     println!("response details={response:?}");
51///     Ok(())
52/// }
53/// ```
54///
55/// # Example: create a new object from a custom data source
56/// ```
57/// use google_cloud_storage::{client::Storage, streaming_source::StreamingSource};
58/// struct DataSource;
59/// impl StreamingSource for DataSource {
60///     type Error = std::io::Error;
61///     async fn next(&mut self) -> Option<Result<bytes::Bytes, Self::Error>> {
62///         # panic!();
63///     }
64/// }
65///
66/// async fn sample(client: &Storage) -> anyhow::Result<()> {
67///     let response = client
68///         .write_object("projects/_/buckets/my-bucket", "my-object", DataSource)
69///         .send_buffered()
70///         .await?;
71///     println!("response details={response:?}");
72///     Ok(())
73/// }
74/// ```
75pub struct WriteObject<T, S = crate::storage::transport::Storage>
76where
77    S: crate::storage::stub::Storage + 'static,
78{
79    stub: std::sync::Arc<S>,
80    pub(crate) request: crate::model_ext::WriteObjectRequest,
81    pub(crate) payload: Payload<T>,
82    pub(crate) options: RequestOptions,
83}
84
85impl<T, S> WriteObject<T, S>
86where
87    S: crate::storage::stub::Storage + 'static,
88{
89    /// Set a [request precondition] on the object generation to match.
90    ///
91    /// With this precondition the request fails if the current object
92    /// generation matches the provided value. A common value is `0`, which
93    /// prevents writes from succeeding if the object already exists.
94    ///
95    /// # Example
96    /// ```
97    /// # use google_cloud_storage::client::Storage;
98    /// # async fn sample(client: &Storage) -> anyhow::Result<()> {
99    /// let response = client
100    ///     .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
101    ///     .set_if_generation_match(0)
102    ///     .send_buffered()
103    ///     .await?;
104    /// println!("response details={response:?}");
105    /// # Ok(()) }
106    /// ```
107    ///
108    /// [request precondition]: https://cloud.google.com/storage/docs/request-preconditions
109    pub fn set_if_generation_match<V>(mut self, v: V) -> Self
110    where
111        V: Into<i64>,
112    {
113        self.request.spec.if_generation_match = Some(v.into());
114        self
115    }
116
117    /// Set a [request precondition] on the object generation to match.
118    ///
119    /// With this precondition the request fails if the current object
120    /// generation does not match the provided value.
121    ///
122    /// # Example
123    /// ```
124    /// # use google_cloud_storage::client::Storage;
125    /// # async fn sample(client: &Storage) -> anyhow::Result<()> {
126    /// let response = client
127    ///     .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
128    ///     .set_if_generation_not_match(0)
129    ///     .send_buffered()
130    ///     .await?;
131    /// println!("response details={response:?}");
132    /// # Ok(()) }
133    /// ```
134    ///
135    /// [request precondition]: https://cloud.google.com/storage/docs/request-preconditions
136    pub fn set_if_generation_not_match<V>(mut self, v: V) -> Self
137    where
138        V: Into<i64>,
139    {
140        self.request.spec.if_generation_not_match = Some(v.into());
141        self
142    }
143
144    /// Set a [request precondition] on the object meta generation.
145    ///
146    /// With this precondition the request fails if the current object metadata
147    /// generation does not match the provided value. This may be useful to
148    /// prevent changes when the metageneration is known.
149    ///
150    /// # Example
151    /// ```
152    /// # use google_cloud_storage::client::Storage;
153    /// # async fn sample(client: &Storage) -> anyhow::Result<()> {
154    /// let response = client
155    ///     .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
156    ///     .set_if_metageneration_match(1234)
157    ///     .send_buffered()
158    ///     .await?;
159    /// println!("response details={response:?}");
160    /// # Ok(()) }
161    /// ```
162    ///
163    /// [request precondition]: https://cloud.google.com/storage/docs/request-preconditions
164    pub fn set_if_metageneration_match<V>(mut self, v: V) -> Self
165    where
166        V: Into<i64>,
167    {
168        self.request.spec.if_metageneration_match = Some(v.into());
169        self
170    }
171
172    /// Set a [request precondition] on the object meta-generation.
173    ///
174    /// With this precondition the request fails if the current object metadata
175    /// generation matches the provided value. This is rarely useful in uploads,
176    /// it is more commonly used on reads to prevent a large response if the
177    /// data is already cached.
178    ///
179    /// # Example
180    /// ```
181    /// # use google_cloud_storage::client::Storage;
182    /// # async fn sample(client: &Storage) -> anyhow::Result<()> {
183    /// let response = client
184    ///     .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
185    ///     .set_if_metageneration_not_match(1234)
186    ///     .send_buffered()
187    ///     .await?;
188    /// println!("response details={response:?}");
189    /// # Ok(()) }
190    /// ```
191    ///
192    /// [request precondition]: https://cloud.google.com/storage/docs/request-preconditions
193    pub fn set_if_metageneration_not_match<V>(mut self, v: V) -> Self
194    where
195        V: Into<i64>,
196    {
197        self.request.spec.if_metageneration_not_match = Some(v.into());
198        self
199    }
200
201    /// Sets the ACL for the new object.
202    ///
203    /// # Example
204    /// ```
205    /// # use google_cloud_storage::client::Storage;
206    /// # async fn sample(client: &Storage) -> anyhow::Result<()> {
207    /// # use google_cloud_storage::model::ObjectAccessControl;
208    /// let response = client
209    ///     .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
210    ///     .set_acl([ObjectAccessControl::new().set_entity("allAuthenticatedUsers").set_role("READER")])
211    ///     .send_buffered()
212    ///     .await?;
213    /// println!("response details={response:?}");
214    /// # Ok(()) }
215    /// ```
216    pub fn set_acl<I, V>(mut self, v: I) -> Self
217    where
218        I: IntoIterator<Item = V>,
219        V: Into<crate::model::ObjectAccessControl>,
220    {
221        self.mut_resource().acl = v.into_iter().map(|a| a.into()).collect();
222        self
223    }
224
225    /// Sets the [cache control] for the new object.
226    ///
227    /// This can be used to control caching in [public objects].
228    ///
229    /// # Example
230    /// ```
231    /// # use google_cloud_storage::client::Storage;
232    /// # async fn sample(client: &Storage) -> anyhow::Result<()> {
233    /// let response = client
234    ///     .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
235    ///     .set_cache_control("public; max-age=7200")
236    ///     .send_buffered()
237    ///     .await?;
238    /// println!("response details={response:?}");
239    /// # Ok(()) }
240    /// ```
241    ///
242    /// [public objects]: https://cloud.google.com/storage/docs/access-control/making-data-public
243    /// [cache control]: https://datatracker.ietf.org/doc/html/rfc7234#section-5.2
244    pub fn set_cache_control<V: Into<String>>(mut self, v: V) -> Self {
245        self.mut_resource().cache_control = v.into();
246        self
247    }
248
249    /// Sets the [content disposition] for the new object.
250    ///
251    /// Google Cloud Storage can serve content directly to web browsers. This
252    /// attribute sets the `Content-Disposition` header, which may change how
253    /// the browser displays the contents.
254    ///
255    /// # Example
256    /// ```
257    /// # use google_cloud_storage::client::Storage;
258    /// # async fn sample(client: &Storage) -> anyhow::Result<()> {
259    /// let response = client
260    ///     .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
261    ///     .set_content_disposition("inline")
262    ///     .send_buffered()
263    ///     .await?;
264    /// println!("response details={response:?}");
265    /// # Ok(()) }
266    /// ```
267    ///
268    /// [content disposition]: https://datatracker.ietf.org/doc/html/rfc6266
269    pub fn set_content_disposition<V: Into<String>>(mut self, v: V) -> Self {
270        self.mut_resource().content_disposition = v.into();
271        self
272    }
273
274    /// Sets the [content encoding] for the object data.
275    ///
276    /// This can be used to upload compressed data and enable [transcoding] of
277    /// the data during reads.
278    ///
279    /// # Example
280    /// ```
281    /// # use google_cloud_storage::client::Storage;
282    /// # async fn sample(client: &Storage) -> anyhow::Result<()> {
283    /// use flate2::write::GzEncoder;
284    /// use std::io::Write;
285    /// let mut e = GzEncoder::new(Vec::new(), flate2::Compression::default());
286    /// e.write_all(b"hello world");
287    /// let response = client
288    ///     .write_object("projects/_/buckets/my-bucket", "my-object", bytes::Bytes::from_owner(e.finish()?))
289    ///     .set_content_encoding("gzip")
290    ///     .send_buffered()
291    ///     .await?;
292    /// println!("response details={response:?}");
293    /// # Ok(()) }
294    /// ```
295    ///
296    /// [transcoding]: https://cloud.google.com/storage/docs/transcoding
297    /// [content encoding]: https://datatracker.ietf.org/doc/html/rfc7231#section-3.1.2.2
298    pub fn set_content_encoding<V: Into<String>>(mut self, v: V) -> Self {
299        self.mut_resource().content_encoding = v.into();
300        self
301    }
302
303    /// Sets the [content language] for the new object.
304    ///
305    /// Google Cloud Storage can serve content directly to web browsers. This
306    /// attribute sets the `Content-Language` header, which may change how the
307    /// browser displays the contents.
308    ///
309    /// # Example
310    /// ```
311    /// # use google_cloud_storage::client::Storage;
312    /// # async fn sample(client: &Storage) -> anyhow::Result<()> {
313    /// let response = client
314    ///     .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
315    ///     .set_content_language("en")
316    ///     .send_buffered()
317    ///     .await?;
318    /// println!("response details={response:?}");
319    /// # Ok(()) }
320    /// ```
321    ///
322    /// [content language]: https://cloud.google.com/storage/docs/metadata#content-language
323    pub fn set_content_language<V: Into<String>>(mut self, v: V) -> Self {
324        self.mut_resource().content_language = v.into();
325        self
326    }
327
328    /// Sets the [content type] for the new object.
329    ///
330    /// Google Cloud Storage can serve content directly to web browsers. This
331    /// attribute sets the `Content-Type` header, which may change how the
332    /// browser interprets the contents.
333    ///
334    /// # Example
335    /// ```
336    /// # use google_cloud_storage::client::Storage;
337    /// # async fn sample(client: &Storage) -> anyhow::Result<()> {
338    /// let response = client
339    ///     .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
340    ///     .set_content_type("text/plain")
341    ///     .send_buffered()
342    ///     .await?;
343    /// println!("response details={response:?}");
344    /// # Ok(()) }
345    /// ```
346    ///
347    /// [content type]: https://datatracker.ietf.org/doc/html/rfc7231#section-3.1.1.5
348    pub fn set_content_type<V: Into<String>>(mut self, v: V) -> Self {
349        self.mut_resource().content_type = v.into();
350        self
351    }
352
353    /// Sets the [custom time] for the new object.
354    ///
355    /// This field is typically set in order to use the [DaysSinceCustomTime]
356    /// condition in Object Lifecycle Management.
357    ///
358    /// # Example
359    /// ```
360    /// # use google_cloud_storage::client::Storage;
361    /// # async fn sample(client: &Storage) -> anyhow::Result<()> {
362    /// let time = wkt::Timestamp::try_from("2025-07-07T18:30:00Z")?;
363    /// let response = client
364    ///     .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
365    ///     .set_custom_time(time)
366    ///     .send_buffered()
367    ///     .await?;
368    /// println!("response details={response:?}");
369    /// # Ok(()) }
370    /// ```
371    ///
372    /// [DaysSinceCustomTime]: https://cloud.google.com/storage/docs/lifecycle#dayssincecustomtime
373    /// [custom time]: https://cloud.google.com/storage/docs/metadata#custom-time
374    pub fn set_custom_time<V: Into<wkt::Timestamp>>(mut self, v: V) -> Self {
375        self.mut_resource().custom_time = Some(v.into());
376        self
377    }
378
379    /// Sets the [event based hold] flag for the new object.
380    ///
381    /// This field is typically set in order to prevent objects from being
382    /// deleted or modified.
383    ///
384    /// # Example
385    /// ```
386    /// # use google_cloud_storage::client::Storage;
387    /// # async fn sample(client: &Storage) -> anyhow::Result<()> {
388    /// let response = client
389    ///     .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
390    ///     .set_event_based_hold(true)
391    ///     .send_buffered()
392    ///     .await?;
393    /// println!("response details={response:?}");
394    /// # Ok(()) }
395    /// ```
396    ///
397    /// [event based hold]: https://cloud.google.com/storage/docs/object-holds
398    pub fn set_event_based_hold<V: Into<bool>>(mut self, v: V) -> Self {
399        self.mut_resource().event_based_hold = Some(v.into());
400        self
401    }
402
403    /// Sets the [custom metadata] for the new object.
404    ///
405    /// This field is typically set to annotate the object with
406    /// application-specific metadata.
407    ///
408    /// # Example
409    /// ```
410    /// # use google_cloud_storage::client::Storage;
411    /// # async fn sample(client: &Storage) -> anyhow::Result<()> {
412    /// let time = wkt::Timestamp::try_from("2025-07-07T18:30:00Z")?;
413    /// let response = client
414    ///     .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
415    ///     .set_metadata([("test-only", "true"), ("environment", "qa")])
416    ///     .send_buffered()
417    ///     .await?;
418    /// println!("response details={response:?}");
419    /// # Ok(()) }
420    /// ```
421    ///
422    /// [custom metadata]: https://cloud.google.com/storage/docs/metadata#custom-metadata
423    pub fn set_metadata<I, K, V>(mut self, i: I) -> Self
424    where
425        I: IntoIterator<Item = (K, V)>,
426        K: Into<String>,
427        V: Into<String>,
428    {
429        self.mut_resource().metadata = i.into_iter().map(|(k, v)| (k.into(), v.into())).collect();
430        self
431    }
432
433    /// Sets the [retention configuration] for the new object.
434    ///
435    /// # Example
436    /// ```
437    /// # use google_cloud_storage::client::Storage;
438    /// # async fn sample(client: &Storage) -> anyhow::Result<()> {
439    /// # use google_cloud_storage::model::object::{Retention, retention};
440    /// let response = client
441    ///     .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
442    ///     .set_retention(
443    ///         Retention::new()
444    ///             .set_mode(retention::Mode::Locked)
445    ///             .set_retain_until_time(wkt::Timestamp::try_from("2035-01-01T00:00:00Z")?))
446    ///     .send_buffered()
447    ///     .await?;
448    /// println!("response details={response:?}");
449    /// # Ok(()) }
450    /// ```
451    ///
452    /// [retention configuration]: https://cloud.google.com/storage/docs/metadata#retention-config
453    pub fn set_retention<V>(mut self, v: V) -> Self
454    where
455        V: Into<crate::model::object::Retention>,
456    {
457        self.mut_resource().retention = Some(v.into());
458        self
459    }
460
461    /// Sets the [storage class] for the new object.
462    ///
463    /// # Example
464    /// ```
465    /// # use google_cloud_storage::client::Storage;
466    /// # async fn sample(client: &Storage) -> anyhow::Result<()> {
467    /// let response = client
468    ///     .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
469    ///     .set_storage_class("ARCHIVE")
470    ///     .send_buffered()
471    ///     .await?;
472    /// println!("response details={response:?}");
473    /// # Ok(()) }
474    /// ```
475    ///
476    /// [storage class]: https://cloud.google.com/storage/docs/storage-classes
477    pub fn set_storage_class<V>(mut self, v: V) -> Self
478    where
479        V: Into<String>,
480    {
481        self.mut_resource().storage_class = v.into();
482        self
483    }
484
485    /// Sets the [temporary hold] flag for the new object.
486    ///
487    /// This field is typically set in order to prevent objects from being
488    /// deleted or modified.
489    ///
490    /// # Example
491    /// ```
492    /// # use google_cloud_storage::client::Storage;
493    /// # async fn sample(client: &Storage) -> anyhow::Result<()> {
494    /// let time = wkt::Timestamp::try_from("2025-07-07T18:30:00Z")?;
495    /// let response = client
496    ///     .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
497    ///     .set_temporary_hold(true)
498    ///     .send_buffered()
499    ///     .await?;
500    /// println!("response details={response:?}");
501    /// # Ok(()) }
502    /// ```
503    ///
504    /// [temporary hold]: https://cloud.google.com/storage/docs/object-holds
505    pub fn set_temporary_hold<V: Into<bool>>(mut self, v: V) -> Self {
506        self.mut_resource().temporary_hold = v.into();
507        self
508    }
509
510    /// Sets the resource name of the [Customer-managed encryption key] for this
511    /// object.
512    ///
513    /// The service imposes a number of restrictions on the keys used to encrypt
514    /// Google Cloud Storage objects. Read the documentation in full before
515    /// trying to use customer-managed encryption keys. In particular, verify
516    /// the service has the necessary permissions, and the key is in a
517    /// compatible location.
518    ///
519    /// # Example
520    /// ```
521    /// # use google_cloud_storage::client::Storage;
522    /// # async fn sample(client: &Storage) -> anyhow::Result<()> {
523    /// let response = client
524    ///     .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
525    ///     .set_kms_key("projects/test-project/locations/us-central1/keyRings/test-ring/cryptoKeys/test-key")
526    ///     .send_buffered()
527    ///     .await?;
528    /// println!("response details={response:?}");
529    /// # Ok(()) }
530    /// ```
531    ///
532    /// [Customer-managed encryption key]: https://cloud.google.com/storage/docs/encryption/customer-managed-keys
533    pub fn set_kms_key<V>(mut self, v: V) -> Self
534    where
535        V: Into<String>,
536    {
537        self.mut_resource().kms_key = v.into();
538        self
539    }
540
541    /// Configure this object to use one of the [predefined ACLs].
542    ///
543    /// # Example
544    /// ```
545    /// # use google_cloud_storage::client::Storage;
546    /// # async fn sample(client: &Storage) -> anyhow::Result<()> {
547    /// let response = client
548    ///     .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
549    ///     .set_predefined_acl("private")
550    ///     .send_buffered()
551    ///     .await?;
552    /// println!("response details={response:?}");
553    /// # Ok(()) }
554    /// ```
555    ///
556    /// [predefined ACLs]: https://cloud.google.com/storage/docs/access-control/lists#predefined-acl
557    pub fn set_predefined_acl<V>(mut self, v: V) -> Self
558    where
559        V: Into<String>,
560    {
561        self.request.spec.predefined_acl = v.into();
562        self
563    }
564
565    /// The encryption key used with the Customer-Supplied Encryption Keys
566    /// feature. In raw bytes format (not base64-encoded).
567    ///
568    /// # Example
569    /// ```
570    /// # use google_cloud_storage::client::Storage;
571    /// # async fn sample(client: &Storage) -> anyhow::Result<()> {
572    /// # use google_cloud_storage::model_ext::KeyAes256;
573    /// let key: &[u8] = &[97; 32];
574    /// let response = client
575    ///     .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
576    ///     .set_key(KeyAes256::new(key)?)
577    ///     .send_buffered()
578    ///     .await?;
579    /// println!("response details={response:?}");
580    /// # Ok(()) }
581    /// ```
582    pub fn set_key(mut self, v: KeyAes256) -> Self {
583        self.request.params = Some(v.into());
584        self
585    }
586
587    /// Sets the object custom contexts.
588    ///
589    /// # Example
590    /// ```
591    /// # use google_cloud_storage::client::Storage;
592    /// # async fn sample(client: &Storage) -> anyhow::Result<()> {
593    /// # use google_cloud_storage::model::{ObjectContexts, ObjectCustomContextPayload};
594    /// let response = client
595    ///     .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
596    ///     .set_contexts(
597    ///         ObjectContexts::new().set_custom([
598    ///             ("example", ObjectCustomContextPayload::new().set_value("true")),
599    ///         ])
600    ///     )
601    ///     .send_buffered()
602    ///     .await?;
603    /// println!("response details={response:?}");
604    /// # Ok(()) }
605    /// ```
606    pub fn set_contexts<V>(mut self, v: V) -> Self
607    where
608        V: Into<crate::model::ObjectContexts>,
609    {
610        self.mut_resource().contexts = Some(v.into());
611        self
612    }
613
614    /// Configure the idempotency for this upload.
615    ///
616    /// By default, the client library treats single-shot uploads without
617    /// preconditions, as non-idempotent. If the destination bucket is
618    /// configured with [object versioning] then the operation may succeed
619    /// multiple times with observable side-effects. With object versioning and
620    /// a [lifecycle] policy limiting the number of versions, uploading the same
621    /// data multiple times may result in data loss.
622    ///
623    /// The client library cannot efficiently determine if these conditions
624    /// apply to your upload. If they do, or your application can tolerate
625    /// multiple versions of the same data for other reasons, consider using
626    /// `with_idempotency(true)`.
627    ///
628    /// The client library treats resumable uploads as idempotent, regardless of
629    /// the value in this option. Such uploads can succeed at most once.
630    ///
631    /// # Example
632    /// ```
633    /// # use google_cloud_storage::client::Storage;
634    /// # async fn sample(client: &Storage) -> anyhow::Result<()> {
635    /// use std::time::Duration;
636    /// use gax::retry_policy::RetryPolicyExt;
637    /// let response = client
638    ///     .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
639    ///     .with_idempotency(true)
640    ///     .send_buffered()
641    ///     .await?;
642    /// println!("response details={response:?}");
643    /// # Ok(()) }
644    /// ```
645    ///
646    /// [lifecycle]: https://cloud.google.com/storage/docs/lifecycle
647    /// [object versioning]: https://cloud.google.com/storage/docs/object-versioning
648    pub fn with_idempotency(mut self, v: bool) -> Self {
649        self.options.idempotency = Some(v);
650        self
651    }
652
653    /// The retry policy used for this request.
654    ///
655    /// # Example
656    /// ```
657    /// # use google_cloud_storage::client::Storage;
658    /// # use google_cloud_storage::retry_policy::RetryableErrors;
659    /// # async fn sample(client: &Storage) -> anyhow::Result<()> {
660    /// use std::time::Duration;
661    /// use gax::retry_policy::RetryPolicyExt;
662    /// let response = client
663    ///     .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
664    ///     .with_retry_policy(
665    ///         RetryableErrors
666    ///             .with_attempt_limit(5)
667    ///             .with_time_limit(Duration::from_secs(90)),
668    ///     )
669    ///     .send_buffered()
670    ///     .await?;
671    /// println!("response details={response:?}");
672    /// # Ok(()) }
673    /// ```
674    pub fn with_retry_policy<V: Into<gax::retry_policy::RetryPolicyArg>>(mut self, v: V) -> Self {
675        self.options.retry_policy = v.into().into();
676        self
677    }
678
679    /// The backoff policy used for this request.
680    ///
681    /// # Example
682    /// ```
683    /// # use google_cloud_storage::client::Storage;
684    /// # async fn sample(client: &Storage) -> anyhow::Result<()> {
685    /// use std::time::Duration;
686    /// use gax::exponential_backoff::ExponentialBackoff;
687    /// let response = client
688    ///     .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
689    ///     .with_backoff_policy(ExponentialBackoff::default())
690    ///     .send_buffered()
691    ///     .await?;
692    /// println!("response details={response:?}");
693    /// # Ok(()) }
694    /// ```
695    pub fn with_backoff_policy<V: Into<gax::backoff_policy::BackoffPolicyArg>>(
696        mut self,
697        v: V,
698    ) -> Self {
699        self.options.backoff_policy = v.into().into();
700        self
701    }
702
703    /// The retry throttler used for this request.
704    ///
705    /// Most of the time you want to use the same throttler for all the requests
706    /// in a client, and even the same throttler for many clients. Rarely it
707    /// may be necessary to use an custom throttler for some subset of the
708    /// requests.
709    ///
710    /// # Example
711    /// ```
712    /// # use google_cloud_storage::client::Storage;
713    /// # async fn sample(client: &Storage) -> anyhow::Result<()> {
714    /// let response = client
715    ///     .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
716    ///     .with_retry_throttler(adhoc_throttler())
717    ///     .send_buffered()
718    ///     .await?;
719    /// println!("response details={response:?}");
720    /// fn adhoc_throttler() -> gax::retry_throttler::SharedRetryThrottler {
721    ///     # panic!();
722    /// }
723    /// # Ok(()) }
724    /// ```
725    pub fn with_retry_throttler<V: Into<gax::retry_throttler::RetryThrottlerArg>>(
726        mut self,
727        v: V,
728    ) -> Self {
729        self.options.retry_throttler = v.into().into();
730        self
731    }
732
733    /// Sets the payload size threshold to switch from single-shot to resumable uploads.
734    ///
735    /// # Example
736    /// ```
737    /// # use google_cloud_storage::client::Storage;
738    /// # async fn sample(client: &Storage) -> anyhow::Result<()> {
739    /// let response = client
740    ///     .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
741    ///     .with_resumable_upload_threshold(0_usize) // Forces a resumable upload.
742    ///     .send_buffered()
743    ///     .await?;
744    /// println!("response details={response:?}");
745    /// # Ok(()) }
746    /// ```
747    ///
748    /// The client library can perform uploads using [single-shot] or
749    /// [resumable] uploads. For small objects, single-shot uploads offer better
750    /// performance, as they require a single HTTP transfer. For larger objects,
751    /// the additional request latency is not significant, and resumable uploads
752    /// offer better recovery on errors.
753    ///
754    /// The library automatically selects resumable uploads when the payload is
755    /// equal to or larger than this option. For smaller uploads the client
756    /// library uses single-shot uploads.
757    ///
758    /// The exact threshold depends on where the application is deployed and
759    /// destination bucket location with respect to where the application is
760    /// running. The library defaults should work well in most cases, but some
761    /// applications may benefit from fine-tuning.
762    ///
763    /// [single-shot]: https://cloud.google.com/storage/docs/uploading-objects
764    /// [resumable]: https://cloud.google.com/storage/docs/resumable-uploads
765    pub fn with_resumable_upload_threshold<V: Into<usize>>(mut self, v: V) -> Self {
766        self.options.set_resumable_upload_threshold(v.into());
767        self
768    }
769
770    /// Changes the buffer size for some resumable uploads.
771    ///
772    /// # Example
773    /// ```
774    /// # use google_cloud_storage::client::Storage;
775    /// # async fn sample(client: &Storage) -> anyhow::Result<()> {
776    /// let response = client
777    ///     .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
778    ///     .with_resumable_upload_buffer_size(32 * 1024 * 1024_usize)
779    ///     .send_buffered()
780    ///     .await?;
781    /// println!("response details={response:?}");
782    /// # Ok(()) }
783    /// ```
784    ///
785    /// When performing [resumable uploads] from sources without [Seek] the
786    /// client library needs to buffer data in memory until it is persisted by
787    /// the service. Otherwise the data would be lost if the upload fails.
788    /// Applications may want to tune this buffer size:
789    ///
790    /// - Use smaller buffer sizes to support more concurrent uploads in the
791    ///   same application.
792    /// - Use larger buffer sizes for better throughput. Sending many small
793    ///   buffers stalls the upload until the client receives a successful
794    ///   response from the service.
795    ///
796    /// Keep in mind that there are diminishing returns on using larger buffers.
797    ///
798    /// [resumable uploads]: https://cloud.google.com/storage/docs/resumable-uploads
799    /// [Seek]: crate::streaming_source::Seek
800    pub fn with_resumable_upload_buffer_size<V: Into<usize>>(mut self, v: V) -> Self {
801        self.options.set_resumable_upload_buffer_size(v.into());
802        self
803    }
804
805    fn mut_resource(&mut self) -> &mut crate::model::Object {
806        self.request
807            .spec
808            .resource
809            .as_mut()
810            .expect("resource field initialized in `new()`")
811    }
812
813    fn set_crc32c<V: Into<u32>>(mut self, v: V) -> Self {
814        let checksum = self.mut_resource().checksums.get_or_insert_default();
815        checksum.crc32c = Some(v.into());
816        self
817    }
818
819    pub fn set_md5_hash<I, V>(mut self, i: I) -> Self
820    where
821        I: IntoIterator<Item = V>,
822        V: Into<u8>,
823    {
824        let checksum = self.mut_resource().checksums.get_or_insert_default();
825        checksum.md5_hash = i.into_iter().map(|v| v.into()).collect();
826        self
827    }
828
829    /// Provide a precomputed value for the CRC32C checksum.
830    ///
831    /// # Example
832    /// ```
833    /// # use google_cloud_storage::client::Storage;
834    /// # async fn sample(client: &Storage) -> anyhow::Result<()> {
835    /// use crc32c::crc32c;
836    /// let response = client
837    ///     .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
838    ///     .with_known_crc32c(crc32c(b"hello world"))
839    ///     .send_buffered()
840    ///     .await?;
841    /// println!("response details={response:?}");
842    /// # Ok(()) }
843    /// ```
844    ///
845    /// In some applications, the payload's CRC32C checksum is already known.
846    /// For example, the application may be reading the data from another blob
847    /// storage system.
848    ///
849    /// In such cases, it is safer to pass the known CRC32C of the payload to
850    /// [Cloud Storage], and more efficient to skip the computation in the
851    /// client library.
852    ///
853    /// Note that once you provide a CRC32C value to this builder you cannot
854    /// use [compute_md5()] to also have the library compute the checksums.
855    ///
856    /// [compute_md5()]: WriteObject::compute_md5
857    pub fn with_known_crc32c<V: Into<u32>>(self, v: V) -> Self {
858        let mut this = self;
859        this.options.checksum.crc32c = None;
860        this.set_crc32c(v)
861    }
862
863    /// Provide a precomputed value for the MD5 hash.
864    ///
865    /// # Example
866    /// ```
867    /// # use google_cloud_storage::client::Storage;
868    /// # async fn sample(client: &Storage) -> anyhow::Result<()> {
869    /// use md5::compute;
870    /// let hash = md5::compute(b"hello world");
871    /// let response = client
872    ///     .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
873    ///     .with_known_md5_hash(bytes::Bytes::from_owner(hash.0))
874    ///     .send_buffered()
875    ///     .await?;
876    /// println!("response details={response:?}");
877    /// # Ok(()) }
878    /// ```
879    ///
880    /// In some applications, the payload's MD5 hash is already known. For
881    /// example, the application may be reading the data from another blob
882    /// storage system.
883    ///
884    /// In such cases, it is safer to pass the known MD5 of the payload to
885    /// [Cloud Storage], and more efficient to skip the computation in the
886    /// client library.
887    ///
888    /// Note that once you provide a MD5 value to this builder you cannot
889    /// use [compute_md5()] to also have the library compute the checksums.
890    ///
891    /// [compute_md5()]: WriteObject::compute_md5
892    pub fn with_known_md5_hash<I, V>(self, i: I) -> Self
893    where
894        I: IntoIterator<Item = V>,
895        V: Into<u8>,
896    {
897        let mut this = self;
898        this.options.checksum.md5_hash = None;
899        this.set_md5_hash(i)
900    }
901
902    /// Enables computation of MD5 hashes.
903    ///
904    /// # Example
905    /// ```
906    /// # use google_cloud_storage::client::Storage;
907    /// # async fn sample(client: &Storage) -> anyhow::Result<()> {
908    /// let payload = tokio::fs::File::open("my-data").await?;
909    /// let response = client
910    ///     .write_object("projects/_/buckets/my-bucket", "my-object", payload)
911    ///     .compute_md5()
912    ///     .send_buffered()
913    ///     .await?;
914    /// println!("response details={response:?}");
915    /// # Ok(()) }
916    /// ```
917    ///
918    /// See [precompute_checksums][WriteObject::precompute_checksums] for more
919    /// details on how checksums are used by the client library and their
920    /// limitations.
921    pub fn compute_md5(self) -> Self {
922        let mut this = self;
923        this.options.checksum.md5_hash = Some(Md5::default());
924        this
925    }
926
927    pub(crate) fn new<B, O, P>(
928        stub: std::sync::Arc<S>,
929        bucket: B,
930        object: O,
931        payload: P,
932        options: RequestOptions,
933    ) -> Self
934    where
935        B: Into<String>,
936        O: Into<String>,
937        P: Into<Payload<T>>,
938    {
939        let resource = crate::model::Object::new()
940            .set_bucket(bucket)
941            .set_name(object);
942        WriteObject {
943            stub,
944            request: crate::model_ext::WriteObjectRequest {
945                spec: crate::model::WriteObjectSpec::new().set_resource(resource),
946                params: None,
947            },
948            payload: payload.into(),
949            options,
950        }
951    }
952}
953
954impl<T, S> WriteObject<T, S>
955where
956    T: StreamingSource + Seek + Send + Sync + 'static,
957    <T as StreamingSource>::Error: std::error::Error + Send + Sync + 'static,
958    <T as Seek>::Error: std::error::Error + Send + Sync + 'static,
959    S: crate::storage::stub::Storage + 'static,
960{
961    /// A simple upload from a buffer.
962    ///
963    /// # Example
964    /// ```
965    /// # use google_cloud_storage::client::Storage;
966    /// # async fn sample(client: &Storage) -> anyhow::Result<()> {
967    /// let response = client
968    ///     .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
969    ///     .send_unbuffered()
970    ///     .await?;
971    /// println!("response details={response:?}");
972    /// # Ok(()) }
973    /// ```
974    pub async fn send_unbuffered(self) -> Result<Object> {
975        self.stub
976            .write_object_unbuffered(self.payload, self.request, self.options)
977            .await
978    }
979
980    /// Precompute the payload checksums before uploading the data.
981    ///
982    /// If the checksums are known when the upload starts, the client library
983    /// can include the checksums with the upload request, and the service can
984    /// reject the upload if the payload and the checksums do not match.
985    ///
986    /// # Example
987    /// ```
988    /// # use google_cloud_storage::client::Storage;
989    /// # async fn sample(client: &Storage) -> anyhow::Result<()> {
990    /// let payload = tokio::fs::File::open("my-data").await?;
991    /// let response = client
992    ///     .write_object("projects/_/buckets/my-bucket", "my-object", payload)
993    ///     .precompute_checksums()
994    ///     .await?
995    ///     .send_unbuffered()
996    ///     .await?;
997    /// println!("response details={response:?}");
998    /// # Ok(()) }
999    /// ```
1000    ///
1001    /// Precomputing the checksums can be expensive if the data source is slow
1002    /// to read. Therefore, the client library does not precompute the checksums
1003    /// by default. The client library compares the checksums computed by the
1004    /// service against its own checksums. If they do not match, the client
1005    /// library returns an error. However, the service has already created the
1006    /// object with the (likely incorrect) data.
1007    ///
1008    /// The client library currently uses the [JSON API], it is not possible to
1009    /// send the checksums at the end of the upload with this API.
1010    ///
1011    /// [JSON API]: https://cloud.google.com/storage/docs/json_api
1012    pub async fn precompute_checksums(mut self) -> Result<Self> {
1013        let mut offset = 0_u64;
1014        self.payload.seek(offset).await.map_err(Error::ser)?;
1015        while let Some(n) = self.payload.next().await.transpose().map_err(Error::ser)? {
1016            self.options.checksum.update(offset, &n);
1017            offset += n.len() as u64;
1018        }
1019        self.payload.seek(0_u64).await.map_err(Error::ser)?;
1020        let computed = self.options.checksum.finalize();
1021        let current = self.mut_resource().checksums.get_or_insert_default();
1022        checksum_update(current, computed);
1023        self.options.checksum = Checksum {
1024            crc32c: None,
1025            md5_hash: None,
1026        };
1027        Ok(self)
1028    }
1029}
1030
1031impl<T, S> WriteObject<T, S>
1032where
1033    T: StreamingSource + Send + Sync + 'static,
1034    T::Error: std::error::Error + Send + Sync + 'static,
1035    S: crate::storage::stub::Storage + 'static,
1036{
1037    /// Upload an object from a streaming source without rewinds.
1038    ///
1039    /// If the data source does **not** implement [Seek] the client library must
1040    /// buffer data sent to the service until the service confirms it has
1041    /// persisted the data. This requires more memory in the client, and when
1042    /// the buffer grows too large, may require stalling the writer until the
1043    /// service can persist the data.
1044    ///
1045    /// Use this function for data sources where it is expensive or impossible
1046    /// to restart the data source. This function is also useful when it is hard
1047    /// or impossible to predict the number of bytes emitted by a stream, even
1048    /// if restarting the stream is not too expensive.
1049    ///
1050    /// # Example
1051    /// ```
1052    /// # use google_cloud_storage::client::Storage;
1053    /// # async fn sample(client: &Storage) -> anyhow::Result<()> {
1054    /// let response = client
1055    ///     .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
1056    ///     .send_buffered()
1057    ///     .await?;
1058    /// println!("response details={response:?}");
1059    /// # Ok(()) }
1060    /// ```
1061    pub async fn send_buffered(self) -> crate::Result<Object> {
1062        self.stub
1063            .write_object_buffered(self.payload, self.request, self.options)
1064            .await
1065    }
1066}
1067
1068// We need `Debug` to use `expect_err()` in `Result<WriteObject, ...>`.
1069impl<T, S> std::fmt::Debug for WriteObject<T, S>
1070where
1071    S: crate::storage::stub::Storage + 'static,
1072{
1073    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
1074        f.debug_struct("WriteObject")
1075            .field("stub", &self.stub)
1076            .field("request", &self.request)
1077            // skip payload, as it is not `Debug`
1078            .field("options", &self.options)
1079            .finish()
1080    }
1081}
1082
1083#[cfg(test)]
1084mod tests {
1085    use super::client::tests::{test_builder, test_inner_client};
1086    use super::*;
1087    use crate::client::Storage;
1088    use crate::model::{
1089        CommonObjectRequestParams, ObjectChecksums, ObjectContexts, ObjectCustomContextPayload,
1090        WriteObjectSpec,
1091    };
1092    use crate::storage::checksum::details::{Crc32c, Md5};
1093    use crate::streaming_source::tests::MockSeekSource;
1094    use auth::credentials::anonymous::Builder as Anonymous;
1095    use std::error::Error as _;
1096    use std::io::{Error as IoError, ErrorKind};
1097
1098    type Result = anyhow::Result<()>;
1099
1100    // Verify `write_object()` can be used with a source that implements
1101    // `StreamingSource` **and** `Seek`
1102    #[tokio::test]
1103    async fn test_upload_streaming_source_and_seek() -> Result {
1104        struct Source;
1105        impl crate::streaming_source::StreamingSource for Source {
1106            type Error = std::io::Error;
1107            async fn next(&mut self) -> Option<std::result::Result<bytes::Bytes, Self::Error>> {
1108                None
1109            }
1110        }
1111        impl crate::streaming_source::Seek for Source {
1112            type Error = std::io::Error;
1113            async fn seek(&mut self, _offset: u64) -> std::result::Result<(), Self::Error> {
1114                Ok(())
1115            }
1116        }
1117
1118        let client = Storage::builder()
1119            .with_credentials(Anonymous::new().build())
1120            .build()
1121            .await?;
1122        let _ = client.write_object("projects/_/buckets/test-bucket", "test-object", Source);
1123        Ok(())
1124    }
1125
1126    // Verify `write_object()` can be used with a source that **only**
1127    // implements `StreamingSource`.
1128    #[tokio::test]
1129    async fn test_upload_only_streaming_source() -> Result {
1130        struct Source;
1131        impl crate::streaming_source::StreamingSource for Source {
1132            type Error = std::io::Error;
1133            async fn next(&mut self) -> Option<std::result::Result<bytes::Bytes, Self::Error>> {
1134                None
1135            }
1136        }
1137
1138        let client = Storage::builder()
1139            .with_credentials(Anonymous::new().build())
1140            .build()
1141            .await?;
1142        let _ = client.write_object("projects/_/buckets/test-bucket", "test-object", Source);
1143        Ok(())
1144    }
1145
1146    // Verify `write_object()` meets normal Send, Sync, requirements.
1147    #[tokio::test]
1148    async fn test_upload_is_send_and_static() -> Result {
1149        let client = Storage::builder()
1150            .with_credentials(Anonymous::new().build())
1151            .build()
1152            .await?;
1153
1154        fn need_send<T: Send>(_val: &T) {}
1155        fn need_sync<T: Sync>(_val: &T) {}
1156        fn need_static<T: 'static>(_val: &T) {}
1157
1158        let upload = client.write_object("projects/_/buckets/test-bucket", "test-object", "");
1159        need_send(&upload);
1160        need_sync(&upload);
1161        need_static(&upload);
1162
1163        let upload = client
1164            .write_object("projects/_/buckets/test-bucket", "test-object", "")
1165            .send_unbuffered();
1166        need_send(&upload);
1167        need_static(&upload);
1168
1169        let upload = client
1170            .write_object("projects/_/buckets/test-bucket", "test-object", "")
1171            .send_buffered();
1172        need_send(&upload);
1173        need_static(&upload);
1174
1175        Ok(())
1176    }
1177
1178    #[tokio::test]
1179    async fn write_object_metadata() -> Result {
1180        use crate::model::ObjectAccessControl;
1181        let inner = test_inner_client(test_builder()).await;
1182        let options = inner.options.clone();
1183        let stub = crate::storage::transport::Storage::new(inner);
1184        let key = KeyAes256::new(&[0x42; 32]).expect("hard-coded key is not an error");
1185        let mut builder =
1186            WriteObject::new(stub, "projects/_/buckets/bucket", "object", "", options)
1187                .set_if_generation_match(10)
1188                .set_if_generation_not_match(20)
1189                .set_if_metageneration_match(30)
1190                .set_if_metageneration_not_match(40)
1191                .set_predefined_acl("private")
1192                .set_acl([ObjectAccessControl::new()
1193                    .set_entity("allAuthenticatedUsers")
1194                    .set_role("READER")])
1195                .set_cache_control("public; max-age=7200")
1196                .set_content_disposition("inline")
1197                .set_content_encoding("gzip")
1198                .set_content_language("en")
1199                .set_content_type("text/plain")
1200                .set_contexts(ObjectContexts::new().set_custom([(
1201                    "context-key",
1202                    ObjectCustomContextPayload::new().set_value("context-value"),
1203                )]))
1204                .set_custom_time(wkt::Timestamp::try_from("2025-07-07T18:11:00Z")?)
1205                .set_event_based_hold(true)
1206                .set_key(key.clone())
1207                .set_metadata([("k0", "v0"), ("k1", "v1")])
1208                .set_retention(
1209                    crate::model::object::Retention::new()
1210                        .set_mode(crate::model::object::retention::Mode::Locked)
1211                        .set_retain_until_time(wkt::Timestamp::try_from("2035-07-07T18:14:00Z")?),
1212                )
1213                .set_storage_class("ARCHIVE")
1214                .set_temporary_hold(true)
1215                .set_kms_key("test-key")
1216                .with_known_crc32c(crc32c::crc32c(b""))
1217                .with_known_md5_hash(md5::compute(b"").0);
1218
1219        let resource = builder.request.spec.resource.take().unwrap();
1220        let builder = builder;
1221        assert_eq!(
1222            &builder.request.spec,
1223            &WriteObjectSpec::new()
1224                .set_if_generation_match(10)
1225                .set_if_generation_not_match(20)
1226                .set_if_metageneration_match(30)
1227                .set_if_metageneration_not_match(40)
1228                .set_predefined_acl("private")
1229        );
1230
1231        assert_eq!(
1232            &builder.request.params,
1233            &Some(CommonObjectRequestParams::from(key))
1234        );
1235
1236        assert_eq!(
1237            resource,
1238            Object::new()
1239                .set_name("object")
1240                .set_bucket("projects/_/buckets/bucket")
1241                .set_acl([ObjectAccessControl::new()
1242                    .set_entity("allAuthenticatedUsers")
1243                    .set_role("READER")])
1244                .set_cache_control("public; max-age=7200")
1245                .set_content_disposition("inline")
1246                .set_content_encoding("gzip")
1247                .set_content_language("en")
1248                .set_content_type("text/plain")
1249                .set_contexts(ObjectContexts::new().set_custom([(
1250                    "context-key",
1251                    ObjectCustomContextPayload::new().set_value("context-value"),
1252                )]))
1253                .set_checksums(
1254                    crate::model::ObjectChecksums::new()
1255                        .set_crc32c(crc32c::crc32c(b""))
1256                        .set_md5_hash(bytes::Bytes::from_iter(md5::compute(b"").0))
1257                )
1258                .set_custom_time(wkt::Timestamp::try_from("2025-07-07T18:11:00Z")?)
1259                .set_event_based_hold(true)
1260                .set_metadata([("k0", "v0"), ("k1", "v1")])
1261                .set_retention(
1262                    crate::model::object::Retention::new()
1263                        .set_mode("LOCKED")
1264                        .set_retain_until_time(wkt::Timestamp::try_from("2035-07-07T18:14:00Z")?)
1265                )
1266                .set_storage_class("ARCHIVE")
1267                .set_temporary_hold(true)
1268                .set_kms_key("test-key")
1269        );
1270
1271        Ok(())
1272    }
1273
1274    #[tokio::test]
1275    async fn upload_object_options() {
1276        let inner = test_inner_client(
1277            test_builder()
1278                .with_resumable_upload_threshold(123_usize)
1279                .with_resumable_upload_buffer_size(234_usize),
1280        )
1281        .await;
1282        let options = inner.options.clone();
1283        let stub = crate::storage::transport::Storage::new(inner);
1284        let request = WriteObject::new(
1285            stub.clone(),
1286            "projects/_/buckets/bucket",
1287            "object",
1288            "",
1289            options.clone(),
1290        );
1291        assert_eq!(request.options.resumable_upload_threshold(), 123);
1292        assert_eq!(request.options.resumable_upload_buffer_size(), 234);
1293
1294        let request = WriteObject::new(stub, "projects/_/buckets/bucket", "object", "", options)
1295            .with_resumable_upload_threshold(345_usize)
1296            .with_resumable_upload_buffer_size(456_usize);
1297        assert_eq!(request.options.resumable_upload_threshold(), 345);
1298        assert_eq!(request.options.resumable_upload_buffer_size(), 456);
1299    }
1300
1301    const QUICK: &str = "the quick brown fox jumps over the lazy dog";
1302    const VEXING: &str = "how vexingly quick daft zebras jump";
1303
1304    fn quick_checksum(mut engine: Checksum) -> ObjectChecksums {
1305        engine.update(0, &bytes::Bytes::from_static(QUICK.as_bytes()));
1306        engine.finalize()
1307    }
1308
1309    async fn collect<S: StreamingSource>(mut stream: S) -> anyhow::Result<Vec<u8>> {
1310        let mut collected = Vec::new();
1311        while let Some(b) = stream.next().await.transpose()? {
1312            collected.extend_from_slice(&b);
1313        }
1314        Ok(collected)
1315    }
1316
1317    #[tokio::test]
1318    async fn checksum_default() -> Result {
1319        let client = test_builder().build().await?;
1320        let upload = client
1321            .write_object("my-bucket", "my-object", QUICK)
1322            .precompute_checksums()
1323            .await?;
1324        let want = quick_checksum(Checksum {
1325            crc32c: Some(Crc32c::default()),
1326            md5_hash: None,
1327        });
1328        assert_eq!(
1329            upload.request.spec.resource.and_then(|r| r.checksums),
1330            Some(want)
1331        );
1332        let collected = collect(upload.payload).await?;
1333        assert_eq!(collected, QUICK.as_bytes());
1334        Ok(())
1335    }
1336
1337    #[tokio::test]
1338    async fn checksum_md5_and_crc32c() -> Result {
1339        let client = test_builder().build().await?;
1340        let upload = client
1341            .write_object("my-bucket", "my-object", QUICK)
1342            .compute_md5()
1343            .precompute_checksums()
1344            .await?;
1345        let want = quick_checksum(Checksum {
1346            crc32c: Some(Crc32c::default()),
1347            md5_hash: Some(Md5::default()),
1348        });
1349        assert_eq!(
1350            upload.request.spec.resource.and_then(|r| r.checksums),
1351            Some(want)
1352        );
1353        Ok(())
1354    }
1355
1356    #[tokio::test]
1357    async fn checksum_precomputed() -> Result {
1358        let mut engine = Checksum {
1359            crc32c: Some(Crc32c::default()),
1360            md5_hash: Some(Md5::default()),
1361        };
1362        engine.update(0, &bytes::Bytes::from_static(VEXING.as_bytes()));
1363        let ck = engine.finalize();
1364
1365        let client = test_builder().build().await?;
1366        let upload = client
1367            .write_object("my-bucket", "my-object", QUICK)
1368            .with_known_crc32c(ck.crc32c.unwrap())
1369            .with_known_md5_hash(ck.md5_hash.clone())
1370            .precompute_checksums()
1371            .await?;
1372        // Note that the checksums do not match the data. This is intentional,
1373        // we are trying to verify that whatever is provided in with_crc32c()
1374        // and with_md5() is respected.
1375        assert_eq!(
1376            upload.request.spec.resource.and_then(|r| r.checksums),
1377            Some(ck)
1378        );
1379
1380        Ok(())
1381    }
1382
    #[tokio::test]
    async fn checksum_crc32c_known_md5_computed() -> Result {
        let mut engine = Checksum {
            crc32c: Some(Crc32c::default()),
            md5_hash: Some(Md5::default()),
        };
        engine.update(0, &bytes::Bytes::from_static(VEXING.as_bytes()));
        let ck = engine.finalize();

        let client = test_builder().build().await?;
        let upload = client
            .write_object("my-bucket", "my-object", QUICK)
            .compute_md5()
            .with_known_crc32c(ck.crc32c.unwrap())
            .precompute_checksums()
            .await?;
        // Note that the CRC32C does not match the data. This is intentional:
        // we are verifying that the value provided via with_known_crc32c() is
        // respected, while the MD5 hash is still computed from the payload.
        let want = quick_checksum(Checksum {
            crc32c: None,
            md5_hash: Some(Md5::default()),
        })
        .set_crc32c(ck.crc32c.unwrap());
        assert_eq!(
            upload.request.spec.resource.and_then(|r| r.checksums),
            Some(want)
        );

        Ok(())
    }

    #[tokio::test]
    async fn checksum_mixed_then_precomputed() -> Result {
        let mut engine = Checksum {
            crc32c: Some(Crc32c::default()),
            md5_hash: Some(Md5::default()),
        };
        engine.update(0, &bytes::Bytes::from_static(VEXING.as_bytes()));
        let ck = engine.finalize();

        let client = test_builder().build().await?;
        let upload = client
            .write_object("my-bucket", "my-object", QUICK)
            .with_known_md5_hash(ck.md5_hash.clone())
            .with_known_crc32c(ck.crc32c.unwrap())
            .precompute_checksums()
            .await?;
        // Note that the checksums do not match the data. This is intentional:
        // we are verifying that whatever is provided in with_known*() is
        // respected.
        let want = ck.clone();
        assert_eq!(
            upload.request.spec.resource.and_then(|r| r.checksums),
            Some(want)
        );

        Ok(())
    }

    #[tokio::test]
    async fn checksum_full_computed_then_md5_precomputed() -> Result {
        let mut engine = Checksum {
            crc32c: Some(Crc32c::default()),
            md5_hash: Some(Md5::default()),
        };
        engine.update(0, &bytes::Bytes::from_static(VEXING.as_bytes()));
        let ck = engine.finalize();

        let client = test_builder().build().await?;
        let upload = client
            .write_object("my-bucket", "my-object", QUICK)
            .compute_md5()
            .with_known_md5_hash(ck.md5_hash.clone())
            .precompute_checksums()
            .await?;
        // Note that the MD5 hash does not match the data. This is intentional:
        // we are verifying that the value provided via with_known_md5_hash()
        // overrides the computed MD5, while the CRC32C is still computed from
        // the payload.
        let want = quick_checksum(Checksum {
            crc32c: Some(Crc32c::default()),
            md5_hash: None,
        })
        .set_md5_hash(ck.md5_hash.clone());
        assert_eq!(
            upload.request.spec.resource.and_then(|r| r.checksums),
            Some(want)
        );

        Ok(())
    }

    #[tokio::test]
    async fn checksum_known_crc32_then_computed_md5() -> Result {
        let mut engine = Checksum {
            crc32c: Some(Crc32c::default()),
            md5_hash: Some(Md5::default()),
        };
        engine.update(0, &bytes::Bytes::from_static(VEXING.as_bytes()));
        let ck = engine.finalize();

        let client = test_builder().build().await?;
        let upload = client
            .write_object("my-bucket", "my-object", QUICK)
            .with_known_crc32c(ck.crc32c.unwrap())
            .compute_md5()
            .with_known_md5_hash(ck.md5_hash.clone())
            .precompute_checksums()
            .await?;
        // Note that the checksums do not match the data. This is intentional:
        // we are verifying that the values provided via with_known*() are
        // respected, and that the later with_known_md5_hash() overrides the
        // earlier compute_md5().
        let want = ck.clone();
        assert_eq!(
            upload.request.spec.resource.and_then(|r| r.checksums),
            Some(want)
        );

        Ok(())
    }

    #[tokio::test]
    async fn checksum_known_crc32_then_known_md5() -> Result {
        let mut engine = Checksum {
            crc32c: Some(Crc32c::default()),
            md5_hash: Some(Md5::default()),
        };
        engine.update(0, &bytes::Bytes::from_static(VEXING.as_bytes()));
        let ck = engine.finalize();

        let client = test_builder().build().await?;
        let upload = client
            .write_object("my-bucket", "my-object", QUICK)
            .with_known_crc32c(ck.crc32c.unwrap())
            .with_known_md5_hash(ck.md5_hash.clone())
            .precompute_checksums()
            .await?;
        // Note that the checksums do not match the data. This is intentional:
        // we are verifying that whatever is provided in with_known*() is
        // respected.
        let want = ck.clone();
        assert_eq!(
            upload.request.spec.resource.and_then(|r| r.checksums),
            Some(want)
        );

        Ok(())
    }

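    // A seek() failure during checksum precomputation surfaces as a
    // serialization error that preserves the original io::Error as its source.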
    #[tokio::test]
    async fn precompute_checksums_seek_error() -> Result {
        let mut source = MockSeekSource::new();
        source
            .expect_seek()
            .once()
            .returning(|_| Err(IoError::new(ErrorKind::Deadlock, "test-only")));

        let client = test_builder().build().await?;
        let err = client
            .write_object("my-bucket", "my-object", source)
            .precompute_checksums()
            .await
            .expect_err("seek() returns an error");
        assert!(err.is_serialization(), "{err:?}");
        assert!(
            err.source()
                .and_then(|e| e.downcast_ref::<IoError>())
                .is_some(),
            "{err:?}"
        );

        Ok(())
    }

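    // A next() failure while reading the payload is reported the same way as a
    // seek() failure.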
    #[tokio::test]
    async fn precompute_checksums_next_error() -> Result {
        let mut source = MockSeekSource::new();
        source.expect_seek().returning(|_| Ok(()));
        let mut seq = mockall::Sequence::new();
        source
            .expect_next()
            .times(3)
            .in_sequence(&mut seq)
            .returning(|| Some(Ok(bytes::Bytes::new())));
        source
            .expect_next()
            .once()
            .in_sequence(&mut seq)
            .returning(|| Some(Err(IoError::new(ErrorKind::BrokenPipe, "test-only"))));

        let client = test_builder().build().await?;
        let err = client
            .write_object("my-bucket", "my-object", source)
            .precompute_checksums()
            .await
            .expect_err("next() returns an error");
        assert!(err.is_serialization(), "{err:?}");
        assert!(
            err.source()
                .and_then(|e| e.downcast_ref::<IoError>())
                .is_some(),
            "{err:?}"
        );

        Ok(())
    }

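    // The Debug representation should mention the builder's main components.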
    #[tokio::test]
    async fn debug() -> Result {
        let client = test_builder().build().await?;
        let upload = client
            .write_object("my-bucket", "my-object", "")
            .precompute_checksums()
            .await;

        let fmt = format!("{upload:?}");
        ["WriteObject", "inner", "spec", "options", "checksum"]
            .into_iter()
            .for_each(|text| {
                assert!(fmt.contains(text), "expected {text} in {fmt}");
            });
        Ok(())
    }
}