WriteObject

Struct WriteObject 

Source
pub struct WriteObject<T, S = Storage>
where S: Storage + 'static,
{ /* private fields */ }
Expand description

A request builder for object writes.

§Example: hello world

use google_cloud_storage::client::Storage;
async fn sample(client: &Storage) -> anyhow::Result<()> {
    let response = client
        .write_object("projects/_/buckets/my-bucket", "hello", "Hello World!")
        .send_unbuffered()
        .await?;
    println!("response details={response:?}");
    Ok(())
}

§Example: upload a file

use google_cloud_storage::client::Storage;
async fn sample(client: &Storage) -> anyhow::Result<()> {
    let payload = tokio::fs::File::open("my-data").await?;
    let response = client
        .write_object("projects/_/buckets/my-bucket", "my-object", payload)
        .send_unbuffered()
        .await?;
    println!("response details={response:?}");
    Ok(())
}

§Example: create a new object from a custom data source

use google_cloud_storage::{client::Storage, streaming_source::StreamingSource};
struct DataSource;
impl StreamingSource for DataSource {
    type Error = std::io::Error;
    async fn next(&mut self) -> Option<Result<bytes::Bytes, Self::Error>> {
        None // yield the next chunk of data; return `None` to end the stream
    }
}

async fn sample(client: &Storage) -> anyhow::Result<()> {
    let response = client
        .write_object("projects/_/buckets/my-bucket", "my-object", DataSource)
        .send_buffered()
        .await?;
    println!("response details={response:?}");
    Ok(())
}

Implementations§

Source§

impl<T, S> WriteObject<T, S>
where S: Storage + 'static,

Source

pub fn set_if_generation_match<V>(self, v: V) -> Self
where V: Into<i64>,

Set a request precondition on the object generation to match.

With this precondition the request fails if the current object generation does not match the provided value. A common value is 0, which prevents writes from succeeding if the object already exists.

§Example
let response = client
    .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
    .set_if_generation_match(0)
    .send_buffered()
    .await?;
println!("response details={response:?}");
Source

pub fn set_if_generation_not_match<V>(self, v: V) -> Self
where V: Into<i64>,

Set a request precondition on the object generation to not match.

With this precondition the request fails if the current object generation matches the provided value.

§Example
let response = client
    .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
    .set_if_generation_not_match(0)
    .send_buffered()
    .await?;
println!("response details={response:?}");
Source

pub fn set_if_metageneration_match<V>(self, v: V) -> Self
where V: Into<i64>,

Set a request precondition on the object meta generation.

With this precondition the request fails if the current object metadata generation does not match the provided value. This may be useful to prevent changes when the metageneration is known.

§Example
let response = client
    .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
    .set_if_metageneration_match(1234)
    .send_buffered()
    .await?;
println!("response details={response:?}");
Source

pub fn set_if_metageneration_not_match<V>(self, v: V) -> Self
where V: Into<i64>,

Set a request precondition on the object meta-generation.

With this precondition the request fails if the current object metadata generation matches the provided value. This is rarely useful in uploads, it is more commonly used on reads to prevent a large response if the data is already cached.

§Example
let response = client
    .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
    .set_if_metageneration_not_match(1234)
    .send_buffered()
    .await?;
println!("response details={response:?}");
Source

pub fn set_acl<I, V>(self, v: I) -> Self
where I: IntoIterator<Item = V>, V: Into<ObjectAccessControl>,

Sets the ACL for the new object.

§Example
let response = client
    .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
    .set_acl([ObjectAccessControl::new().set_entity("allAuthenticatedUsers").set_role("READER")])
    .send_buffered()
    .await?;
println!("response details={response:?}");
Source

pub fn set_cache_control<V: Into<String>>(self, v: V) -> Self

Sets the cache control for the new object.

This can be used to control caching in public objects.

§Example
let response = client
    .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
    .set_cache_control("public, max-age=7200")
    .send_buffered()
    .await?;
println!("response details={response:?}");
Source

pub fn set_content_disposition<V: Into<String>>(self, v: V) -> Self

Sets the content disposition for the new object.

Google Cloud Storage can serve content directly to web browsers. This attribute sets the Content-Disposition header, which may change how the browser displays the contents.

§Example
let response = client
    .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
    .set_content_disposition("inline")
    .send_buffered()
    .await?;
println!("response details={response:?}");
Source

pub fn set_content_encoding<V: Into<String>>(self, v: V) -> Self

Sets the content encoding for the object data.

This can be used to upload compressed data and enable transcoding of the data during reads.

§Example
use flate2::write::GzEncoder;
use std::io::Write;
let mut e = GzEncoder::new(Vec::new(), flate2::Compression::default());
e.write_all(b"hello world")?;
let response = client
    .write_object("projects/_/buckets/my-bucket", "my-object", bytes::Bytes::from_owner(e.finish()?))
    .set_content_encoding("gzip")
    .send_buffered()
    .await?;
println!("response details={response:?}");
Source

pub fn set_content_language<V: Into<String>>(self, v: V) -> Self

Sets the content language for the new object.

Google Cloud Storage can serve content directly to web browsers. This attribute sets the Content-Language header, which may change how the browser displays the contents.

§Example
let response = client
    .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
    .set_content_language("en")
    .send_buffered()
    .await?;
println!("response details={response:?}");
Source

pub fn set_content_type<V: Into<String>>(self, v: V) -> Self

Sets the content type for the new object.

Google Cloud Storage can serve content directly to web browsers. This attribute sets the Content-Type header, which may change how the browser interprets the contents.

§Example
let response = client
    .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
    .set_content_type("text/plain")
    .send_buffered()
    .await?;
println!("response details={response:?}");
Source

pub fn set_custom_time<V: Into<Timestamp>>(self, v: V) -> Self

Sets the custom time for the new object.

This field is typically set in order to use the DaysSinceCustomTime condition in Object Lifecycle Management.

§Example
let time = wkt::Timestamp::try_from("2025-07-07T18:30:00Z")?;
let response = client
    .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
    .set_custom_time(time)
    .send_buffered()
    .await?;
println!("response details={response:?}");
Source

pub fn set_event_based_hold<V: Into<bool>>(self, v: V) -> Self

Sets the event based hold flag for the new object.

This field is typically set in order to prevent objects from being deleted or modified.

§Example
let response = client
    .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
    .set_event_based_hold(true)
    .send_buffered()
    .await?;
println!("response details={response:?}");
Source

pub fn set_metadata<I, K, V>(self, i: I) -> Self
where I: IntoIterator<Item = (K, V)>, K: Into<String>, V: Into<String>,

Sets the custom metadata for the new object.

This field is typically set to annotate the object with application-specific metadata.

§Example
let response = client
    .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
    .set_metadata([("test-only", "true"), ("environment", "qa")])
    .send_buffered()
    .await?;
println!("response details={response:?}");
Source

pub fn set_retention<V>(self, v: V) -> Self
where V: Into<Retention>,

Sets the retention configuration for the new object.

§Example
let response = client
    .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
    .set_retention(
        Retention::new()
            .set_mode(retention::Mode::Locked)
            .set_retain_until_time(wkt::Timestamp::try_from("2035-01-01T00:00:00Z")?))
    .send_buffered()
    .await?;
println!("response details={response:?}");
Source

pub fn set_storage_class<V>(self, v: V) -> Self
where V: Into<String>,

Sets the storage class for the new object.

§Example
let response = client
    .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
    .set_storage_class("ARCHIVE")
    .send_buffered()
    .await?;
println!("response details={response:?}");
Source

pub fn set_temporary_hold<V: Into<bool>>(self, v: V) -> Self

Sets the temporary hold flag for the new object.

This field is typically set in order to prevent objects from being deleted or modified.

§Example
let response = client
    .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
    .set_temporary_hold(true)
    .send_buffered()
    .await?;
println!("response details={response:?}");
Source

pub fn set_kms_key<V>(self, v: V) -> Self
where V: Into<String>,

Sets the resource name of the Customer-managed encryption key for this object.

The service imposes a number of restrictions on the keys used to encrypt Google Cloud Storage objects. Read the documentation in full before trying to use customer-managed encryption keys. In particular, verify the service has the necessary permissions, and the key is in a compatible location.

§Example
let response = client
    .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
    .set_kms_key("projects/test-project/locations/us-central1/keyRings/test-ring/cryptoKeys/test-key")
    .send_buffered()
    .await?;
println!("response details={response:?}");
Source

pub fn set_predefined_acl<V>(self, v: V) -> Self
where V: Into<String>,

Configure this object to use one of the predefined ACLs.

§Example
let response = client
    .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
    .set_predefined_acl("private")
    .send_buffered()
    .await?;
println!("response details={response:?}");
Source

pub fn set_key(self, v: KeyAes256) -> Self

The encryption key used with the Customer-Supplied Encryption Keys feature. In raw bytes format (not base64-encoded).

§Example
let key: &[u8] = &[97; 32];
let response = client
    .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
    .set_key(KeyAes256::new(key)?)
    .send_buffered()
    .await?;
println!("response details={response:?}");
Source

pub fn with_idempotency(self, v: bool) -> Self

Configure the idempotency for this upload.

By default, the client library treats single-shot uploads without preconditions, as non-idempotent. If the destination bucket is configured with object versioning then the operation may succeed multiple times with observable side-effects. With object versioning and a lifecycle policy limiting the number of versions, uploading the same data multiple times may result in data loss.

The client library cannot efficiently determine if these conditions apply to your upload. If they do, or your application can tolerate multiple versions of the same data for other reasons, consider using with_idempotency(true).

The client library treats resumable uploads as idempotent, regardless of the value in this option. Such uploads can succeed at most once.

§Example
use std::time::Duration;
use gax::retry_policy::RetryPolicyExt;
let response = client
    .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
    .with_idempotency(true)
    .send_buffered()
    .await?;
println!("response details={response:?}");
Source

pub fn with_retry_policy<V: Into<RetryPolicyArg>>(self, v: V) -> Self

The retry policy used for this request.

§Example
use std::time::Duration;
use gax::retry_policy::RetryPolicyExt;
let response = client
    .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
    .with_retry_policy(
        RetryableErrors
            .with_attempt_limit(5)
            .with_time_limit(Duration::from_secs(90)),
    )
    .send_buffered()
    .await?;
println!("response details={response:?}");
Source

pub fn with_backoff_policy<V: Into<BackoffPolicyArg>>(self, v: V) -> Self

The backoff policy used for this request.

§Example
use std::time::Duration;
use gax::exponential_backoff::ExponentialBackoff;
let response = client
    .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
    .with_backoff_policy(ExponentialBackoff::default())
    .send_buffered()
    .await?;
println!("response details={response:?}");
Source

pub fn with_retry_throttler<V: Into<RetryThrottlerArg>>(self, v: V) -> Self

The retry throttler used for this request.

Most of the time you want to use the same throttler for all the requests in a client, and even the same throttler for many clients. Rarely it may be necessary to use a custom throttler for some subset of the requests.

§Example
let response = client
    .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
    .with_retry_throttler(adhoc_throttler())
    .send_buffered()
    .await?;
println!("response details={response:?}");
fn adhoc_throttler() -> gax::retry_throttler::SharedRetryThrottler {
    unimplemented!("construct an application-specific throttler here")
}
Source

pub fn with_resumable_upload_threshold<V: Into<usize>>(self, v: V) -> Self

Sets the payload size threshold to switch from single-shot to resumable uploads.

§Example
let response = client
    .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
    .with_resumable_upload_threshold(0_usize) // Forces a resumable upload.
    .send_buffered()
    .await?;
println!("response details={response:?}");

The client library can perform uploads using single-shot or resumable uploads. For small objects, single-shot uploads offer better performance, as they require a single HTTP transfer. For larger objects, the additional request latency is not significant, and resumable uploads offer better recovery on errors.

The library automatically selects resumable uploads when the payload is equal to or larger than this option. For smaller uploads the client library uses single-shot uploads.

The exact threshold depends on where the application is deployed and destination bucket location with respect to where the application is running. The library defaults should work well in most cases, but some applications may benefit from fine-tuning.

Source

pub fn with_resumable_upload_buffer_size<V: Into<usize>>(self, v: V) -> Self

Changes the buffer size for some resumable uploads.

§Example
let response = client
    .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
    .with_resumable_upload_buffer_size(32 * 1024 * 1024_usize)
    .send_buffered()
    .await?;
println!("response details={response:?}");

When performing resumable uploads from sources without Seek the client library needs to buffer data in memory until it is persisted by the service. Otherwise the data would be lost if the upload fails. Applications may want to tune this buffer size:

  • Use smaller buffer sizes to support more concurrent uploads in the same application.
  • Use larger buffer sizes for better throughput. Sending many small buffers stalls the upload until the client receives a successful response from the service.

Keep in mind that there are diminishing returns on using larger buffers.

Source

pub fn set_md5_hash<I, V>(self, i: I) -> Self
where I: IntoIterator<Item = V>, V: Into<u8>,

Source

pub fn with_known_crc32c<V: Into<u32>>(self, v: V) -> Self

Provide a precomputed value for the CRC32C checksum.

§Example
use crc32c::crc32c;
let response = client
    .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
    .with_known_crc32c(crc32c(b"hello world"))
    .send_buffered()
    .await?;
println!("response details={response:?}");

In some applications, the payload’s CRC32C checksum is already known. For example, the application may be reading the data from another blob storage system.

In such cases, it is safer to pass the known CRC32C of the payload to [Cloud Storage], and more efficient to skip the computation in the client library.

Note that once you provide a CRC32C value to this builder you cannot use compute_md5() to also have the library compute the checksums.

Source

pub fn with_known_md5_hash<I, V>(self, i: I) -> Self
where I: IntoIterator<Item = V>, V: Into<u8>,

Provide a precomputed value for the MD5 hash.

§Example
use md5::compute;
let hash = md5::compute(b"hello world");
let response = client
    .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
    .with_known_md5_hash(bytes::Bytes::from_owner(hash.0))
    .send_buffered()
    .await?;
println!("response details={response:?}");

In some applications, the payload’s MD5 hash is already known. For example, the application may be reading the data from another blob storage system.

In such cases, it is safer to pass the known MD5 of the payload to [Cloud Storage], and more efficient to skip the computation in the client library.

Note that once you provide a MD5 value to this builder you cannot use compute_md5() to also have the library compute the checksums.

Source

pub fn compute_md5(self) -> Self

Enables computation of MD5 hashes.

§Example
let payload = tokio::fs::File::open("my-data").await?;
let response = client
    .write_object("projects/_/buckets/my-bucket", "my-object", payload)
    .compute_md5()
    .send_buffered()
    .await?;
println!("response details={response:?}");

See precompute_checksums for more details on how checksums are used by the client library and their limitations.

Source§

impl<T, S> WriteObject<T, S>
where T: StreamingSource + Seek + Send + Sync + 'static, <T as StreamingSource>::Error: Error + Send + Sync + 'static, <T as Seek>::Error: Error + Send + Sync + 'static, S: Storage + 'static,

Source

pub async fn send_unbuffered(self) -> Result<Object>

A simple upload from a buffer.

§Example
let response = client
    .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
    .send_unbuffered()
    .await?;
println!("response details={response:?}");
Source

pub async fn precompute_checksums(self) -> Result<Self>

Precompute the payload checksums before uploading the data.

If the checksums are known when the upload starts, the client library can include the checksums with the upload request, and the service can reject the upload if the payload and the checksums do not match.

§Example
let payload = tokio::fs::File::open("my-data").await?;
let response = client
    .write_object("projects/_/buckets/my-bucket", "my-object", payload)
    .precompute_checksums()
    .await?
    .send_unbuffered()
    .await?;
println!("response details={response:?}");

Precomputing the checksums can be expensive if the data source is slow to read. Therefore, the client library does not precompute the checksums by default. The client library compares the checksums computed by the service against its own checksums. If they do not match, the client library returns an error. However, the service has already created the object with the (likely incorrect) data.

The client library currently uses the JSON API, it is not possible to send the checksums at the end of the upload with this API.

Source§

impl<T, S> WriteObject<T, S>
where T: StreamingSource + Send + Sync + 'static, T::Error: Error + Send + Sync + 'static, S: Storage + 'static,

Source

pub async fn send_buffered(self) -> Result<Object>

Upload an object from a streaming source without rewinds.

If the data source does not implement Seek the client library must buffer data sent to the service until the service confirms it has persisted the data. This requires more memory in the client, and when the buffer grows too large, may require stalling the writer until the service can persist the data.

Use this function for data sources where it is expensive or impossible to restart the data source. This function is also useful when it is hard or impossible to predict the number of bytes emitted by a stream, even if restarting the stream is not too expensive.

§Example
let response = client
    .write_object("projects/_/buckets/my-bucket", "my-object", "hello world")
    .send_buffered()
    .await?;
println!("response details={response:?}");

Trait Implementations§

Source§

impl<T, S> Debug for WriteObject<T, S>
where S: Storage + 'static,

Source§

fn fmt(&self, f: &mut Formatter<'_>) -> Result

Formats the value using the given formatter. Read more

Auto Trait Implementations§

§

impl<T, S = Storage> !Freeze for WriteObject<T, S>

§

impl<T, S = Storage> !RefUnwindSafe for WriteObject<T, S>

§

impl<T, S> Send for WriteObject<T, S>
where T: Send,

§

impl<T, S> Sync for WriteObject<T, S>
where T: Sync,

§

impl<T, S> Unpin for WriteObject<T, S>
where T: Unpin,

§

impl<T, S = Storage> !UnwindSafe for WriteObject<T, S>

Blanket Implementations§

Source§

impl<T> Any for T
where T: 'static + ?Sized,

Source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
Source§

impl<T> Borrow<T> for T
where T: ?Sized,

Source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
Source§

impl<T> BorrowMut<T> for T
where T: ?Sized,

Source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
Source§

impl<T> From<T> for T

Source§

fn from(t: T) -> T

Returns the argument unchanged.

Source§

impl<T> Instrument for T

Source§

fn instrument(self, span: Span) -> Instrumented<Self>

Instruments this type with the provided Span, returning an Instrumented wrapper. Read more
Source§

fn in_current_span(self) -> Instrumented<Self>

Instruments this type with the current Span, returning an Instrumented wrapper. Read more
Source§

impl<T, U> Into<U> for T
where U: From<T>,

Source§

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

Source§

impl<T> IntoRequest<T> for T

Source§

fn into_request(self) -> Request<T>

Wrap the input message T in a tonic::Request
Source§

impl<L> LayerExt<L> for L

Source§

fn named_layer<S>(&self, service: S) -> Layered<<L as Layer<S>>::Service, S>
where L: Layer<S>,

Applies the layer to a service and wraps it in Layered.
Source§

impl<T> PolicyExt for T
where T: ?Sized,

Source§

fn and<P, B, E>(self, other: P) -> And<T, P>
where T: Policy<B, E>, P: Policy<B, E>,

Create a new Policy that returns Action::Follow only if self and other return Action::Follow. Read more
Source§

fn or<P, B, E>(self, other: P) -> Or<T, P>
where T: Policy<B, E>, P: Policy<B, E>,

Create a new Policy that returns Action::Follow if either self or other returns Action::Follow. Read more
Source§

impl<T> Same for T

Source§

type Output = T

Should always be Self
Source§

impl<T, U> TryFrom<U> for T
where U: Into<T>,

Source§

type Error = Infallible

The type returned in the event of a conversion error.
Source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
Source§

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

Source§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
Source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.
Source§

impl<V, T> VZip<V> for T
where V: MultiLane<T>,

Source§

fn vzip(self) -> V

Source§

impl<T> WithSubscriber for T

Source§

fn with_subscriber<S>(self, subscriber: S) -> WithDispatch<Self>
where S: Into<Dispatch>,

Attaches the provided Subscriber to this type, returning a WithDispatch wrapper. Read more
Source§

fn with_current_subscriber(self) -> WithDispatch<Self>

Attaches the current default Subscriber to this type, returning a WithDispatch wrapper. Read more
Source§

impl<T> ErasedDestructor for T
where T: 'static,