pub struct UploadObject<T, C = Crc32c> { /* private fields */ }
A request builder for object uploads.
§Example: hello world
use google_cloud_storage::client::Storage;
async fn sample(client: &Storage) -> anyhow::Result<()> {
let response = client
.upload_object("projects/_/buckets/my-bucket", "hello", "Hello World!")
.send_unbuffered()
.await?;
println!("response details={response:?}");
Ok(())
}
§Example: upload a file
use google_cloud_storage::client::Storage;
async fn sample(client: &Storage) -> anyhow::Result<()> {
let payload = tokio::fs::File::open("my-data").await?;
let response = client
.upload_object("projects/_/buckets/my-bucket", "my-object", payload)
.send_unbuffered()
.await?;
println!("response details={response:?}");
Ok(())
}
§Example: upload a custom data source
use google_cloud_storage::{client::Storage, upload_source::StreamingSource};
struct DataSource;
impl StreamingSource for DataSource {
    type Error = std::io::Error;
    async fn next(&mut self) -> Option<Result<bytes::Bytes, Self::Error>> {
        // Placeholder body: produce the next chunk of data here; return None when the stream is exhausted.
        None
    }
}
async fn sample(client: &Storage) -> anyhow::Result<()> {
let response = client
.upload_object("projects/_/buckets/my-bucket", "my-object", DataSource)
.send_buffered()
.await?;
println!("response details={response:?}");
Ok(())
}
Implementations§
impl<T, C> UploadObject<T, C>
pub fn with_if_generation_match<V>(self, v: V) -> Self
Set a request precondition on the object generation to match.
With this precondition the request fails if the current object
generation does not match the provided value. A common value is 0,
which prevents the upload from succeeding if the object already exists.
§Example
let response = client
.upload_object("projects/_/buckets/my-bucket", "my-object", "hello world")
.with_if_generation_match(0)
.send_buffered()
.await?;
println!("response details={response:?}");Sourcepub fn with_if_generation_not_match<V>(self, v: V) -> Self
pub fn with_if_generation_not_match<V>(self, v: V) -> Self
Set a request precondition on the object generation to not match.
With this precondition the request fails if the current object generation matches the provided value.
§Example
let response = client
.upload_object("projects/_/buckets/my-bucket", "my-object", "hello world")
.with_if_generation_not_match(0)
.send_buffered()
.await?;
println!("response details={response:?}");Sourcepub fn with_if_metageneration_match<V>(self, v: V) -> Self
pub fn with_if_metageneration_match<V>(self, v: V) -> Self
Set a request precondition on the object metageneration to match.
With this precondition the request fails if the current object metadata generation does not match the provided value. This may be useful to prevent changes when the metageneration is known.
§Example
let response = client
.upload_object("projects/_/buckets/my-bucket", "my-object", "hello world")
.with_if_metageneration_match(1234)
.send_buffered()
.await?;
println!("response details={response:?}");Sourcepub fn with_if_metageneration_not_match<V>(self, v: V) -> Self
pub fn with_if_metageneration_not_match<V>(self, v: V) -> Self
Set a request precondition on the object metageneration to not match.
With this precondition the request fails if the current object metadata generation matches the provided value. This is rarely useful in uploads, it is more commonly used on downloads to prevent downloads if the value is already cached.
§Example
let response = client
.upload_object("projects/_/buckets/my-bucket", "my-object", "hello world")
.with_if_metageneration_not_match(1234)
.send_buffered()
.await?;
println!("response details={response:?}");Sourcepub fn with_acl<I, V>(self, v: I) -> Self
pub fn with_acl<I, V>(self, v: I) -> Self
Sets the ACL for the new object.
§Example
let response = client
.upload_object("projects/_/buckets/my-bucket", "my-object", "hello world")
.with_acl([ObjectAccessControl::new().set_entity("allAuthenticatedUsers").set_role("READER")])
.send_buffered()
.await?;
println!("response details={response:?}");Sourcepub fn with_cache_control<V: Into<String>>(self, v: V) -> Self
pub fn with_cache_control<V: Into<String>>(self, v: V) -> Self
Sets the cache control for the new object.
This can be used to control caching in public objects.
§Example
let response = client
.upload_object("projects/_/buckets/my-bucket", "my-object", "hello world")
.with_cache_control("public; max-age=7200")
.send_buffered()
.await?;
println!("response details={response:?}");Sourcepub fn with_content_disposition<V: Into<String>>(self, v: V) -> Self
pub fn with_content_disposition<V: Into<String>>(self, v: V) -> Self
Sets the content disposition for the new object.
Google Cloud Storage can serve content directly to web browsers. This
attribute sets the Content-Disposition header, which may change how
the browser displays the contents.
§Example
let response = client
.upload_object("projects/_/buckets/my-bucket", "my-object", "hello world")
.with_content_disposition("inline")
.send_buffered()
.await?;
println!("response details={response:?}");Sourcepub fn with_content_encoding<V: Into<String>>(self, v: V) -> Self
pub fn with_content_encoding<V: Into<String>>(self, v: V) -> Self
Sets the content encoding for the object data.
This can be used to upload compressed data and enable transcoding of the data during downloads.
§Example
use flate2::write::GzEncoder;
use std::io::Write;
let mut e = GzEncoder::new(Vec::new(), flate2::Compression::default());
e.write_all(b"hello world");
let response = client
.upload_object("projects/_/buckets/my-bucket", "my-object", bytes::Bytes::from_owner(e.finish()?))
.with_content_encoding("gzip")
.send_buffered()
.await?;
println!("response details={response:?}");Sourcepub fn with_content_language<V: Into<String>>(self, v: V) -> Self
pub fn with_content_language<V: Into<String>>(self, v: V) -> Self
Sets the content language for the new object.
Google Cloud Storage can serve content directly to web browsers. This
attribute sets the Content-Language header, which may change how the
browser displays the contents.
§Example
let response = client
.upload_object("projects/_/buckets/my-bucket", "my-object", "hello world")
.with_content_language("en")
.send_buffered()
.await?;
println!("response details={response:?}");Sourcepub fn with_content_type<V: Into<String>>(self, v: V) -> Self
pub fn with_content_type<V: Into<String>>(self, v: V) -> Self
Sets the content type for the new object.
Google Cloud Storage can serve content directly to web browsers. This
attribute sets the Content-Type header, which may change how the
browser interprets the contents.
§Example
let response = client
.upload_object("projects/_/buckets/my-bucket", "my-object", "hello world")
.with_content_type("text/plain")
.send_buffered()
.await?;
println!("response details={response:?}");Sourcepub fn with_custom_time<V: Into<Timestamp>>(self, v: V) -> Self
pub fn with_custom_time<V: Into<Timestamp>>(self, v: V) -> Self
Sets the custom time for the new object.
This field is typically set in order to use the DaysSinceCustomTime condition in Object Lifecycle Management.
§Example
let time = wkt::Timestamp::try_from("2025-07-07T18:30:00Z")?;
let response = client
.upload_object("projects/_/buckets/my-bucket", "my-object", "hello world")
.with_custom_time(time)
.send_buffered()
.await?;
println!("response details={response:?}");Sourcepub fn with_event_based_hold<V: Into<bool>>(self, v: V) -> Self
pub fn with_event_based_hold<V: Into<bool>>(self, v: V) -> Self
Sets the event based hold flag for the new object.
This field is typically set in order to prevent objects from being deleted or modified.
§Example
let response = client
.upload_object("projects/_/buckets/my-bucket", "my-object", "hello world")
.with_event_based_hold(true)
.send_buffered()
.await?;
println!("response details={response:?}");Sourcepub fn with_metadata<I, K, V>(self, i: I) -> Self
pub fn with_metadata<I, K, V>(self, i: I) -> Self
Sets the custom metadata for the new object.
This field is typically set to annotate the object with application-specific metadata.
§Example
let response = client
.upload_object("projects/_/buckets/my-bucket", "my-object", "hello world")
.with_metadata([("test-only", "true"), ("environment", "qa")])
.send_buffered()
.await?;
println!("response details={response:?}");Sourcepub fn with_retention<V>(self, v: V) -> Self
pub fn with_retention<V>(self, v: V) -> Self
Sets the retention configuration for the new object.
§Example
let response = client
.upload_object("projects/_/buckets/my-bucket", "my-object", "hello world")
.with_retention(
Retention::new()
.set_mode(retention::Mode::Locked)
.set_retain_until_time(wkt::Timestamp::try_from("2035-01-01T00:00:00Z")?))
.send_buffered()
.await?;
println!("response details={response:?}");Sourcepub fn with_storage_class<V>(self, v: V) -> Self
pub fn with_storage_class<V>(self, v: V) -> Self
Sets the storage class for the new object.
§Example
let response = client
.upload_object("projects/_/buckets/my-bucket", "my-object", "hello world")
.with_storage_class("ARCHIVE")
.send_buffered()
.await?;
println!("response details={response:?}");Sourcepub fn with_temporary_hold<V: Into<bool>>(self, v: V) -> Self
pub fn with_temporary_hold<V: Into<bool>>(self, v: V) -> Self
Sets the temporary hold flag for the new object.
This field is typically set in order to prevent objects from being deleted or modified.
§Example
let response = client
.upload_object("projects/_/buckets/my-bucket", "my-object", "hello world")
.with_temporary_hold(true)
.send_buffered()
.await?;
println!("response details={response:?}");Sourcepub fn with_kms_key<V>(self, v: V) -> Self
pub fn with_kms_key<V>(self, v: V) -> Self
Sets the resource name of the Customer-managed encryption key for this object.
The service imposes a number of restrictions on the keys used to encrypt Google Cloud Storage objects. Read the documentation in full before trying to use customer-managed encryption keys. In particular, verify the service has the necessary permissions, and the key is in a compatible location.
§Example
let response = client
.upload_object("projects/_/buckets/my-bucket", "my-object", "hello world")
.with_kms_key("projects/test-project/locations/us-central1/keyRings/test-ring/cryptoKeys/test-key")
.send_buffered()
.await?;
println!("response details={response:?}");Sourcepub fn with_predefined_acl<V>(self, v: V) -> Self
pub fn with_predefined_acl<V>(self, v: V) -> Self
Configure this object to use one of the predefined ACLs.
§Example
let response = client
.upload_object("projects/_/buckets/my-bucket", "my-object", "hello world")
.with_predefined_acl("private")
.send_buffered()
.await?;
println!("response details={response:?}");Sourcepub fn with_key(self, v: KeyAes256) -> Self
pub fn with_key(self, v: KeyAes256) -> Self
Sets the encryption key used with the Customer-Supplied Encryption Keys feature. The key is provided as raw bytes (not base64-encoded).
§Example
let key: &[u8] = &[97; 32];
let response = client
.upload_object("projects/_/buckets/my-bucket", "my-object", "hello world")
.with_key(KeyAes256::new(key)?)
.send_buffered()
.await?;
println!("response details={response:?}");Sourcepub fn with_idempotency(self, v: bool) -> Self
pub fn with_idempotency(self, v: bool) -> Self
Configure the idempotency for this upload.
By default, the client library treats single-shot uploads without preconditions as non-idempotent. If the destination bucket is configured with object versioning, the operation may succeed multiple times with observable side effects. With object versioning and a lifecycle policy limiting the number of versions, uploading the same data multiple times may result in data loss.
The client library cannot efficiently determine if these conditions
apply to your upload. If they do, or your application can tolerate
multiple versions of the same data for other reasons, consider using
with_idempotency(true).
The client library treats resumable uploads as idempotent, regardless of the value in this option. Such uploads can succeed at most once.
§Example
let response = client
.upload_object("projects/_/buckets/my-bucket", "my-object", "hello world")
.with_idempotency(true)
.send_buffered()
.await?;
println!("response details={response:?}");Sourcepub fn with_retry_policy<V: Into<RetryPolicyArg>>(self, v: V) -> Self
pub fn with_retry_policy<V: Into<RetryPolicyArg>>(self, v: V) -> Self
The retry policy used for this request.
§Example
use std::time::Duration;
use gax::retry_policy::RetryPolicyExt;
let response = client
.upload_object("projects/_/buckets/my-bucket", "my-object", "hello world")
.with_retry_policy(RecommendedPolicy
.with_attempt_limit(5)
.with_time_limit(Duration::from_secs(10)),
)
.send_buffered()
.await?;
println!("response details={response:?}");Sourcepub fn with_backoff_policy<V: Into<BackoffPolicyArg>>(self, v: V) -> Self
pub fn with_backoff_policy<V: Into<BackoffPolicyArg>>(self, v: V) -> Self
The backoff policy used for this request.
§Example
use std::time::Duration;
use gax::exponential_backoff::ExponentialBackoff;
let response = client
.upload_object("projects/_/buckets/my-bucket", "my-object", "hello world")
.with_backoff_policy(ExponentialBackoff::default())
.send_buffered()
.await?;
println!("response details={response:?}");Sourcepub fn with_retry_throttler<V: Into<RetryThrottlerArg>>(self, v: V) -> Self
pub fn with_retry_throttler<V: Into<RetryThrottlerArg>>(self, v: V) -> Self
The retry throttler used for this request.
Most of the time you want to use the same throttler for all the requests in a client, and even the same throttler for many clients. Rarely, it may be necessary to use a custom throttler for some subset of the requests.
§Example
let response = client
.upload_object("projects/_/buckets/my-bucket", "my-object", "hello world")
.with_retry_throttler(adhoc_throttler())
.send_buffered()
.await?;
println!("response details={response:?}");
fn adhoc_throttler() -> gax::retry_throttler::SharedRetryThrottler {
    // Construct the retry throttler shared by these requests (implementation elided).
    unimplemented!()
}
pub fn with_resumable_upload_threshold<V: Into<usize>>(self, v: V) -> Self
Sets the payload size threshold to switch from single-shot to resumable uploads.
§Example
let response = client
.upload_object("projects/_/buckets/my-bucket", "my-object", "hello world")
.with_resumable_upload_threshold(0_usize) // Forces a resumable upload.
.send_buffered()
.await?;
println!("response details={response:?}");The client library can perform uploads using single-shot or resumable uploads. For small objects, single-shot uploads offer better performance, as they require a single HTTP transfer. For larger objects, the additional request latency is not significant, and resumable uploads offer better recovery on errors.
The library automatically selects resumable uploads when the payload is equal to or larger than this option. For smaller uploads the client library uses single-shot uploads.
The optimal threshold depends on where the application is deployed and on the destination bucket's location relative to the application. The library defaults should work well in most cases, but some applications may benefit from fine-tuning, as sketched below.
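For example, an application that mostly uploads mid-sized objects over a fast, reliable network might raise the threshold so those uploads stay single-shot. The 8 MiB value below is purely illustrative, not a recommended default:
let response = client
    .upload_object("projects/_/buckets/my-bucket", "my-object", "hello world")
    .with_resumable_upload_threshold(8 * 1024 * 1024_usize) // payloads under 8 MiB use single-shot uploads
    .send_buffered()
    .await?;
println!("response details={response:?}");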
pub fn with_resumable_upload_buffer_size<V: Into<usize>>(self, v: V) -> Self
Changes the buffer size for some resumable uploads.
§Example
let response = client
.upload_object("projects/_/buckets/my-bucket", "my-object", "hello world")
.with_resumable_upload_buffer_size(32 * 1024 * 1024_usize)
.send_buffered()
.await?;
println!("response details={response:?}");When performing resumable uploads from sources without Seek the client library needs to buffer data in memory until it is persisted by the service. Otherwise the data would be lost if the upload fails. Applications may want to tune this buffer size:
- Use smaller buffer sizes to support more concurrent uploads in the same application (see the sketch after this list).
- Use larger buffer sizes for better throughput. Sending many small buffers stalls the upload until the client receives a successful response from the service.
Keep in mind that there are diminishing returns on using larger buffers.
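As an illustration of the first point, an application running many concurrent uploads might pick a smaller buffer than in the example above. The 4 MiB value is an assumption for illustration, not a recommended setting:
let response = client
    .upload_object("projects/_/buckets/my-bucket", "my-object", "hello world")
    .with_resumable_upload_buffer_size(4 * 1024 * 1024_usize) // smaller buffer lowers memory use per upload
    .send_buffered()
    .await?;
println!("response details={response:?}");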
pub fn set_md5_hash<I, V>(self, i: I) -> Self
impl<T> UploadObject<T, Crc32c>
pub fn with_known_crc32c<V: Into<u32>>(self, v: V) -> UploadObject<T, KnownCrc32c>
Provide a precomputed value for the CRC32C checksum.
§Example
use crc32c::crc32c;
let response = client
.upload_object("projects/_/buckets/my-bucket", "my-object", "hello world")
.with_known_crc32c(crc32c(b"hello world"))
.send_buffered()
.await?;
println!("response details={response:?}");In some applications, the payload’s CRC32C checksum is already known. For example, the application may be downloading the data from another blob storage system.
In such cases, it is safer to pass the known CRC32C of the payload to Cloud Storage, and more efficient to skip the computation in the client library.
Note that once you provide a CRC32C value to this builder, the client library no longer computes the CRC32C checksum itself; you can still use compute_md5() to have the library compute the MD5 hash.
pub fn with_known_md5_hash<I, V>(self, i: I) -> UploadObject<T, Crc32c<KnownMd5>>
Provide a precomputed value for the MD5 hash.
§Example
use md5::compute;
let hash = md5::compute(b"hello world");
let response = client
.upload_object("projects/_/buckets/my-bucket", "my-object", "hello world")
.with_known_md5_hash(bytes::Bytes::from_owner(hash.0))
.send_buffered()
.await?;
println!("response details={response:?}");In some applications, the payload’s MD5 hash is already known. For example, the application may be downloading the data from another blob storage system.
In such cases, it is safer to pass the known MD5 of the payload to Cloud Storage, and more efficient to skip the computation in the client library.
Note that once you provide an MD5 value to this builder, you cannot use compute_md5() to also have the library compute the checksums.
pub fn compute_md5(self) -> UploadObject<T, Md5<Crc32c>>
Enables computation of MD5 hashes.
§Example
let payload = tokio::fs::File::open("my-data").await?;
let response = client
.upload_object("projects/_/buckets/my-bucket", "my-object", payload)
.compute_md5()
.send_buffered()
.await?;
println!("response details={response:?}");See precompute_checksums for more details on how checksums are used by the client library and their limitations.
impl<T> UploadObject<T, Crc32c<KnownMd5>>
pub fn with_known_crc32c<V: Into<u32>>(self, v: V) -> UploadObject<T, Known>
impl<T> UploadObject<T, Md5<Crc32c>>
pub fn with_known_crc32c<V: Into<u32>>(self, v: V) -> UploadObject<T, Md5<KnownCrc32c>>
pub fn with_known_md5_hash<I, V>(self, i: I) -> UploadObject<T, Crc32c<KnownMd5>>
impl<T> UploadObject<T, Md5<KnownCrc32c>>
pub fn with_known_md5_hash<I, V>(self, i: I) -> UploadObject<T, Known>
impl<T> UploadObject<T, KnownCrc32c>
pub fn with_known_md5_hash<I, V>(self, i: I) -> UploadObject<T, Known>
pub fn compute_md5(self) -> UploadObject<T, Md5<KnownCrc32c>>
impl<T, C> UploadObject<T, C>
pub async fn send_unbuffered(self) -> Result<Object>
A simple upload from a buffer.
§Example
let response = client
.upload_object("projects/_/buckets/my-bucket", "my-object", "hello world")
.send_unbuffered()
.await?;
println!("response details={response:?}");Sourcepub async fn precompute_checksums(self) -> Result<UploadObject<T, Known>>
pub async fn precompute_checksums(self) -> Result<UploadObject<T, Known>>
Precompute the payload checksums before uploading the data.
If the checksums are known when the upload starts, the client library can include the checksums with the upload request, and the service can reject the upload if the payload and the checksums do not match.
§Example
let payload = tokio::fs::File::open("my-data").await?;
let response = client
.upload_object("projects/_/buckets/my-bucket", "my-object", payload)
.precompute_checksums()
.await?
.send_unbuffered()
.await?;
println!("response details={response:?}");Precomputing the checksums can be expensive if the data source is slow to read. Therefore, the client library does not precompute the checksums by default. The client library compares the checksums computed by the service against its own checksums. If they do not match, the client library returns an error. However, the service has already created the object with the (likely incorrect) data.
The client library currently uses the JSON API; with this API it is not possible to send the checksums at the end of the upload.
impl<T, C> UploadObject<T, C>
where
    C: ChecksumEngine + Send + Sync + 'static,
    T: StreamingSource + Send + Sync + 'static,
    T::Error: Error + Send + Sync + 'static,
pub async fn send_buffered(self) -> Result<Object>
Upload an object from a streaming source without rewinds.
§Example
let response = client
.upload_object("projects/_/buckets/my-bucket", "my-object", "hello world")
.send_buffered()
.await?;
println!("response details={response:?}");Trait Implementations§
Auto Trait Implementations§
impl<T, C = Crc32c> !Freeze for UploadObject<T, C>
impl<T, C = Crc32c> !RefUnwindSafe for UploadObject<T, C>
impl<T, C> Send for UploadObject<T, C>
impl<T, C> Sync for UploadObject<T, C>
impl<T, C> Unpin for UploadObject<T, C>
impl<T, C = Crc32c> !UnwindSafe for UploadObject<T, C>
Blanket Implementations§
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
fn borrow_mut(&mut self) -> &mut T
impl<T> Instrument for T
fn instrument(self, span: Span) -> Instrumented<Self>
fn in_current_span(self) -> Instrumented<Self>
impl<T> IntoRequest<T> for T
fn into_request(self) -> Request<T>
Wrap the input message T in a tonic::Request