use crate::storage::error::{
    internal_error, invalid_argument, invalid_root_operation, no_download_url, StorageResult,
};
use crate::storage::list::{parse_list_result, ListOptions, ListResult};
use crate::storage::location::Location;
use crate::storage::metadata::serde::ObjectMetadata;
use crate::storage::path::{child, last_component, parent};
#[cfg(not(target_arch = "wasm32"))]
use crate::storage::request::StreamingResponse;
use crate::storage::request::{
    continue_resumable_upload_request, create_resumable_upload_request, delete_object_request,
    download_bytes_request, download_url_request, get_metadata_request, list_request,
    multipart_upload_request, update_metadata_request, RESUMABLE_UPLOAD_CHUNK_SIZE,
};
use crate::storage::service::FirebaseStorageImpl;
use crate::storage::stream::UploadAsyncRead;
use crate::storage::string::{prepare_string_upload, StringFormat};
use crate::storage::upload::{UploadProgress, UploadTask};
#[cfg(all(feature = "wasm-web", target_arch = "wasm32"))]
use crate::storage::wasm;
use crate::storage::{SettableMetadata, UploadMetadata};
use std::convert::TryFrom;

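/// A reference to a location (bucket plus object path) in Cloud Storage.
///
/// A `StorageReference` is a cheap, cloneable handle pairing the owning
/// storage service with a [`Location`]; it performs no network I/O until one
/// of its async methods is invoked.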
#[derive(Clone)]
pub struct StorageReference {
    storage: FirebaseStorageImpl,
    location: Location,
}

#[cfg(not(target_arch = "wasm32"))]
pub type StreamingDownload = StreamingResponse;

impl StorageReference {
    pub(crate) fn new(storage: FirebaseStorageImpl, location: Location) -> Self {
        Self { storage, location }
    }

    pub fn storage(&self) -> FirebaseStorageImpl {
        self.storage.clone()
    }

    pub fn location(&self) -> &Location {
        &self.location
    }

    pub fn to_gs_url(&self) -> String {
        if self.location.path().is_empty() {
            format!("gs://{}/", self.location.bucket())
        } else {
            format!("gs://{}/{}", self.location.bucket(), self.location.path())
        }
    }

    pub fn root(&self) -> StorageReference {
        let location = Location::new(self.location.bucket(), "");
        StorageReference::new(self.storage.clone(), location)
    }

    pub fn bucket(&self) -> &str {
        self.location.bucket()
    }

    pub fn full_path(&self) -> &str {
        self.location.path()
    }

    pub fn name(&self) -> String {
        last_component(self.location.path())
    }

    pub fn parent(&self) -> Option<StorageReference> {
        let path = parent(self.location.path())?;
        let location = Location::new(self.location.bucket(), path);
        Some(StorageReference::new(self.storage.clone(), location))
    }

    pub fn child(&self, segment: &str) -> StorageReference {
        let new_path = child(self.location.path(), segment);
        let location = Location::new(self.location.bucket(), new_path);
        StorageReference::new(self.storage.clone(), location)
    }

    fn ensure_not_root(&self, operation: &str) -> StorageResult<()> {
        if self.location.is_root() {
            Err(invalid_root_operation(operation))
        } else {
            Ok(())
        }
    }

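    /// Fetches the object's metadata from the backend.
    ///
    /// Returns an error when called on the bucket root, since only concrete
    /// objects carry metadata.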
    pub async fn get_metadata(&self) -> StorageResult<ObjectMetadata> {
        self.ensure_not_root("get_metadata")?;
        let request = get_metadata_request(&self.storage, &self.location);
        let json = self.storage.run_request(request).await?;
        Ok(ObjectMetadata::from_value(json))
    }

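    /// Lists the items and prefixes directly under this reference, returning
    /// a single page of results shaped by `options`; defaults are used when
    /// `options` is `None`.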
    pub async fn list(&self, options: Option<ListOptions>) -> StorageResult<ListResult> {
        let opts = options.unwrap_or_default();
        let request = list_request(&self.storage, &self.location, &opts);
        let json = self.storage.run_request(request).await?;
        parse_list_result(&self.storage, self.location.bucket(), json)
    }

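    /// Follows `next_page_token` across repeated [`list`](Self::list) calls
    /// and merges every page of prefixes and items into one [`ListResult`].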
    pub async fn list_all(&self) -> StorageResult<ListResult> {
        let mut merged = ListResult::default();
        let mut page_token: Option<String> = None;

        loop {
            let mut options = ListOptions::default();
            options.page_token = page_token.clone();
            let page = self.list(Some(options)).await?;
            merged.prefixes.extend(page.prefixes);
            merged.items.extend(page.items);

            if let Some(token) = page.next_page_token {
                page_token = Some(token);
            } else {
                break;
            }
        }

        Ok(merged)
    }

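    /// Sends a metadata update for the object at this reference and returns
    /// the metadata reported by the server. Cannot be called on the root.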
    pub async fn update_metadata(
        &self,
        metadata: SettableMetadata,
    ) -> StorageResult<ObjectMetadata> {
        self.ensure_not_root("update_metadata")?;
        let request = update_metadata_request(&self.storage, &self.location, metadata);
        let json = self.storage.run_request(request).await?;
        Ok(ObjectMetadata::from_value(json))
    }

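    /// Downloads the object's contents into memory.
    ///
    /// When `max_download_size_bytes` is set, the returned buffer is clipped
    /// to that many bytes; a limit that does not fit in `usize` on the current
    /// platform is rejected as an invalid argument.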
    pub async fn get_bytes(&self, max_download_size_bytes: Option<u64>) -> StorageResult<Vec<u8>> {
        self.ensure_not_root("get_bytes")?;
        let request =
            download_bytes_request(&self.storage, &self.location, max_download_size_bytes);
        let mut bytes = self.storage.run_request(request).await?;

        if let Some(limit) = max_download_size_bytes {
            let limit_usize = usize::try_from(limit).map_err(|_| {
                invalid_argument("max_download_size_bytes exceeds platform addressable memory")
            })?;
            if bytes.len() > limit_usize {
                bytes.truncate(limit_usize);
            }
        }

        Ok(bytes)
    }

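    /// Downloads the object as a [`StreamingResponse`] instead of buffering
    /// the whole body in memory (native targets only).
    ///
    /// Illustrative sketch, not compiled as a doctest; it assumes `reference`
    /// was obtained from the storage service and that the caller consumes the
    /// streaming response through whatever accessors [`StreamingResponse`]
    /// exposes:
    ///
    /// ```ignore
    /// let download = reference.get_stream(None).await?;
    /// // Consume `download` chunk by chunk rather than collecting it into a
    /// // single Vec<u8> the way get_bytes does.
    /// ```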
    #[cfg(not(target_arch = "wasm32"))]
    pub async fn get_stream(
        &self,
        max_download_size_bytes: Option<u64>,
    ) -> StorageResult<StreamingResponse> {
        self.ensure_not_root("get_stream")?;
        let request =
            download_bytes_request(&self.storage, &self.location, max_download_size_bytes);
        self.storage.run_streaming_request(request).await
    }

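    /// Requests a download URL for the object. Returns an error if the
    /// backend response does not contain a URL.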
    pub async fn get_download_url(&self) -> StorageResult<String> {
        self.ensure_not_root("get_download_url")?;
        let request = download_url_request(&self.storage, &self.location);
        let url = self.storage.run_request(request).await?;
        url.ok_or_else(no_download_url)
    }

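    /// Deletes the object at this reference. Calling this on the bucket root
    /// is rejected before any request is sent.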
    pub async fn delete_object(&self) -> StorageResult<()> {
        self.ensure_not_root("delete_object")?;
        let request = delete_object_request(&self.storage, &self.location);
        self.storage.run_request(request).await
    }

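    /// Uploads an in-memory buffer in a single multipart request and returns
    /// the metadata of the created object.
    ///
    /// Illustrative sketch, not compiled as a doctest; it assumes a storage
    /// service handle like the one built in this module's tests:
    ///
    /// ```ignore
    /// let reference = storage.root_reference()?.child("notes/hello.txt");
    /// let metadata = reference.upload_bytes(b"hello world".to_vec(), None).await?;
    /// ```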
    pub async fn upload_bytes(
        &self,
        data: impl Into<Vec<u8>>,
        metadata: Option<UploadMetadata>,
    ) -> StorageResult<ObjectMetadata> {
        self.ensure_not_root("upload_bytes")?;
        let request =
            multipart_upload_request(&self.storage, &self.location, data.into(), metadata);
        self.storage.run_upload_request(request).await
    }

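    /// Creates an [`UploadTask`] that uploads `data` with the resumable
    /// protocol. No request is sent here; the task only captures the
    /// reference, the buffer, and the optional metadata.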
    pub fn upload_bytes_resumable(
        &self,
        data: Vec<u8>,
        metadata: Option<UploadMetadata>,
    ) -> StorageResult<UploadTask> {
        self.ensure_not_root("upload_bytes_resumable")?;
        Ok(UploadTask::new(self.clone(), data, metadata))
    }

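    /// Decodes `data` according to `format`, merges any content type inferred
    /// during decoding into `metadata` (an explicit content type always wins),
    /// and uploads the resulting bytes.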
    pub async fn upload_string(
        &self,
        data: &str,
        format: StringFormat,
        metadata: Option<UploadMetadata>,
    ) -> StorageResult<ObjectMetadata> {
        self.ensure_not_root("upload_string")?;
        let prepared = prepare_string_upload(data, format)?;
        let metadata = merge_metadata(metadata, prepared.content_type);
        self.upload_bytes(prepared.bytes, metadata).await
    }

    #[cfg(all(feature = "wasm-web", target_arch = "wasm32"))]
    pub async fn upload_blob(
        &self,
        blob: &web_sys::Blob,
        metadata: Option<UploadMetadata>,
    ) -> StorageResult<ObjectMetadata> {
        let data = wasm::blob_to_vec(blob).await?;
        self.upload_bytes(data, metadata).await
    }

    #[cfg(all(feature = "wasm-web", target_arch = "wasm32"))]
    pub async fn upload_blob_resumable(
        &self,
        blob: &web_sys::Blob,
        metadata: Option<UploadMetadata>,
    ) -> StorageResult<UploadTask> {
        let data = wasm::blob_to_vec(blob).await?;
        self.upload_bytes_resumable(data, metadata)
    }

    #[cfg(all(feature = "wasm-web", target_arch = "wasm32"))]
    pub async fn upload_uint8_array(
        &self,
        data: &js_sys::Uint8Array,
        metadata: Option<UploadMetadata>,
    ) -> StorageResult<ObjectMetadata> {
        self.upload_bytes(wasm::uint8_array_to_vec(data), metadata)
            .await
    }

    #[cfg(all(feature = "wasm-web", target_arch = "wasm32"))]
    pub fn upload_uint8_array_resumable(
        &self,
        data: &js_sys::Uint8Array,
        metadata: Option<UploadMetadata>,
    ) -> StorageResult<UploadTask> {
        self.upload_bytes_resumable(wasm::uint8_array_to_vec(data), metadata)
    }

    #[cfg(all(feature = "wasm-web", target_arch = "wasm32"))]
    pub async fn get_blob(
        &self,
        max_download_size_bytes: Option<u64>,
    ) -> StorageResult<web_sys::Blob> {
        let bytes = self.get_bytes(max_download_size_bytes).await?;
        wasm::bytes_to_blob(&bytes)
    }

    #[cfg(all(feature = "wasm-web", target_arch = "wasm32"))]
    pub async fn upload_readable_stream_resumable(
        &self,
        stream: &web_sys::ReadableStream,
        total_size: u64,
        metadata: Option<UploadMetadata>,
    ) -> StorageResult<ObjectMetadata> {
        let reader = wasm::readable_stream_async_reader(stream)?;
        self.upload_reader_resumable(reader, total_size, metadata)
            .await
    }

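    /// Uploads `total_size` bytes from an async reader using the resumable
    /// protocol. Equivalent to
    /// [`upload_reader_resumable_with_progress`](Self::upload_reader_resumable_with_progress)
    /// with a no-op progress callback.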
    pub async fn upload_reader_resumable<R>(
        &self,
        reader: R,
        total_size: u64,
        metadata: Option<UploadMetadata>,
    ) -> StorageResult<ObjectMetadata>
    where
        R: UploadAsyncRead,
    {
        self.upload_reader_resumable_with_progress(reader, total_size, metadata, |_| {})
            .await
    }

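    /// Drives a resumable upload from an async reader, invoking `progress`
    /// after each committed chunk with the offset reached so far and the
    /// declared total size.
    ///
    /// The reader must yield exactly `total_size` bytes; if it ends early the
    /// upload fails with an internal error. Data is sent in chunks of
    /// `RESUMABLE_UPLOAD_CHUNK_SIZE`, and the final chunk finalizes the
    /// session.
    ///
    /// Illustrative sketch, not compiled as a doctest; it assumes
    /// `futures::io::Cursor` satisfies this crate's [`UploadAsyncRead`] bound:
    ///
    /// ```ignore
    /// let data = vec![0u8; 1024];
    /// let reader = futures::io::Cursor::new(data);
    /// let metadata = reference
    ///     .upload_reader_resumable_with_progress(reader, 1024, None, |progress| {
    ///         // Report progress to the caller (e.g. log or update a UI).
    ///         let _ = progress;
    ///     })
    ///     .await?;
    /// ```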
    pub async fn upload_reader_resumable_with_progress<R, F>(
        &self,
        mut reader: R,
        total_size: u64,
        metadata: Option<UploadMetadata>,
        mut progress: F,
    ) -> StorageResult<ObjectMetadata>
    where
        R: UploadAsyncRead,
        F: FnMut(UploadProgress),
    {
        use futures::io::AsyncReadExt;

        self.ensure_not_root("upload_reader_resumable")?;

        let storage = self.storage();
        let request =
            create_resumable_upload_request(&storage, self.location(), metadata, total_size);
        let upload_url = storage.run_upload_request(request).await?;

        if total_size == 0 {
            let request = continue_resumable_upload_request(
                &storage,
                self.location(),
                &upload_url,
                0,
                0,
                Vec::new(),
                true,
            );
            let status = storage.run_upload_request(request).await?;
            progress(UploadProgress::new(0, 0));
            let metadata = status
                .metadata
                .ok_or_else(|| internal_error("resumable upload completed without metadata"))?;
            return Ok(metadata);
        }

        let chunk_size = RESUMABLE_UPLOAD_CHUNK_SIZE as usize;
        let mut buffer = vec![0u8; chunk_size];
        let mut offset = 0u64;

        while offset < total_size {
            // Take the min in u64 before casting so the chunk length cannot be
            // truncated on 32-bit targets (including wasm32) when more than
            // usize::MAX bytes remain.
            let to_read = (total_size - offset).min(chunk_size as u64) as usize;
            let mut read_total = 0usize;

            while read_total < to_read {
                let read = reader
                    .read(&mut buffer[read_total..to_read])
                    .await
                    .map_err(|err| {
                        internal_error(format!("failed to read from upload source: {err}"))
                    })?;
                if read == 0 {
                    break;
                }
                read_total += read;
            }

            if read_total == 0 {
                return Err(internal_error(
                    "upload source ended before the declared total_size was reached",
                ));
            }

            let finalize = offset + read_total as u64 == total_size;
            let chunk = buffer[..read_total].to_vec();

            let request = continue_resumable_upload_request(
                &storage,
                self.location(),
                &upload_url,
                offset,
                total_size,
                chunk,
                finalize,
            );
            let status = storage.run_upload_request(request).await?;
            offset = status.current;
            progress(UploadProgress::new(offset, total_size));

            if finalize {
                let metadata = status
                    .metadata
                    .ok_or_else(|| internal_error("resumable upload completed without metadata"))?;
                return Ok(metadata);
            }
        }

        let request = continue_resumable_upload_request(
            &storage,
            self.location(),
            &upload_url,
            offset,
            total_size,
            Vec::new(),
            true,
        );
        let status = storage.run_upload_request(request).await?;
        progress(UploadProgress::new(offset, total_size));
        let metadata = status
            .metadata
            .ok_or_else(|| internal_error("resumable upload completed without metadata"))?;
        Ok(metadata)
    }
}

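/// Merges a content type inferred from the upload source into the caller's
/// metadata: an explicit content type is preserved, metadata is created when
/// none was supplied, and `None` is returned only when there is nothing to
/// merge.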
fn merge_metadata(
    metadata: Option<UploadMetadata>,
    inferred_content_type: Option<String>,
) -> Option<UploadMetadata> {
    match (metadata, inferred_content_type) {
        (Some(mut metadata), Some(content_type)) => {
            if metadata.content_type.is_none() {
                metadata.content_type = Some(content_type);
            }
            Some(metadata)
        }
        (Some(metadata), None) => Some(metadata),
        (None, Some(content_type)) => {
            let mut metadata = UploadMetadata::new();
            metadata.content_type = Some(content_type);
            Some(metadata)
        }
        (None, None) => None,
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::app::initialize_app;
    use crate::app::{FirebaseAppSettings, FirebaseOptions};

    fn unique_settings() -> FirebaseAppSettings {
        use std::sync::atomic::{AtomicUsize, Ordering};
        static COUNTER: AtomicUsize = AtomicUsize::new(0);
        FirebaseAppSettings {
            name: Some(format!(
                "storage-ref-{}",
                COUNTER.fetch_add(1, Ordering::SeqCst)
            )),
            ..Default::default()
        }
    }

    async fn build_storage() -> FirebaseStorageImpl {
        let options = FirebaseOptions {
            storage_bucket: Some("my-bucket".into()),
            ..Default::default()
        };
        let app = initialize_app(options, Some(unique_settings()))
            .await
            .unwrap();
        let container = app.container();
        let auth_provider = container.get_provider("auth-internal");
        let app_check_provider = container.get_provider("app-check-internal");
        FirebaseStorageImpl::new(app, auth_provider, app_check_provider, None, None).unwrap()
    }

    #[tokio::test]
    async fn root_reference_has_expected_url() {
        let storage = build_storage().await;
        let root = storage.root_reference().unwrap();
        assert_eq!(root.to_gs_url(), "gs://my-bucket/");
    }

    #[tokio::test]
    async fn child_computes_new_path() {
        let storage = build_storage().await;
        let root = storage.root_reference().unwrap();
        let image = root.child("images/photo.png");
        assert_eq!(image.to_gs_url(), "gs://my-bucket/images/photo.png");
        assert_eq!(image.name(), "photo.png");
        assert_eq!(image.parent().unwrap().to_gs_url(), "gs://my-bucket/images");
    }

    #[test]
    fn merge_metadata_preserves_existing_content_type() {
        let original = UploadMetadata::new().with_content_type("image/png");
        let merged =
            merge_metadata(Some(original.clone()), Some("text/plain".to_string())).unwrap();
        assert_eq!(merged.content_type.as_deref(), Some("image/png"));
    }

    #[test]
    fn merge_metadata_uses_inferred_when_absent() {
        let merged = merge_metadata(None, Some("text/plain".to_string())).unwrap();
        assert_eq!(merged.content_type.as_deref(), Some("text/plain"));
    }
}