supabase/
storage.rs

1//! Storage module for Supabase file operations
2
3use crate::{
4    error::{Error, Result},
5    types::{SupabaseConfig, Timestamp},
6};
7use bytes::Bytes;
8
9#[cfg(target_arch = "wasm32")]
10use reqwest::Client as HttpClient;
11#[cfg(not(target_arch = "wasm32"))]
12use reqwest::{multipart, Client as HttpClient};
13use serde::{Deserialize, Serialize};
14use std::{collections::HashMap, sync::Arc};
15
16#[cfg(target_arch = "wasm32")]
17use tracing::{debug, info};
18#[cfg(not(target_arch = "wasm32"))]
19use tracing::{debug, info, warn};
20use url::Url;
21
22// Resumable uploads support
23#[cfg(target_arch = "wasm32")]
24use std::time::Duration;
25#[cfg(not(target_arch = "wasm32"))]
26use tokio::time::{sleep, Duration};
27
28// Helper for async sleep across platforms
#[cfg(not(target_arch = "wasm32"))]
async fn async_sleep(duration: Duration) {
    // Native targets: delegate to tokio's non-blocking timer.
    sleep(duration).await;
}
33
#[cfg(all(target_arch = "wasm32", feature = "wasm"))]
async fn async_sleep(duration: Duration) {
    // WASM with the `wasm` feature: gloo-timers provides a future-based
    // sleep backed by the browser's timer APIs.
    use gloo_timers::future::sleep as gloo_sleep;
    gloo_sleep(duration).await;
}
39
#[cfg(all(target_arch = "wasm32", not(feature = "wasm")))]
#[allow(dead_code)]
async fn async_sleep(_duration: Duration) {
    // No-op for wasm32 without the `wasm` feature: there is no timer
    // backend available, so retry delays are skipped (resumable uploads
    // are not fully supported in this configuration).
}
45
/// Storage client for file operations
///
/// Cheap to clone: both fields are `Arc`s, so clones share the same
/// HTTP client and configuration.
#[derive(Debug, Clone)]
pub struct Storage {
    /// Shared HTTP client used for all storage requests
    // NOTE(review): requests without an explicit Authorization header rely on
    // this client's default headers — confirm they carry the API key.
    http_client: Arc<HttpClient>,
    /// Project configuration (base URL, API key, optional service-role key)
    config: Arc<SupabaseConfig>,
}
52
/// Storage bucket information
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Bucket {
    /// Unique bucket identifier
    pub id: String,
    /// Human-readable bucket name
    pub name: String,
    /// Owner identifier, if any
    pub owner: Option<String>,
    /// Whether objects in this bucket are publicly accessible
    pub public: bool,
    /// Per-file size limit in bytes, when enforced
    pub file_size_limit: Option<u64>,
    /// Allowed MIME types, when restricted
    pub allowed_mime_types: Option<Vec<String>>,
    /// Creation timestamp
    pub created_at: Timestamp,
    /// Last update timestamp
    pub updated_at: Timestamp,
}
65
/// File object information
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FileObject {
    /// Object name (path within the bucket)
    pub name: String,
    /// Object id, when provided by the server
    pub id: Option<String>,
    /// Last modification time, if known
    pub updated_at: Option<Timestamp>,
    /// Creation time, if known
    pub created_at: Option<Timestamp>,
    /// Last access time, if known
    pub last_accessed_at: Option<Timestamp>,
    /// Arbitrary metadata attached to the object by the server
    pub metadata: Option<HashMap<String, serde_json::Value>>,
}
76
/// Upload response
///
/// Field names are renamed to match the capitalized JSON keys
/// (`Key`, `Id`) returned by the storage API.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UploadResponse {
    /// Object key returned by the server
    #[serde(rename = "Key")]
    pub key: String,
    /// Optional object id returned by the server
    #[serde(rename = "Id")]
    pub id: Option<String>,
}
85
/// File options for upload
#[derive(Debug, Clone, Default)]
pub struct FileOptions {
    /// Cache-Control value to store with the object
    pub cache_control: Option<String>,
    /// MIME content type of the file
    pub content_type: Option<String>,
    /// When true, overwrite an existing object at the same path
    /// (sent as the `x-upsert: true` header)
    pub upsert: bool,
}
93
/// Transform options for image processing
///
/// All fields are optional; only the parameters that are set are applied
/// (see `get_public_url_transformed` and `create_signed_url`).
#[derive(Debug, Clone)]
pub struct TransformOptions {
    /// Target width in pixels
    pub width: Option<u32>,
    /// Target height in pixels
    pub height: Option<u32>,
    /// How the image is fitted into the target dimensions
    pub resize: Option<ResizeMode>,
    /// Output image format
    pub format: Option<ImageFormat>,
    /// Output quality — presumably 1-100; confirm against the storage API
    pub quality: Option<u8>,
}
103
/// Resize mode for image transformations
///
/// Serialized in lowercase (`cover`, `contain`, `fill`) to match the
/// storage API's query-parameter values.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum ResizeMode {
    /// Fill the target box, cropping as needed
    Cover,
    /// Fit entirely inside the target box, preserving aspect ratio
    Contain,
    /// Stretch to exactly the target dimensions
    Fill,
}
112
/// Image format for transformations
///
/// Serialized in lowercase (`webp`, `jpeg`, `png`, `avif`) to match the
/// storage API's query-parameter values.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum ImageFormat {
    Webp,
    Jpeg,
    Png,
    Avif,
}
122
/// Resumable upload session information
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UploadSession {
    /// Identifier for this upload session
    pub upload_id: String,
    /// Size of each part in bytes
    pub part_size: u64,
    /// Total size of the object being uploaded, in bytes
    pub total_size: u64,
    /// Parts that have been uploaded so far
    pub uploaded_parts: Vec<UploadedPart>,
    /// Destination bucket
    pub bucket_id: String,
    /// Destination object path within the bucket
    pub object_path: String,
    /// When the session was created
    pub created_at: Timestamp,
    /// When the session expires
    pub expires_at: Timestamp,
}
135
/// Information about an uploaded part
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UploadedPart {
    /// 1-based part index — presumably; confirm against the upload protocol
    pub part_number: u32,
    /// Entity tag returned by the server for this part
    pub etag: String,
    /// Size of this part in bytes
    pub size: u64,
}
143
/// Configuration for resumable uploads
///
/// Construct via [`Default`] and override individual fields as needed.
#[derive(Debug, Clone)]
pub struct ResumableUploadConfig {
    /// Size of each chunk (default: 5MB)
    pub chunk_size: u64,
    /// Maximum retry attempts (default: 3)
    pub max_retries: u32,
    /// Retry delay in milliseconds (default: 1000)
    pub retry_delay: u64,
    /// Whether to verify uploaded chunks with checksums (default: true)
    pub verify_checksums: bool,
}
156
157impl Default for ResumableUploadConfig {
158    fn default() -> Self {
159        Self {
160            chunk_size: 5 * 1024 * 1024, // 5MB
161            max_retries: 3,
162            retry_delay: 1000,
163            verify_checksums: true,
164        }
165    }
166}
167
/// Progress callback for resumable uploads
///
/// Presumably invoked as `(bytes_uploaded, total_bytes)` — confirm at
/// the call site (caller code is outside this view).
pub type UploadProgressCallback = Arc<dyn Fn(u64, u64) + Send + Sync>;
170
/// Advanced metadata for files
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FileMetadata {
    /// Key/value tags attached to the file
    pub tags: Option<HashMap<String, String>>,
    /// Free-form JSON metadata
    pub custom_metadata: Option<HashMap<String, serde_json::Value>>,
    /// Human-readable description
    pub description: Option<String>,
    /// Category label
    pub category: Option<String>,
    /// Text indexed for content search
    pub searchable_content: Option<String>,
}
180
/// Search options for file metadata
#[derive(Debug, Clone, Default, Serialize)]
pub struct SearchOptions {
    /// Tag key/value pairs to filter by
    pub tags: Option<HashMap<String, String>>,
    /// Category to filter by
    pub category: Option<String>,
    /// Text query against searchable content
    pub content_search: Option<String>,
    /// Maximum number of results to return
    pub limit: Option<u32>,
    /// Number of results to skip (pagination)
    pub offset: Option<u32>,
}
190
/// Storage event types for real-time notifications
///
/// Variants serialize to snake_case wire names via explicit renames
/// (e.g. `file_uploaded`).
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum StorageEvent {
    #[serde(rename = "file_uploaded")]
    FileUploaded,
    #[serde(rename = "file_deleted")]
    FileDeleted,
    #[serde(rename = "file_updated")]
    FileUpdated,
    #[serde(rename = "bucket_created")]
    BucketCreated,
    #[serde(rename = "bucket_deleted")]
    BucketDeleted,
}
205
/// Storage event notification message
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StorageEventMessage {
    /// What happened (upload, delete, …)
    pub event: StorageEvent,
    /// Bucket the event occurred in
    pub bucket_id: String,
    /// Affected object path, when the event concerns a single object
    pub object_path: Option<String>,
    /// Snapshot of the object's metadata, when available
    pub object_metadata: Option<FileObject>,
    /// When the event occurred
    pub timestamp: Timestamp,
    /// User who triggered the event, when known
    pub user_id: Option<String>,
}
216
/// Callback for storage events
///
/// Shared, thread-safe handler invoked with each incoming event message.
pub type StorageEventCallback = Arc<dyn Fn(StorageEventMessage) + Send + Sync>;
219
220impl Storage {
221    /// Create a new Storage instance
222    pub fn new(config: Arc<SupabaseConfig>, http_client: Arc<HttpClient>) -> Result<Self> {
223        debug!("Initializing Storage module");
224
225        Ok(Self {
226            http_client,
227            config,
228        })
229    }
230
231    /// Get the appropriate authorization key for admin operations
232    fn get_admin_key(&self) -> &str {
233        self.config
234            .service_role_key
235            .as_ref()
236            .unwrap_or(&self.config.key)
237    }
238
239    /// List all storage buckets
240    pub async fn list_buckets(&self) -> Result<Vec<Bucket>> {
241        debug!("Listing all storage buckets");
242
243        let url = format!("{}/storage/v1/bucket", self.config.url);
244        let response = self.http_client.get(&url).send().await?;
245
246        if !response.status().is_success() {
247            let status = response.status();
248            let error_msg = match response.text().await {
249                Ok(text) => text,
250                Err(_) => format!("List buckets failed with status: {}", status),
251            };
252            return Err(Error::storage(error_msg));
253        }
254
255        let buckets: Vec<Bucket> = response.json().await?;
256        info!("Listed {} buckets successfully", buckets.len());
257
258        Ok(buckets)
259    }
260
261    /// Get bucket information
262    pub async fn get_bucket(&self, bucket_id: &str) -> Result<Bucket> {
263        debug!("Getting bucket info for: {}", bucket_id);
264
265        let url = format!("{}/storage/v1/bucket/{}", self.config.url, bucket_id);
266        let response = self.http_client.get(&url).send().await?;
267
268        if !response.status().is_success() {
269            let status = response.status();
270            let error_msg = match response.text().await {
271                Ok(text) => text,
272                Err(_) => format!("Get bucket failed with status: {}", status),
273            };
274            return Err(Error::storage(error_msg));
275        }
276
277        let bucket: Bucket = response.json().await?;
278        info!("Retrieved bucket info for: {}", bucket_id);
279
280        Ok(bucket)
281    }
282
283    /// Create a new storage bucket
284    pub async fn create_bucket(&self, id: &str, name: &str, public: bool) -> Result<Bucket> {
285        debug!("Creating bucket: {} ({})", name, id);
286
287        let payload = serde_json::json!({
288            "id": id,
289            "name": name,
290            "public": public
291        });
292
293        let url = format!("{}/storage/v1/bucket", self.config.url);
294        let response = self
295            .http_client
296            .post(&url)
297            .header("Authorization", format!("Bearer {}", self.get_admin_key()))
298            .json(&payload)
299            .send()
300            .await?;
301
302        if !response.status().is_success() {
303            let status = response.status();
304            let error_msg = match response.text().await {
305                Ok(text) => text,
306                Err(_) => format!("Create bucket failed with status: {}", status),
307            };
308            return Err(Error::storage(error_msg));
309        }
310
311        let bucket: Bucket = response.json().await?;
312        info!("Created bucket successfully: {}", id);
313
314        Ok(bucket)
315    }
316
317    /// Update bucket settings
318    pub async fn update_bucket(&self, id: &str, public: Option<bool>) -> Result<()> {
319        debug!("Updating bucket: {}", id);
320
321        let mut payload = serde_json::Map::new();
322        if let Some(public) = public {
323            payload.insert("public".to_string(), serde_json::Value::Bool(public));
324        }
325
326        let url = format!("{}/storage/v1/bucket/{}", self.config.url, id);
327        let response = self
328            .http_client
329            .put(&url)
330            .header("Authorization", format!("Bearer {}", self.get_admin_key()))
331            .json(&payload)
332            .send()
333            .await?;
334
335        if !response.status().is_success() {
336            let status = response.status();
337            let error_msg = match response.text().await {
338                Ok(text) => text,
339                Err(_) => format!("Update bucket failed with status: {}", status),
340            };
341            return Err(Error::storage(error_msg));
342        }
343
344        info!("Updated bucket successfully: {}", id);
345        Ok(())
346    }
347
348    /// Delete a storage bucket
349    pub async fn delete_bucket(&self, id: &str) -> Result<()> {
350        debug!("Deleting bucket: {}", id);
351
352        let url = format!("{}/storage/v1/bucket/{}", self.config.url, id);
353        let response = self
354            .http_client
355            .delete(&url)
356            .header("Authorization", format!("Bearer {}", self.get_admin_key()))
357            .send()
358            .await?;
359
360        if !response.status().is_success() {
361            let status = response.status();
362            let error_msg = match response.text().await {
363                Ok(text) => text,
364                Err(_) => format!("Delete bucket failed with status: {}", status),
365            };
366            return Err(Error::storage(error_msg));
367        }
368
369        info!("Deleted bucket successfully: {}", id);
370        Ok(())
371    }
372
    /// List files in a bucket
    ///
    /// Convenience wrapper around [`Storage::list_with_auth`] that uses
    /// the client's default credentials (no per-user token).
    pub async fn list(&self, bucket_id: &str, path: Option<&str>) -> Result<Vec<FileObject>> {
        self.list_with_auth(bucket_id, path, None).await
    }
377
378    /// List files in a bucket with authentication token
379    ///
380    /// This method allows passing a user authentication token for listing
381    /// files in protected buckets with Row Level Security policies.
382    ///
383    /// # Arguments
384    ///
385    /// * `bucket_id` - The bucket identifier
386    /// * `path` - Optional path prefix to filter files
387    /// * `user_token` - Optional user JWT token for authenticated requests
388    ///
389    /// # Examples
390    ///
391    /// ```rust,no_run
392    /// # use supabase::prelude::*;
393    /// # async fn example(client: &Client) -> Result<()> {
394    /// let auth_response = client.auth().sign_in_with_email_and_password("user@example.com", "password").await?;
395    /// let token = &auth_response.session.as_ref().unwrap().access_token;
396    ///
397    /// let files = client.storage()
398    ///     .list_with_auth("private-bucket", Some("user-files/"), Some(token))
399    ///     .await?;
400    /// # Ok(())
401    /// # }
402    /// ```
403    pub async fn list_with_auth(
404        &self,
405        bucket_id: &str,
406        path: Option<&str>,
407        user_token: Option<&str>,
408    ) -> Result<Vec<FileObject>> {
409        debug!("Listing files in bucket: {}", bucket_id);
410
411        let url = format!("{}/storage/v1/object/list/{}", self.config.url, bucket_id);
412
413        let payload = serde_json::json!({
414            "prefix": path.unwrap_or("")
415        });
416
417        let mut request = self.http_client.post(&url).json(&payload);
418
419        // Override Authorization header with user token if provided
420        if let Some(token) = user_token {
421            request = request.header("Authorization", format!("Bearer {}", token));
422        }
423
424        let response = request.send().await?;
425
426        if !response.status().is_success() {
427            let status = response.status();
428            let error_msg = match response.text().await {
429                Ok(text) => text,
430                Err(_) => format!("List files failed with status: {}", status),
431            };
432            return Err(Error::storage(error_msg));
433        }
434
435        let files: Vec<FileObject> = response.json().await?;
436        info!("Listed {} files in bucket: {}", files.len(), bucket_id);
437
438        Ok(files)
439    }
440
    /// Upload a file from bytes
    ///
    /// Convenience wrapper around [`Storage::upload_with_auth`] that uses
    /// the client's default credentials (no per-user token).
    #[cfg(not(target_arch = "wasm32"))]
    pub async fn upload(
        &self,
        bucket_id: &str,
        path: &str,
        file_body: Bytes,
        options: Option<FileOptions>,
    ) -> Result<UploadResponse> {
        self.upload_with_auth(bucket_id, path, file_body, options, None)
            .await
    }
453
454    /// Upload a file with authentication token
455    ///
456    /// This method allows passing a user authentication token for operations
457    /// on protected resources (e.g., when Row Level Security policies require authentication).
458    ///
459    /// # Arguments
460    ///
461    /// * `bucket_id` - The bucket identifier
462    /// * `path` - The file path in the bucket
463    /// * `file_body` - The file content as bytes
464    /// * `options` - Optional file options (content type, cache control, upsert)
465    /// * `user_token` - Optional user JWT token for authenticated requests
466    ///
467    /// # Examples
468    ///
469    /// ```rust,no_run
470    /// # use supabase::prelude::*;
471    /// # use bytes::Bytes;
472    /// # async fn example(client: &Client) -> Result<()> {
473    /// // Get user session token after authentication
474    /// let auth_response = client.auth().sign_in_with_email_and_password("user@example.com", "password").await?;
475    /// let token = &auth_response.session.as_ref().unwrap().access_token;
476    ///
477    /// // Upload to protected bucket
478    /// let file_data = Bytes::from("file content");
479    /// let response = client.storage()
480    ///     .upload_with_auth("private-bucket", "user-files/document.txt", file_data, None, Some(token))
481    ///     .await?;
482    /// # Ok(())
483    /// # }
484    /// ```
485    pub async fn upload_with_auth(
486        &self,
487        bucket_id: &str,
488        path: &str,
489        file_body: Bytes,
490        options: Option<FileOptions>,
491        user_token: Option<&str>,
492    ) -> Result<UploadResponse> {
493        debug!("Uploading file to bucket: {} at path: {}", bucket_id, path);
494
495        let options = options.unwrap_or_default();
496
497        let url = format!(
498            "{}/storage/v1/object/{}/{}",
499            self.config.url, bucket_id, path
500        );
501
502        let mut form = multipart::Form::new().part(
503            "file",
504            multipart::Part::bytes(file_body.to_vec()).file_name(path.to_string()),
505        );
506
507        if let Some(content_type) = options.content_type {
508            form = form.part("contentType", multipart::Part::text(content_type));
509        }
510
511        if let Some(cache_control) = options.cache_control {
512            form = form.part("cacheControl", multipart::Part::text(cache_control));
513        }
514
515        let mut request = self.http_client.post(&url).multipart(form);
516
517        // Override Authorization header with user token if provided
518        if let Some(token) = user_token {
519            request = request.header("Authorization", format!("Bearer {}", token));
520        }
521
522        if options.upsert {
523            request = request.header("x-upsert", "true");
524        }
525
526        let response = request.send().await?;
527
528        if !response.status().is_success() {
529            let status = response.status();
530            let error_msg = match response.text().await {
531                Ok(text) => text,
532                Err(_) => format!("Upload failed with status: {}", status),
533            };
534            return Err(Error::storage(error_msg));
535        }
536
537        let upload_response: UploadResponse = response.json().await?;
538        info!("Uploaded file successfully: {}", path);
539        Ok(upload_response)
540    }
541
    /// Upload a file from bytes (WASM version)
    ///
    /// Convenience wrapper around [`Storage::upload_with_auth`] that uses
    /// the client's default credentials (no per-user token).
    ///
    /// Note: WASM version uses simpler body upload due to multipart limitations
    #[cfg(target_arch = "wasm32")]
    pub async fn upload(
        &self,
        bucket_id: &str,
        path: &str,
        file_body: Bytes,
        options: Option<FileOptions>,
    ) -> Result<UploadResponse> {
        self.upload_with_auth(bucket_id, path, file_body, options, None)
            .await
    }
556
557    /// Upload a file with authentication token (WASM version)
558    #[cfg(target_arch = "wasm32")]
559    pub async fn upload_with_auth(
560        &self,
561        bucket_id: &str,
562        path: &str,
563        file_body: Bytes,
564        options: Option<FileOptions>,
565        user_token: Option<&str>,
566    ) -> Result<UploadResponse> {
567        debug!(
568            "Uploading file to bucket: {} at path: {} (WASM)",
569            bucket_id, path
570        );
571
572        let options = options.unwrap_or_default();
573
574        let url = format!(
575            "{}/storage/v1/object/{}/{}",
576            self.config.url, bucket_id, path
577        );
578
579        let mut request = self.http_client.post(&url).body(file_body);
580
581        // Override Authorization header with user token if provided
582        if let Some(token) = user_token {
583            request = request.header("Authorization", format!("Bearer {}", token));
584        }
585
586        if let Some(content_type) = options.content_type {
587            request = request.header("Content-Type", content_type);
588        }
589
590        if let Some(cache_control) = options.cache_control {
591            request = request.header("Cache-Control", cache_control);
592        }
593
594        if options.upsert {
595            request = request.header("x-upsert", "true");
596        }
597
598        let response = request.send().await?;
599
600        if !response.status().is_success() {
601            let status = response.status();
602            let error_msg = match response.text().await {
603                Ok(text) => text,
604                Err(_) => format!("Upload failed with status: {}", status),
605            };
606            return Err(Error::storage(error_msg));
607        }
608
609        let upload_response: UploadResponse = response.json().await?;
610        info!("Uploaded file successfully: {}", path);
611
612        Ok(upload_response)
613    }
614
615    /// Upload a file from local filesystem (Native only, requires tokio)
616    #[cfg(all(not(target_arch = "wasm32"), feature = "native"))]
617    pub async fn upload_file<P: AsRef<std::path::Path>>(
618        &self,
619        bucket_id: &str,
620        path: &str,
621        file_path: P,
622        options: Option<FileOptions>,
623    ) -> Result<UploadResponse> {
624        debug!("Uploading file from path: {:?}", file_path.as_ref());
625
626        let file_bytes = tokio::fs::read(file_path)
627            .await
628            .map_err(|e| Error::storage(format!("Failed to read file: {}", e)))?;
629
630        self.upload(bucket_id, path, Bytes::from(file_bytes), options)
631            .await
632    }
633
    /// Download a file
    ///
    /// Convenience wrapper around [`Storage::download_with_auth`] that uses
    /// the client's default credentials (no per-user token).
    pub async fn download(&self, bucket_id: &str, path: &str) -> Result<Bytes> {
        self.download_with_auth(bucket_id, path, None).await
    }
638
639    /// Download a file with authentication token
640    ///
641    /// This method allows passing a user authentication token for downloading
642    /// files from protected buckets with Row Level Security policies.
643    ///
644    /// # Arguments
645    ///
646    /// * `bucket_id` - The bucket identifier
647    /// * `path` - The file path in the bucket
648    /// * `user_token` - Optional user JWT token for authenticated requests
649    ///
650    /// # Examples
651    ///
652    /// ```rust,no_run
653    /// # use supabase::prelude::*;
654    /// # async fn example(client: &Client) -> Result<()> {
655    /// let auth_response = client.auth().sign_in_with_email_and_password("user@example.com", "password").await?;
656    /// let token = &auth_response.session.as_ref().unwrap().access_token;
657    ///
658    /// let file_data = client.storage()
659    ///     .download_with_auth("private-bucket", "user-files/document.txt", Some(token))
660    ///     .await?;
661    /// # Ok(())
662    /// # }
663    /// ```
664    pub async fn download_with_auth(
665        &self,
666        bucket_id: &str,
667        path: &str,
668        user_token: Option<&str>,
669    ) -> Result<Bytes> {
670        debug!(
671            "Downloading file from bucket: {} at path: {}",
672            bucket_id, path
673        );
674
675        let url = format!(
676            "{}/storage/v1/object/{}/{}",
677            self.config.url, bucket_id, path
678        );
679
680        let mut request = self.http_client.get(&url);
681
682        // Override Authorization header with user token if provided
683        if let Some(token) = user_token {
684            request = request.header("Authorization", format!("Bearer {}", token));
685        }
686
687        let response = request.send().await?;
688
689        if !response.status().is_success() {
690            let error_msg = format!("Download failed with status: {}", response.status());
691            return Err(Error::storage(error_msg));
692        }
693
694        let bytes = response.bytes().await?;
695        info!("Downloaded file successfully: {}", path);
696
697        Ok(bytes)
698    }
699
    /// Delete a file
    ///
    /// Convenience wrapper around [`Storage::remove_with_auth`] that uses
    /// the client's default credentials (no per-user token).
    pub async fn remove(&self, bucket_id: &str, paths: &[&str]) -> Result<()> {
        self.remove_with_auth(bucket_id, paths, None).await
    }
704
705    /// Remove/delete files with authentication token
706    ///
707    /// This method allows passing a user authentication token for deleting
708    /// files from protected buckets with Row Level Security policies.
709    ///
710    /// # Arguments
711    ///
712    /// * `bucket_id` - The bucket identifier
713    /// * `paths` - Array of file paths to delete
714    /// * `user_token` - Optional user JWT token for authenticated requests
715    ///
716    /// # Examples
717    ///
718    /// ```rust,no_run
719    /// # use supabase::prelude::*;
720    /// # async fn example(client: &Client) -> Result<()> {
721    /// let auth_response = client.auth().sign_in_with_email_and_password("user@example.com", "password").await?;
722    /// let token = &auth_response.session.as_ref().unwrap().access_token;
723    ///
724    /// client.storage()
725    ///     .remove_with_auth("private-bucket", &["user-files/old-doc.txt"], Some(token))
726    ///     .await?;
727    /// # Ok(())
728    /// # }
729    /// ```
730    pub async fn remove_with_auth(
731        &self,
732        bucket_id: &str,
733        paths: &[&str],
734        user_token: Option<&str>,
735    ) -> Result<()> {
736        debug!("Deleting files from bucket: {}", bucket_id);
737
738        let url = format!("{}/storage/v1/object/{}", self.config.url, bucket_id);
739
740        let payload = serde_json::json!({
741            "prefixes": paths
742        });
743
744        let mut request = self.http_client.delete(&url).json(&payload);
745
746        // Override Authorization header with user token if provided
747        if let Some(token) = user_token {
748            request = request.header("Authorization", format!("Bearer {}", token));
749        }
750
751        let response = request.send().await?;
752
753        if !response.status().is_success() {
754            let error_msg = format!("Delete files failed with status: {}", response.status());
755            return Err(Error::storage(error_msg));
756        }
757
758        info!("Deleted {} files successfully", paths.len());
759        Ok(())
760    }
761
762    /// Move a file
763    pub async fn r#move(&self, bucket_id: &str, from_path: &str, to_path: &str) -> Result<()> {
764        debug!("Moving file from {} to {}", from_path, to_path);
765
766        let url = format!("{}/storage/v1/object/move", self.config.url);
767
768        let payload = serde_json::json!({
769            "bucketId": bucket_id,
770            "sourceKey": from_path,
771            "destinationKey": to_path
772        });
773
774        let response = self.http_client.post(&url).json(&payload).send().await?;
775
776        if !response.status().is_success() {
777            let status = response.status();
778            let error_msg = match response.text().await {
779                Ok(text) => text,
780                Err(_) => format!("Move failed with status: {}", status),
781            };
782            return Err(Error::storage(error_msg));
783        }
784
785        info!("Moved file successfully from {} to {}", from_path, to_path);
786        Ok(())
787    }
788
789    /// Copy a file
790    pub async fn copy(&self, bucket_id: &str, from_path: &str, to_path: &str) -> Result<()> {
791        debug!("Copying file from {} to {}", from_path, to_path);
792
793        let url = format!("{}/storage/v1/object/copy", self.config.url);
794
795        let payload = serde_json::json!({
796            "bucketId": bucket_id,
797            "sourceKey": from_path,
798            "destinationKey": to_path
799        });
800
801        let response = self.http_client.post(&url).json(&payload).send().await?;
802
803        if !response.status().is_success() {
804            let status = response.status();
805            let error_msg = match response.text().await {
806                Ok(text) => text,
807                Err(_) => format!("Copy failed with status: {}", status),
808            };
809            return Err(Error::storage(error_msg));
810        }
811
812        info!("Copied file successfully from {} to {}", from_path, to_path);
813        Ok(())
814    }
815
816    /// Get public URL for a file
817    pub fn get_public_url(&self, bucket_id: &str, path: &str) -> String {
818        format!(
819            "{}/storage/v1/object/public/{}/{}",
820            self.config.url, bucket_id, path
821        )
822    }
823
824    /// Get signed URL for private file access
825    pub async fn create_signed_url(
826        &self,
827        bucket_id: &str,
828        path: &str,
829        expires_in: u32,
830        transform: Option<TransformOptions>,
831    ) -> Result<String> {
832        debug!(
833            "Creating signed URL for bucket: {} path: {} expires_in: {}",
834            bucket_id, path, expires_in
835        );
836
837        let url = format!(
838            "{}/storage/v1/object/sign/{}/{}",
839            self.config.url, bucket_id, path
840        );
841
842        let mut payload = serde_json::json!({
843            "expiresIn": expires_in
844        });
845
846        if let Some(transform_opts) = transform {
847            let mut transform_params = serde_json::Map::new();
848
849            if let Some(width) = transform_opts.width {
850                transform_params.insert("width".to_string(), serde_json::Value::from(width));
851            }
852            if let Some(height) = transform_opts.height {
853                transform_params.insert("height".to_string(), serde_json::Value::from(height));
854            }
855            if let Some(resize) = transform_opts.resize {
856                transform_params.insert("resize".to_string(), serde_json::to_value(resize)?);
857            }
858            if let Some(format) = transform_opts.format {
859                transform_params.insert("format".to_string(), serde_json::to_value(format)?);
860            }
861            if let Some(quality) = transform_opts.quality {
862                transform_params.insert("quality".to_string(), serde_json::Value::from(quality));
863            }
864
865            payload["transform"] = serde_json::Value::Object(transform_params);
866        }
867
868        let response = self.http_client.post(&url).json(&payload).send().await?;
869
870        if !response.status().is_success() {
871            let error_msg = format!(
872                "Create signed URL failed with status: {}",
873                response.status()
874            );
875            return Err(Error::storage(error_msg));
876        }
877
878        let response_data: serde_json::Value = response.json().await?;
879        let signed_url = response_data["signedURL"]
880            .as_str()
881            .ok_or_else(|| Error::storage("Invalid signed URL response"))?;
882
883        info!("Created signed URL successfully");
884        Ok(signed_url.to_string())
885    }
886
887    /// Get transformed image URL
888    pub fn get_public_url_transformed(
889        &self,
890        bucket_id: &str,
891        path: &str,
892        options: TransformOptions,
893    ) -> Result<String> {
894        let mut url = Url::parse(&self.get_public_url(bucket_id, path))?;
895
896        if let Some(width) = options.width {
897            url.query_pairs_mut()
898                .append_pair("width", &width.to_string());
899        }
900
901        if let Some(height) = options.height {
902            url.query_pairs_mut()
903                .append_pair("height", &height.to_string());
904        }
905
906        if let Some(resize) = options.resize {
907            let resize_str = match resize {
908                ResizeMode::Cover => "cover",
909                ResizeMode::Contain => "contain",
910                ResizeMode::Fill => "fill",
911            };
912            url.query_pairs_mut().append_pair("resize", resize_str);
913        }
914
915        if let Some(format) = options.format {
916            let format_str = match format {
917                ImageFormat::Webp => "webp",
918                ImageFormat::Jpeg => "jpeg",
919                ImageFormat::Png => "png",
920                ImageFormat::Avif => "avif",
921            };
922            url.query_pairs_mut().append_pair("format", format_str);
923        }
924
925        if let Some(quality) = options.quality {
926            url.query_pairs_mut()
927                .append_pair("quality", &quality.to_string());
928        }
929
930        Ok(url.to_string())
931    }
932
933    /// Start a resumable upload session for large files
934    ///
935    /// # Examples
936    /// ```rust,no_run
937    /// use supabase::storage::{ResumableUploadConfig, FileOptions};
938    ///
939    /// # async fn example(storage: &supabase::storage::Storage) -> supabase::Result<()> {
940    /// let config = ResumableUploadConfig::default();
941    /// let file_opts = FileOptions {
942    ///     content_type: Some("video/mp4".to_string()),
943    ///     ..Default::default()
944    /// };
945    ///
946    /// let session = storage.start_resumable_upload(
947    ///     "videos",
948    ///     "my-large-video.mp4",
949    ///     1024 * 1024 * 100, // 100MB
950    ///     Some(config),
951    ///     Some(file_opts)
952    /// ).await?;
953    ///
954    /// println!("Started upload session: {}", session.upload_id);
955    /// # Ok(())
956    /// # }
957    /// ```
958    #[cfg(not(target_arch = "wasm32"))]
959    pub async fn start_resumable_upload(
960        &self,
961        bucket_id: &str,
962        path: &str,
963        total_size: u64,
964        config: Option<ResumableUploadConfig>,
965        options: Option<FileOptions>,
966    ) -> Result<UploadSession> {
967        let config = config.unwrap_or_default();
968        let options = options.unwrap_or_default();
969
970        debug!(
971            "Starting resumable upload for bucket: {} path: {} size: {}",
972            bucket_id, path, total_size
973        );
974
975        let url = format!(
976            "{}/storage/v1/object/{}/{}/resumable",
977            self.config.url, bucket_id, path
978        );
979
980        let payload = serde_json::json!({
981            "totalSize": total_size,
982            "chunkSize": config.chunk_size,
983            "contentType": options.content_type,
984            "cacheControl": options.cache_control,
985            "upsert": options.upsert
986        });
987
988        let response = self.http_client.post(&url).json(&payload).send().await?;
989
990        if !response.status().is_success() {
991            let error_msg = format!(
992                "Start resumable upload failed with status: {}",
993                response.status()
994            );
995            return Err(Error::storage(error_msg));
996        }
997
998        let session: UploadSession = response.json().await?;
999        info!("Started resumable upload session: {}", session.upload_id);
1000
1001        Ok(session)
1002    }
1003
1004    /// Upload a chunk for resumable upload
1005    ///
1006    /// # Examples
1007    /// ```rust,no_run
1008    /// use bytes::Bytes;
1009    ///
1010    /// # async fn example(storage: &supabase::storage::Storage, session: &supabase::storage::UploadSession) -> supabase::Result<()> {
1011    /// let chunk_data = Bytes::from(vec![0u8; 1024 * 1024]); // 1MB chunk
1012    ///
1013    /// let part = storage.upload_chunk(
1014    ///     session,
1015    ///     1, // part number
1016    ///     chunk_data
1017    /// ).await?;
1018    ///
1019    /// println!("Uploaded part: {} etag: {}", part.part_number, part.etag);
1020    /// # Ok(())
1021    /// # }
1022    /// ```
1023    #[cfg(not(target_arch = "wasm32"))]
1024    pub async fn upload_chunk(
1025        &self,
1026        session: &UploadSession,
1027        part_number: u32,
1028        chunk_data: Bytes,
1029    ) -> Result<UploadedPart> {
1030        debug!(
1031            "Uploading chunk {} for session: {} size: {}",
1032            part_number,
1033            session.upload_id,
1034            chunk_data.len()
1035        );
1036
1037        let url = format!(
1038            "{}/storage/v1/object/{}/{}/resumable/{}",
1039            self.config.url, session.bucket_id, session.object_path, session.upload_id
1040        );
1041
1042        let chunk_size = chunk_data.len() as u64;
1043
1044        let response = self
1045            .http_client
1046            .put(&url)
1047            .header("Content-Type", "application/octet-stream")
1048            .header("X-Part-Number", part_number.to_string())
1049            .body(chunk_data)
1050            .send()
1051            .await?;
1052
1053        if !response.status().is_success() {
1054            let error_msg = format!("Upload chunk failed with status: {}", response.status());
1055            return Err(Error::storage(error_msg));
1056        }
1057
1058        let etag = response
1059            .headers()
1060            .get("etag")
1061            .and_then(|h| h.to_str().ok())
1062            .unwrap_or("")
1063            .to_string();
1064
1065        let part = UploadedPart {
1066            part_number,
1067            etag,
1068            size: chunk_size,
1069        };
1070
1071        info!("Uploaded chunk {} successfully", part_number);
1072        Ok(part)
1073    }
1074
1075    /// Complete a resumable upload after all chunks are uploaded
1076    ///
1077    /// # Examples
1078    /// ```rust,no_run
1079    /// # async fn example(storage: &supabase::storage::Storage, mut session: supabase::storage::UploadSession) -> supabase::Result<()> {
1080    /// // ... upload all chunks and collect parts ...
1081    ///
1082    /// let response = storage.complete_resumable_upload(&session).await?;
1083    /// println!("Upload completed: {}", response.key);
1084    /// # Ok(())
1085    /// # }
1086    /// ```
1087    #[cfg(not(target_arch = "wasm32"))]
1088    pub async fn complete_resumable_upload(
1089        &self,
1090        session: &UploadSession,
1091    ) -> Result<UploadResponse> {
1092        debug!(
1093            "Completing resumable upload for session: {}",
1094            session.upload_id
1095        );
1096
1097        let url = format!(
1098            "{}/storage/v1/object/{}/{}/resumable/{}/complete",
1099            self.config.url, session.bucket_id, session.object_path, session.upload_id
1100        );
1101
1102        let payload = serde_json::json!({
1103            "parts": session.uploaded_parts
1104        });
1105
1106        let response = self.http_client.post(&url).json(&payload).send().await?;
1107
1108        if !response.status().is_success() {
1109            let error_msg = format!(
1110                "Complete resumable upload failed with status: {}",
1111                response.status()
1112            );
1113            return Err(Error::storage(error_msg));
1114        }
1115
1116        let upload_response: UploadResponse = response.json().await?;
1117        info!("Completed resumable upload: {}", upload_response.key);
1118
1119        Ok(upload_response)
1120    }
1121
1122    /// Upload a large file with automatic chunking and resume capability
1123    ///
1124    /// This is a high-level method that handles the entire resumable upload process.
1125    ///
1126    /// # Examples
1127    /// ```rust,no_run
1128    /// use supabase::storage::{ResumableUploadConfig, FileOptions};
1129    /// use std::sync::Arc;
1130    ///
1131    /// # async fn example(storage: &supabase::storage::Storage) -> supabase::Result<()> {
1132    /// let config = ResumableUploadConfig::default();
1133    /// let file_opts = FileOptions {
1134    ///     content_type: Some("video/mp4".to_string()),
1135    ///     ..Default::default()
1136    /// };
1137    ///
1138    /// let progress_callback = Arc::new(|uploaded: u64, total: u64| {
1139    ///     println!("Progress: {:.1}%", (uploaded as f64 / total as f64) * 100.0);
1140    /// });
1141    ///
1142    /// let response = storage.upload_large_file(
1143    ///     "videos",
1144    ///     "my-large-video.mp4",
1145    ///     "/path/to/large-video.mp4",
1146    ///     Some(config),
1147    ///     Some(file_opts),
1148    ///     Some(progress_callback)
1149    /// ).await?;
1150    ///
1151    /// println!("Upload completed: {}", response.key);
1152    /// # Ok(())
1153    /// # }
1154    /// ```
1155    #[cfg(all(not(target_arch = "wasm32"), feature = "native"))]
1156    pub async fn upload_large_file<P: AsRef<std::path::Path>>(
1157        &self,
1158        bucket_id: &str,
1159        path: &str,
1160        file_path: P,
1161        config: Option<ResumableUploadConfig>,
1162        options: Option<FileOptions>,
1163        progress_callback: Option<UploadProgressCallback>,
1164    ) -> Result<UploadResponse> {
1165        let config = config.unwrap_or_default();
1166
1167        debug!("Starting large file upload from: {:?}", file_path.as_ref());
1168
1169        // Get file size
1170        let metadata = tokio::fs::metadata(&file_path)
1171            .await
1172            .map_err(|e| Error::storage(format!("Failed to get file metadata: {}", e)))?;
1173
1174        let total_size = metadata.len();
1175
1176        if total_size <= config.chunk_size {
1177            // Use regular upload for small files
1178            return self.upload_file(bucket_id, path, file_path, options).await;
1179        }
1180
1181        // Start resumable upload session
1182        let mut session = self
1183            .start_resumable_upload(bucket_id, path, total_size, Some(config.clone()), options)
1184            .await?;
1185
1186        // Open file for reading
1187        let mut file = tokio::fs::File::open(&file_path)
1188            .await
1189            .map_err(|e| Error::storage(format!("Failed to open file: {}", e)))?;
1190
1191        let mut uploaded_size = 0u64;
1192        let mut part_number = 1u32;
1193
1194        // Upload chunks
1195        loop {
1196            let remaining_size = total_size - uploaded_size;
1197            if remaining_size == 0 {
1198                break;
1199            }
1200
1201            let chunk_size = std::cmp::min(config.chunk_size, remaining_size);
1202            let mut buffer = vec![0u8; chunk_size as usize];
1203
1204            // Read chunk from file
1205            use tokio::io::AsyncReadExt;
1206            let bytes_read = file
1207                .read_exact(&mut buffer)
1208                .await
1209                .map_err(|e| Error::storage(format!("Failed to read file chunk: {}", e)))?;
1210
1211            if bytes_read == 0 {
1212                break;
1213            }
1214
1215            buffer.truncate(bytes_read);
1216            let chunk_data = Bytes::from(buffer);
1217
1218            // Upload chunk with retries
1219            let mut attempts = 0;
1220            let part = loop {
1221                attempts += 1;
1222
1223                match self
1224                    .upload_chunk(&session, part_number, chunk_data.clone())
1225                    .await
1226                {
1227                    Ok(part) => break part,
1228                    Err(e) if attempts < config.max_retries => {
1229                        warn!(
1230                            "Upload chunk {} failed (attempt {}), retrying: {}",
1231                            part_number, attempts, e
1232                        );
1233                        async_sleep(Duration::from_millis(config.retry_delay)).await;
1234                        continue;
1235                    }
1236                    Err(e) => return Err(e),
1237                }
1238            };
1239
1240            session.uploaded_parts.push(part);
1241            uploaded_size += chunk_size;
1242            part_number += 1;
1243
1244            // Call progress callback
1245            if let Some(callback) = &progress_callback {
1246                callback(uploaded_size, total_size);
1247            }
1248
1249            debug!(
1250                "Uploaded chunk {}, progress: {}/{}",
1251                part_number - 1,
1252                uploaded_size,
1253                total_size
1254            );
1255        }
1256
1257        // Complete upload
1258        let response = self.complete_resumable_upload(&session).await?;
1259
1260        info!("Large file upload completed: {}", response.key);
1261        Ok(response)
1262    }
1263
1264    /// Get resumable upload session status
1265    #[cfg(not(target_arch = "wasm32"))]
1266    pub async fn get_upload_session(&self, upload_id: &str) -> Result<UploadSession> {
1267        debug!("Getting upload session status: {}", upload_id);
1268
1269        let url = format!("{}/storage/v1/resumable/{}", self.config.url, upload_id);
1270
1271        let response = self.http_client.get(&url).send().await?;
1272
1273        if !response.status().is_success() {
1274            let error_msg = format!(
1275                "Get upload session failed with status: {}",
1276                response.status()
1277            );
1278            return Err(Error::storage(error_msg));
1279        }
1280
1281        let session: UploadSession = response.json().await?;
1282        Ok(session)
1283    }
1284
1285    /// Cancel a resumable upload session
1286    #[cfg(not(target_arch = "wasm32"))]
1287    pub async fn cancel_upload_session(&self, upload_id: &str) -> Result<()> {
1288        debug!("Cancelling upload session: {}", upload_id);
1289
1290        let url = format!("{}/storage/v1/resumable/{}", self.config.url, upload_id);
1291
1292        let response = self.http_client.delete(&url).send().await?;
1293
1294        if !response.status().is_success() {
1295            let error_msg = format!(
1296                "Cancel upload session failed with status: {}",
1297                response.status()
1298            );
1299            return Err(Error::storage(error_msg));
1300        }
1301
1302        info!("Cancelled upload session: {}", upload_id);
1303        Ok(())
1304    }
1305
1306    /// Update file metadata with tags and custom metadata
1307    ///
1308    /// # Examples
1309    /// ```rust,no_run
1310    /// use std::collections::HashMap;
1311    /// use supabase::storage::FileMetadata;
1312    ///
1313    /// # async fn example(storage: &supabase::storage::Storage) -> supabase::Result<()> {
1314    /// let mut tags = HashMap::new();
1315    /// tags.insert("category".to_string(), "documents".to_string());
1316    /// tags.insert("project".to_string(), "web-app".to_string());
1317    ///
1318    /// let mut custom_data = HashMap::new();
1319    /// custom_data.insert("author".to_string(), serde_json::Value::String("john_doe".to_string()));
1320    /// custom_data.insert("version".to_string(), serde_json::Value::Number(serde_json::Number::from(1)));
1321    ///
1322    /// let metadata = FileMetadata {
1323    ///     tags: Some(tags),
1324    ///     custom_metadata: Some(custom_data),
1325    ///     description: Some("Project documentation".to_string()),
1326    ///     category: Some("documents".to_string()),
1327    ///     searchable_content: Some("documentation project guide".to_string()),
1328    /// };
1329    ///
1330    /// storage.update_file_metadata("documents", "guide.pdf", &metadata).await?;
1331    /// # Ok(())
1332    /// # }
1333    /// ```
1334    pub async fn update_file_metadata(
1335        &self,
1336        bucket_id: &str,
1337        path: &str,
1338        metadata: &FileMetadata,
1339    ) -> Result<()> {
1340        debug!(
1341            "Updating file metadata for bucket: {} path: {}",
1342            bucket_id, path
1343        );
1344
1345        let url = format!(
1346            "{}/storage/v1/object/{}/{}/metadata",
1347            self.config.url, bucket_id, path
1348        );
1349
1350        let response = self.http_client.put(&url).json(metadata).send().await?;
1351
1352        if !response.status().is_success() {
1353            let error_msg = format!(
1354                "Update file metadata failed with status: {}",
1355                response.status()
1356            );
1357            return Err(Error::storage(error_msg));
1358        }
1359
1360        info!("Updated file metadata successfully");
1361        Ok(())
1362    }
1363
1364    /// Search files by metadata
1365    ///
1366    /// # Examples
1367    /// ```rust,no_run
1368    /// use std::collections::HashMap;
1369    /// use supabase::storage::SearchOptions;
1370    ///
1371    /// # async fn example(storage: &supabase::storage::Storage) -> supabase::Result<()> {
1372    /// let mut tag_filter = HashMap::new();
1373    /// tag_filter.insert("category".to_string(), "documents".to_string());
1374    ///
1375    /// let search_options = SearchOptions {
1376    ///     tags: Some(tag_filter),
1377    ///     category: Some("documents".to_string()),
1378    ///     content_search: Some("project guide".to_string()),
1379    ///     limit: Some(20),
1380    ///     offset: Some(0),
1381    /// };
1382    ///
1383    /// let files = storage.search_files("documents", &search_options).await?;
1384    /// println!("Found {} files", files.len());
1385    /// # Ok(())
1386    /// # }
1387    /// ```
1388    pub async fn search_files(
1389        &self,
1390        bucket_id: &str,
1391        search_options: &SearchOptions,
1392    ) -> Result<Vec<FileObject>> {
1393        debug!("Searching files in bucket: {}", bucket_id);
1394
1395        let url = format!("{}/storage/v1/object/{}/search", self.config.url, bucket_id);
1396
1397        let response = self
1398            .http_client
1399            .post(&url)
1400            .json(search_options)
1401            .send()
1402            .await?;
1403
1404        if !response.status().is_success() {
1405            let error_msg = format!("Search files failed with status: {}", response.status());
1406            return Err(Error::storage(error_msg));
1407        }
1408
1409        let files: Vec<FileObject> = response.json().await?;
1410        info!("Found {} files matching search criteria", files.len());
1411
1412        Ok(files)
1413    }
1414
1415    // ========================
1416    // STORAGE POLICIES HELPERS
1417    // ========================
1418
1419    /// Create a storage policy for Row Level Security (RLS)
1420    ///
1421    /// # Examples
1422    /// ```rust,no_run
1423    /// use supabase::storage::{StoragePolicy, PolicyOperation};
1424    ///
1425    /// # async fn example(storage: &supabase::storage::Storage) -> supabase::Result<()> {
1426    /// let policy = StoragePolicy {
1427    ///     name: "user_files_policy".to_string(),
1428    ///     bucket_id: "user-files".to_string(),
1429    ///     operation: PolicyOperation::Select,
1430    ///     definition: "auth.uid()::text = (storage.foldername(name))[1]".to_string(),
1431    ///     check: None,
1432    /// };
1433    ///
1434    /// storage.create_policy(&policy).await?;
1435    /// # Ok(())
1436    /// # }
1437    /// ```
1438    pub async fn create_policy(&self, policy: &StoragePolicy) -> Result<()> {
1439        debug!("Creating storage policy: {}", policy.name);
1440
1441        let url = format!("{}/rest/v1/rpc/create_storage_policy", self.config.url);
1442
1443        let payload = serde_json::json!({
1444            "policy_name": policy.name,
1445            "bucket_name": policy.bucket_id,
1446            "operation": policy.operation,
1447            "definition": policy.definition,
1448            "check_expression": policy.check
1449        });
1450
1451        let response = self
1452            .http_client
1453            .post(&url)
1454            .header("Authorization", format!("Bearer {}", self.get_admin_key()))
1455            .header("apikey", self.get_admin_key())
1456            .json(&payload)
1457            .send()
1458            .await?;
1459
1460        if !response.status().is_success() {
1461            let error_msg = format!(
1462                "Create storage policy failed with status: {}",
1463                response.status()
1464            );
1465            return Err(Error::storage(error_msg));
1466        }
1467
1468        info!("Created storage policy: {}", policy.name);
1469        Ok(())
1470    }
1471
1472    /// Update an existing storage policy
1473    ///
1474    /// # Examples
1475    /// ```rust,no_run
1476    /// use supabase::storage::{StoragePolicy, PolicyOperation};
1477    ///
1478    /// # async fn example(storage: &supabase::storage::Storage) -> supabase::Result<()> {
1479    /// let updated_policy = StoragePolicy {
1480    ///     name: "user_files_policy".to_string(),
1481    ///     bucket_id: "user-files".to_string(),
1482    ///     operation: PolicyOperation::All,
1483    ///     definition: "auth.uid()::text = (storage.foldername(name))[1] OR auth.role() = 'admin'".to_string(),
1484    ///     check: Some("auth.uid() IS NOT NULL".to_string()),
1485    /// };
1486    ///
1487    /// storage.update_policy(&updated_policy).await?;
1488    /// # Ok(())
1489    /// # }
1490    /// ```
1491    pub async fn update_policy(&self, policy: &StoragePolicy) -> Result<()> {
1492        debug!("Updating storage policy: {}", policy.name);
1493
1494        let url = format!("{}/rest/v1/rpc/update_storage_policy", self.config.url);
1495
1496        let payload = serde_json::json!({
1497            "policy_name": policy.name,
1498            "bucket_name": policy.bucket_id,
1499            "operation": policy.operation,
1500            "definition": policy.definition,
1501            "check_expression": policy.check
1502        });
1503
1504        let response = self
1505            .http_client
1506            .put(&url)
1507            .header("Authorization", format!("Bearer {}", self.get_admin_key()))
1508            .header("apikey", self.get_admin_key())
1509            .json(&payload)
1510            .send()
1511            .await?;
1512
1513        if !response.status().is_success() {
1514            let error_msg = format!(
1515                "Update storage policy failed with status: {}",
1516                response.status()
1517            );
1518            return Err(Error::storage(error_msg));
1519        }
1520
1521        info!("Updated storage policy: {}", policy.name);
1522        Ok(())
1523    }
1524
1525    /// Delete a storage policy
1526    ///
1527    /// # Examples
1528    /// ```rust,no_run
1529    /// # async fn example(storage: &supabase::storage::Storage) -> supabase::Result<()> {
1530    /// storage.delete_policy("user-files", "user_files_policy").await?;
1531    /// # Ok(())
1532    /// # }
1533    /// ```
1534    pub async fn delete_policy(&self, bucket_id: &str, policy_name: &str) -> Result<()> {
1535        debug!(
1536            "Deleting storage policy: {} from bucket: {}",
1537            policy_name, bucket_id
1538        );
1539
1540        let url = format!("{}/rest/v1/rpc/delete_storage_policy", self.config.url);
1541
1542        let payload = serde_json::json!({
1543            "policy_name": policy_name,
1544            "bucket_name": bucket_id
1545        });
1546
1547        let response = self
1548            .http_client
1549            .delete(&url)
1550            .header("Authorization", format!("Bearer {}", self.get_admin_key()))
1551            .header("apikey", self.get_admin_key())
1552            .json(&payload)
1553            .send()
1554            .await?;
1555
1556        if !response.status().is_success() {
1557            let error_msg = format!(
1558                "Delete storage policy failed with status: {}",
1559                response.status()
1560            );
1561            return Err(Error::storage(error_msg));
1562        }
1563
1564        info!("Deleted storage policy: {}", policy_name);
1565        Ok(())
1566    }
1567
1568    /// List all storage policies for a bucket
1569    ///
1570    /// # Examples
1571    /// ```rust,no_run
1572    /// # async fn example(storage: &supabase::storage::Storage) -> supabase::Result<()> {
1573    /// let policies = storage.list_policies("user-files").await?;
1574    /// println!("Found {} policies", policies.len());
1575    /// # Ok(())
1576    /// # }
1577    /// ```
1578    pub async fn list_policies(&self, bucket_id: &str) -> Result<Vec<StoragePolicy>> {
1579        debug!("Listing storage policies for bucket: {}", bucket_id);
1580
1581        let url = format!("{}/rest/v1/rpc/list_storage_policies", self.config.url);
1582
1583        let payload = serde_json::json!({
1584            "bucket_name": bucket_id
1585        });
1586
1587        let response = self
1588            .http_client
1589            .post(&url)
1590            .header("Authorization", format!("Bearer {}", self.get_admin_key()))
1591            .header("apikey", self.get_admin_key())
1592            .json(&payload)
1593            .send()
1594            .await?;
1595
1596        if !response.status().is_success() {
1597            let error_msg = format!(
1598                "List storage policies failed with status: {}",
1599                response.status()
1600            );
1601            return Err(Error::storage(error_msg));
1602        }
1603
1604        let policies: Vec<StoragePolicy> = response.json().await?;
1605        info!(
1606            "Listed {} storage policies for bucket: {}",
1607            policies.len(),
1608            bucket_id
1609        );
1610
1611        Ok(policies)
1612    }
1613
1614    /// Test if a user can access a file based on current policies
1615    ///
1616    /// # Examples
1617    /// ```rust,no_run
1618    /// use supabase::storage::PolicyOperation;
1619    ///
1620    /// # async fn example(storage: &supabase::storage::Storage) -> supabase::Result<()> {
1621    /// let can_access = storage.test_policy_access(
1622    ///     "user-files",
1623    ///     "user123/document.pdf",
1624    ///     PolicyOperation::Select,
1625    ///     "user123"
1626    /// ).await?;
1627    ///
1628    /// if can_access {
1629    ///     println!("User can access the file");
1630    /// } else {
1631    ///     println!("Access denied");
1632    /// }
1633    /// # Ok(())
1634    /// # }
1635    /// ```
1636    pub async fn test_policy_access(
1637        &self,
1638        bucket_id: &str,
1639        object_path: &str,
1640        operation: PolicyOperation,
1641        user_id: &str,
1642    ) -> Result<bool> {
1643        debug!(
1644            "Testing policy access for user: {} on object: {}",
1645            user_id, object_path
1646        );
1647
1648        let url = format!("{}/rest/v1/rpc/test_storage_policy_access", self.config.url);
1649
1650        let payload = serde_json::json!({
1651            "bucket_name": bucket_id,
1652            "object_name": object_path,
1653            "operation": operation,
1654            "user_id": user_id
1655        });
1656
1657        let response = self
1658            .http_client
1659            .post(&url)
1660            .header("Authorization", format!("Bearer {}", self.get_admin_key()))
1661            .header("apikey", self.get_admin_key())
1662            .json(&payload)
1663            .send()
1664            .await?;
1665
1666        if !response.status().is_success() {
1667            let error_msg = format!(
1668                "Test policy access failed with status: {}",
1669                response.status()
1670            );
1671            return Err(Error::storage(error_msg));
1672        }
1673
1674        let result: serde_json::Value = response.json().await?;
1675        let can_access = result["can_access"].as_bool().unwrap_or(false);
1676
1677        info!("Policy access test result: {}", can_access);
1678        Ok(can_access)
1679    }
1680
1681    /// Generate a policy template for common use cases
1682    ///
1683    /// # Examples
1684    /// ```rust,no_run
1685    /// use supabase::storage::PolicyTemplate;
1686    ///
1687    /// # async fn example(storage: &supabase::storage::Storage) -> supabase::Result<()> {
1688    /// let policy = storage.generate_policy_template(
1689    ///     "user-files",
1690    ///     "user_files_access",
1691    ///     PolicyTemplate::UserFolderAccess
1692    /// );
1693    ///
1694    /// println!("Generated policy: {:?}", policy);
1695    /// # Ok(())
1696    /// # }
1697    /// ```
1698    pub fn generate_policy_template(
1699        &self,
1700        bucket_id: &str,
1701        policy_name: &str,
1702        template: PolicyTemplate,
1703    ) -> StoragePolicy {
1704        let (operation, definition, check) = match template {
1705            PolicyTemplate::PublicRead => (PolicyOperation::Select, "true".to_string(), None),
1706            PolicyTemplate::AuthenticatedRead => (
1707                PolicyOperation::Select,
1708                "auth.uid() IS NOT NULL".to_string(),
1709                None,
1710            ),
1711            PolicyTemplate::UserFolderAccess => (
1712                PolicyOperation::All,
1713                "auth.uid()::text = (storage.foldername(name))[1]".to_string(),
1714                Some("auth.uid() IS NOT NULL".to_string()),
1715            ),
1716            PolicyTemplate::AdminFullAccess => (
1717                PolicyOperation::All,
1718                "auth.role() = 'admin'".to_string(),
1719                None,
1720            ),
1721            PolicyTemplate::ReadOnlyForRole(role) => (
1722                PolicyOperation::Select,
1723                format!("auth.role() = '{}'", role),
1724                None,
1725            ),
1726        };
1727
1728        StoragePolicy {
1729            name: policy_name.to_string(),
1730            bucket_id: bucket_id.to_string(),
1731            operation,
1732            definition,
1733            check,
1734        }
1735    }
1736}
1737
/// Storage policy for Row Level Security
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StoragePolicy {
    /// Policy name, used to identify the policy in create/update/delete RPCs.
    pub name: String,
    /// Bucket the policy applies to.
    pub bucket_id: String,
    /// Operation(s) the policy governs (SELECT/INSERT/UPDATE/DELETE/ALL).
    pub operation: PolicyOperation,
    /// SQL expression deciding whether a row is visible/affected (USING clause).
    pub definition: String,
    /// Optional SQL expression validating new/updated rows (WITH CHECK clause)
    /// — NOTE(review): presumed mapping to Postgres policy clauses; confirm
    /// against the server-side RPC implementation.
    pub check: Option<String>,
}
1747
/// Policy operation types
///
/// Serialized in UPPERCASE (e.g. `Select` -> `"SELECT"`) to match the
/// wire format expected by the policy RPCs.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "UPPERCASE")]
pub enum PolicyOperation {
    Select,
    Insert,
    Update,
    Delete,
    All,
}
1758
/// Pre-defined policy templates for common use cases
///
/// Consumed by `Storage::generate_policy_template`, which expands each
/// variant into a concrete [`StoragePolicy`] with a fixed operation and
/// RLS expression.
#[derive(Debug, Clone)]
pub enum PolicyTemplate {
    /// Allow public read access to all files
    PublicRead,
    /// Allow read access only to authenticated users
    AuthenticatedRead,
    /// Allow full access to files in user's own folder (e.g. user123/*)
    UserFolderAccess,
    /// Allow full access to admin users only
    AdminFullAccess,
    /// Allow read-only access to users with specific role
    ReadOnlyForRole(String),
}