// baidu_netdisk_sdk/upload.rs
1//! File upload functionality for Baidu NetDisk
2//!
3//! This module provides comprehensive file upload capabilities with support for:
4//! - Simple file upload via path
5//! - Reader-based upload for streaming data
6//! - Byte array upload for in-memory data
7//! - Resumable upload (automatic detection of partially uploaded chunks)
8//! - Parallel chunk upload for better performance
9//!
10//! # Architecture
11//!
12//! The upload process consists of 3 steps:
13//! 1. **Precreate** - Initiate upload session, get uploadid, and check existing chunks
14//! 2. **Upload Chunks** - Upload missing chunks in parallel
15//! 3. **Create File** - Merge chunks into final file on server
16//!
17//! # Upload Methods Comparison
18//!
19//! | Method | Data Source | Memory Usage | Streaming | Use Case |
20//! |--------|-------------|--------------|-----------|----------|
21//! | [`UploadClient::upload_file`] | File path | ~80MB | ✅ | Most common, upload from disk |
22//! | [`UploadClient::upload_reader`] | Reader + size | ~80MB | ✅ | Custom readers, wrapped streams |
23//! | [`UploadClient::upload_bytes`] | `&[u8]` slice | Full data | ❌ | Small data already in memory |
24//!
25//! # Memory Optimization
26//!
27//! For large files, memory usage is bounded by:
28//! - Batch size: `max_concurrency * 2` chunks (default: 20 * 4MB = 80MB)
29//! - Only missing chunks are copied to memory
30//! - Existing chunks are skipped automatically (resumable upload)
31//!
32//! # Example
33//!
34//! ```no_run
35//! use baidu_netdisk_sdk::BaiduNetDiskClient;
36//!
37//! # async fn example() -> Result<(), Box<dyn std::error::Error>> {
38//! let client = BaiduNetDiskClient::builder().build()?;
39//! client.load_token_from_env()?;
40//!
41//! // Simple file upload
42//! let response = client.upload()
43//!     .upload_file("test.txt", "/remote/test.txt")
44//!     .await?;
45//!
46//! println!("Uploaded: {} ({} bytes)", response.path, response.size);
47//! # Ok(())
48//! # }
49//! ```
50//!
51//! # Low-level API
52//!
53//! For advanced use cases, you can use the step-by-step API:
54//! - [`UploadClient::precreate`] - Start upload session
55//! - [`UploadClient::upload_chunks_parallel`] - Upload specific chunks
56//! - [`UploadClient::create_file`] - Complete the upload
57use crate::client::TokenGetter;
58use crate::errors::{NetDiskError, NetDiskResult};
59use crate::http::HttpClient;
60use futures::stream::{self, StreamExt};
61use log::debug;
62use serde::Deserialize;
63use std::sync::Arc;
64
/// Upload client for Baidu NetDisk
///
/// Thin handle bundling the HTTP transport with a token source.
/// Cheap to clone: `HttpClient` is cloned per handle and the token getter
/// is shared behind an `Arc`.
#[derive(Debug, Clone)]
pub struct UploadClient {
    // Shared HTTP transport used for all upload API calls.
    http_client: HttpClient,
    // Source of the current access token; shared so clones reuse the same source.
    token_getter: Arc<dyn TokenGetter>,
}
71
72impl UploadClient {
73    /// Create a new UploadClient instance
74    ///
75    /// Usually you don't need to call this directly - use BaiduNetDiskClient::upload() instead.
76    pub fn new(http_client: HttpClient, token_getter: Arc<dyn TokenGetter>) -> Self {
77        Self {
78            http_client,
79            token_getter,
80        }
81    }
82
83    /// Get a reference to the internal HTTP client
84    pub fn http_client(&self) -> &HttpClient {
85        &self.http_client
86    }
87
88    /// Precreate an upload session
89    ///
90    /// Initiates an upload session and checks which chunks (if any) already exist on the server.
91    /// This is the first step of the multi-step upload process.
92    pub async fn precreate(&self, options: PrecreateOptions) -> NetDiskResult<PrecreateResponse> {
93        let token = self.token_getter.get_token().await?;
94        let block_list_json =
95            serde_json::to_string(&options.block_list).map_err(|e| NetDiskError::Unknown {
96                message: format!("Failed to serialize block_list: {}", e),
97            })?;
98
99        let params = vec![
100            ("method", "precreate"),
101            ("access_token", token.access_token.as_str()),
102        ];
103
104        let size_str = options.size.to_string();
105        let isdir_str = options.isdir.to_string();
106        let rtype_str = options.rtype.to_string();
107
108        let form_data = vec![
109            ("path", options.path.as_str()),
110            ("size", size_str.as_str()),
111            ("isdir", isdir_str.as_str()),
112            ("block_list", block_list_json.as_str()),
113            ("autoinit", "1"),
114            ("rtype", rtype_str.as_str()),
115        ];
116
117        debug!(
118            "Precreate upload: path={}, size={}, isdir={}, block_list={:?}",
119            options.path, options.size, options.isdir, options.block_list
120        );
121
122        let response: PrecreateResponse = self
123            .http_client
124            .post_form("/rest/2.0/xpan/file", Some(&form_data), Some(&params))
125            .await?;
126
127        if response.errno != 0 {
128            let error_msg = get_error_message(response.errno);
129            return Err(NetDiskError::api_error(response.errno, &error_msg));
130        }
131
132        debug!(
133            "Precreate success: uploadid={}, block_list={:?}",
134            response.uploadid, response.block_list
135        );
136
137        Ok(response)
138    }
139}
140
/// Options for precreate upload
///
/// Build with [`PrecreateOptions::new`] and customize via the chained
/// builder methods below.
#[derive(Debug, Clone, Default)]
pub struct PrecreateOptions {
    /// Remote file path on Baidu NetDisk
    pub path: String,
    /// File size in bytes
    pub size: u64,
    /// Is directory (0 for file, 1 for directory)
    pub isdir: i32,
    /// List of block MD5s (each block is typically 4MB)
    pub block_list: Vec<String>,
    /// Conflict resolution type (1=overwrite, 2=rename, 3=new copy)
    pub rtype: i32,
    /// Optional uploadid for resuming interrupted uploads
    pub uploadid: Option<String>,
    /// Optional content MD5 for entire file
    pub content_md5: Option<String>,
    /// Optional slice MD5
    pub slice_md5: Option<String>,
    /// Optional local creation time (timestamp)
    pub local_ctime: Option<u64>,
    /// Optional local modification time (timestamp)
    pub local_mtime: Option<u64>,
}

impl PrecreateOptions {
    /// Create options for a regular file (`isdir = 0`) with `rtype = 1`
    /// (overwrite); every optional field starts unset.
    pub fn new(path: &str, size: u64, block_list: Vec<String>) -> Self {
        Self {
            path: path.to_owned(),
            size,
            block_list,
            rtype: 1,
            ..Self::default()
        }
    }

    /// Set whether this is a directory
    pub fn isdir(mut self, isdir: bool) -> Self {
        self.isdir = i32::from(isdir);
        self
    }

    /// Set conflict resolution type (1=overwrite, 2=rename, 3=new copy)
    pub fn rtype(mut self, value: i32) -> Self {
        self.rtype = value;
        self
    }

    /// Set uploadid for resuming an interrupted upload
    pub fn uploadid(mut self, value: &str) -> Self {
        self.uploadid = Some(value.to_owned());
        self
    }

    /// Set content MD5 for the entire file
    pub fn content_md5(mut self, value: &str) -> Self {
        self.content_md5 = Some(value.to_owned());
        self
    }

    /// Set slice MD5
    pub fn slice_md5(mut self, value: &str) -> Self {
        self.slice_md5 = Some(value.to_owned());
        self
    }

    /// Set local creation time (timestamp)
    pub fn local_ctime(mut self, value: u64) -> Self {
        self.local_ctime = Some(value);
        self
    }

    /// Set local modification time (timestamp)
    pub fn local_mtime(mut self, value: u64) -> Self {
        self.local_mtime = Some(value);
        self
    }
}
225
226/// Precreate response
227#[derive(Debug, Deserialize)]
228pub struct PrecreateResponse {
229    /// Error code (0 indicates success)
230    pub errno: i32,
231    /// File path
232    #[serde(default)]
233    pub path: Option<String>,
234    /// Upload session ID (use this for subsequent steps)
235    pub uploadid: String,
236    /// Return type
237    #[serde(rename = "return_type")]
238    pub return_type: i32,
239    /// List of missing block indices that need to be uploaded
240    #[serde(rename = "block_list")]
241    pub block_list: Vec<u32>,
242}
243
/// Server info for locate upload response
///
/// One entry of the `servers` / `bak_servers` / `quic_servers` arrays.
#[derive(Debug, Deserialize)]
pub struct LocateUploadServer {
    /// Server URL, including scheme (filtered on `https://` by
    /// [`LocateUploadResponse::get_https_servers`])
    pub server: String,
}
250
/// Locate upload response
///
/// Returned by the `locateupload` PCS endpoint; errors are reported via
/// `error_code`/`error_msg` rather than `errno`.
///
/// NOTE(review): fields without `#[serde(default)]` must be present in the
/// payload or deserialization fails — confirm against live error responses.
#[derive(Debug, Deserialize)]
pub struct LocateUploadResponse {
    /// Backup servers
    #[serde(default)]
    pub bak_server: Vec<String>,
    /// Backup servers list
    #[serde(default)]
    pub bak_servers: Vec<LocateUploadServer>,
    /// Client IP address
    pub client_ip: String,
    /// Error code (0 indicates success)
    pub error_code: i32,
    /// Error message
    pub error_msg: String,
    /// Expiration time in seconds
    pub expire: i32,
    /// Host name
    pub host: String,
    /// New number
    #[serde(default)]
    pub newno: String,
    /// QUIC servers
    #[serde(default)]
    pub quic_server: Vec<String>,
    /// QUIC servers list
    #[serde(default)]
    pub quic_servers: Vec<LocateUploadServer>,
    /// Request ID
    pub request_id: u64,
    /// Servers list
    #[serde(default)]
    pub server: Vec<String>,
    /// Server timestamp
    pub server_time: u64,
    /// Servers list (detailed); ordered by proximity/speed per Baidu docs
    pub servers: Vec<LocateUploadServer>,
    /// SL value
    pub sl: i32,
}
291
292impl LocateUploadResponse {
293    /// Get all HTTPS servers from the response
294    ///
295    /// Returns a vector of HTTPS server URLs that can be used for uploading chunks.
296    pub fn get_https_servers(&self) -> Vec<String> {
297        self.servers
298            .iter()
299            .filter(|s| s.server.starts_with("https://"))
300            .map(|s| s.server.clone())
301            .collect()
302    }
303
304    /// Get the first HTTPS server from the available servers
305    ///
306    /// According to Baidu NetDisk API documentation, the servers are sorted by proximity and speed.
307    /// The first server is recommended as the default choice for optimal upload performance.
308    ///
309    /// Returns `None` if no HTTPS servers are available.
310    pub fn get_first_https_server(&self) -> Option<String> {
311        let https_servers = self.get_https_servers();
312        if https_servers.is_empty() {
313            None
314        } else {
315            Some(https_servers[0].clone())
316        }
317    }
318}
319
320impl UploadClient {
321    /// Locate upload domain
322    ///
323    /// Gets the upload domain before uploading chunks.
324    /// This is required before uploading file data.
325    ///
326    /// According to Baidu NetDisk API documentation, the servers are sorted by proximity and speed.
327    /// The first server is recommended as the default choice for optimal upload performance.
328    ///
329    /// # Example
330    ///
331    /// ```no_run
332    /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
333    /// use baidu_netdisk_sdk::BaiduNetDiskClient;
334    ///
335    /// let client = BaiduNetDiskClient::builder().build()?;
336    /// client.load_token_from_env()?;
337    ///
338    /// let response = client.upload()
339    ///     .locate_upload("/apps/appName/filename.jpg", "P1-MTAuMjI4LjQzLjMxOjE1OTU4NTg==")
340    ///     .await?;
341    ///
342    /// // Get the first HTTPS server (recommended by Baidu for optimal performance)
343    /// if let Some(server) = response.get_first_https_server() {
344    ///     println!("Upload server: {}", server);
345    /// }
346    /// # Ok(())
347    /// # }
348    /// ```
349    pub async fn locate_upload(
350        &self,
351        path: &str,
352        uploadid: &str,
353    ) -> NetDiskResult<LocateUploadResponse> {
354        let token = self.token_getter.get_token().await?;
355
356        let url = format!(
357            "https://d.pcs.baidu.com/rest/2.0/pcs/file?method=locateupload&appid=250528&access_token={}&path={}&uploadid={}&upload_version=2.0",
358            urlencoding::encode(&token.access_token),
359            urlencoding::encode(path),
360            urlencoding::encode(uploadid)
361        );
362
363        debug!("Locate upload: path={}, uploadid={}", path, uploadid);
364
365        let response: LocateUploadResponse = self.http_client.get(&url, None).await?;
366
367        if response.error_code != 0 {
368            return Err(NetDiskError::api_error(
369                response.error_code,
370                &response.error_msg,
371            ));
372        }
373
374        debug!(
375            "Locate upload success: host={}, servers={}",
376            response.host,
377            response.servers.len()
378        );
379
380        Ok(response)
381    }
382
383    /// Upload a single chunk
384    ///
385    /// If `server_url` is not provided, the default server `https://c3.pcs.baidu.com` will be used.
386    pub async fn upload_chunk(
387        &self,
388        options: UploadChunkOptions,
389        server_url: Option<&str>,
390    ) -> NetDiskResult<UploadChunkResponse> {
391        let token = self.token_getter.get_token().await?;
392
393        let server = server_url.unwrap_or("https://c3.pcs.baidu.com");
394        let url = format!(
395            "{}/rest/2.0/pcs/superfile2?method=upload&access_token={}&type=tmpfile&path={}&uploadid={}&partseq={}",
396            server,
397            urlencoding::encode(&token.access_token),
398            urlencoding::encode(&options.path),
399            urlencoding::encode(&options.uploadid),
400            options.partseq
401        );
402
403        debug!(
404            "Upload chunk: path={}, uploadid={}, partseq={}, data_size={}, server={}",
405            options.path,
406            options.uploadid,
407            options.partseq,
408            options.data.len(),
409            server
410        );
411
412        let response: UploadChunkResponse = self
413            .http_client
414            .post_multipart(
415                &url,
416                "file".to_string(),
417                "chunk.dat".to_string(),
418                options.data,
419            )
420            .await?;
421
422        Ok(response)
423    }
424
425    /// Upload multiple chunks in parallel
426    ///
427    /// If `server_url` is not provided, the default server `https://c3.pcs.baidu.com` will be used.
428    pub async fn upload_chunks_parallel(
429        &self,
430        remote_path: &str,
431        uploadid: &str,
432        chunks: Vec<(u32, Vec<u8>)>,
433        max_concurrency: usize,
434        server_url: Option<&str>,
435    ) -> NetDiskResult<Vec<(u32, String)>> {
436        let token = self.token_getter.get_token().await?;
437        let server = server_url.unwrap_or("https://c3.pcs.baidu.com").to_string();
438
439        debug!(
440            "Uploading {} chunks in parallel (max_concurrency: {}, server: {})",
441            chunks.len(),
442            max_concurrency,
443            server
444        );
445
446        let access_token_str = token.access_token;
447        let remote_path_str = remote_path.to_string();
448        let uploadid_str = uploadid.to_string();
449        let http_client = self.http_client.clone();
450
451        let mut stream = stream::iter(chunks)
452            .map(move |(partseq, data)| {
453                let path = remote_path_str.clone();
454                let uid = access_token_str.clone();
455                let upid = uploadid_str.clone();
456                let client = http_client.clone();
457                let server_clone = server.clone();
458
459                async move {
460                    let url = format!(
461                        "{}/rest/2.0/pcs/superfile2?method=upload&access_token={}&type=tmpfile&path={}&uploadid={}&partseq={}",
462                        server_clone,
463                        urlencoding::encode(&uid),
464                        urlencoding::encode(&path),
465                        urlencoding::encode(&upid),
466                        partseq
467                    );
468
469                    debug!("Uploading chunk {} ({} bytes)", partseq, data.len());
470
471                    let response: UploadChunkResponse = client
472                        .post_multipart(&url, "file".to_string(), "chunk.dat".to_string(), data)
473                        .await?;
474
475                    Ok((partseq, response.md5))
476                }
477            })
478            .buffer_unordered(max_concurrency);
479
480        let mut chunk_md5s = Vec::new();
481        while let Some(result) = stream.next().await {
482            match result {
483                Ok((partseq, md5)) => {
484                    chunk_md5s.push((partseq, md5));
485                }
486                Err(e) => {
487                    return Err(e);
488                }
489            }
490        }
491
492        debug!("All chunks uploaded successfully");
493        Ok(chunk_md5s)
494    }
495
496    /// Create the final file on the server
497    ///
498    /// This is the final step of the upload process, which merges all uploaded chunks into a single file.
499    pub async fn create_file(
500        &self,
501        options: CreateFileOptions,
502    ) -> NetDiskResult<CreateFileResponse> {
503        let token = self.token_getter.get_token().await?;
504        let block_list_json =
505            serde_json::to_string(&options.block_list).map_err(|e| NetDiskError::Unknown {
506                message: format!("Failed to serialize block_list: {}", e),
507            })?;
508
509        let params = vec![
510            ("method", "create"),
511            ("access_token", token.access_token.as_str()),
512        ];
513
514        let size_str = options.size.to_string();
515        let isdir_str = options.isdir.to_string();
516        let rtype_str = options.rtype.to_string();
517
518        let mut form_data = vec![
519            ("path", options.path.as_str()),
520            ("size", size_str.as_str()),
521            ("isdir", isdir_str.as_str()),
522            ("block_list", block_list_json.as_str()),
523            ("uploadid", options.uploadid.as_str()),
524            ("rtype", rtype_str.as_str()),
525        ];
526
527        let ctime_str = options.local_ctime.map(|t| t.to_string());
528        let mtime_str = options.local_mtime.map(|t| t.to_string());
529
530        if let Some(ref ctime) = ctime_str {
531            form_data.push(("local_ctime", ctime.as_str()));
532        }
533        if let Some(ref mtime) = mtime_str {
534            form_data.push(("local_mtime", mtime.as_str()));
535        }
536
537        debug!(
538            "Create file: path={}, size={}, isdir={}, uploadid={}",
539            options.path, options.size, options.isdir, options.uploadid
540        );
541
542        let response: CreateFileResponse = self
543            .http_client
544            .post_form("/rest/2.0/xpan/file", Some(&form_data), Some(&params))
545            .await?;
546
547        if response.errno != 0 {
548            let error_msg = get_create_error_message(response.errno);
549            return Err(NetDiskError::api_error(response.errno, &error_msg));
550        }
551
552        Ok(response)
553    }
554}
555
/// Options for uploading a single chunk
///
/// Passed to [`UploadClient::upload_chunk`]; `uploadid` comes from the
/// precreate step.
#[derive(Debug, Clone)]
pub struct UploadChunkOptions {
    /// Remote file path on Baidu NetDisk
    pub path: String,
    /// Upload session ID from precreate
    pub uploadid: String,
    /// Chunk sequence number (starting from 0)
    pub partseq: u32,
    /// Chunk data bytes
    pub data: Vec<u8>,
}

impl UploadChunkOptions {
    /// Build options for one chunk upload.
    pub fn new(path: &str, uploadid: &str, partseq: u32, data: Vec<u8>) -> Self {
        Self {
            path: path.to_owned(),
            uploadid: uploadid.to_owned(),
            partseq,
            data,
        }
    }
}
580
/// Chunk upload response
///
/// Returned by the `superfile2` upload endpoint for each chunk.
#[derive(Debug, Deserialize)]
pub struct UploadChunkResponse {
    /// MD5 hash of the uploaded chunk (collected into the block list for create_file)
    pub md5: String,
}
587
/// Options for creating final file on server
///
/// Build with [`CreateFileOptions::new`] and customize via the chained
/// builder methods below.
#[derive(Debug, Clone, Default)]
pub struct CreateFileOptions {
    /// Remote file path on Baidu NetDisk
    pub path: String,
    /// File size in bytes
    pub size: u64,
    /// Is directory (0 for file, 1 for directory)
    pub isdir: i32,
    /// List of block MD5s
    pub block_list: Vec<String>,
    /// Upload session ID from precreate
    pub uploadid: String,
    /// Conflict resolution type (1=overwrite, 2=rename, 3=new copy)
    pub rtype: i32,
    /// Optional local creation time (timestamp)
    pub local_ctime: Option<u64>,
    /// Optional local modification time (timestamp)
    pub local_mtime: Option<u64>,
}

impl CreateFileOptions {
    /// Create options for a regular file (`isdir = 0`) with `rtype = 1`
    /// (overwrite); timestamps start unset.
    pub fn new(path: &str, size: u64, block_list: Vec<String>, uploadid: &str) -> Self {
        Self {
            path: path.to_owned(),
            size,
            block_list,
            uploadid: uploadid.to_owned(),
            rtype: 1,
            ..Self::default()
        }
    }

    /// Set whether this is a directory
    pub fn isdir(mut self, isdir: bool) -> Self {
        self.isdir = i32::from(isdir);
        self
    }

    /// Set conflict resolution type (1=overwrite, 2=rename, 3=new copy)
    pub fn rtype(mut self, value: i32) -> Self {
        self.rtype = value;
        self
    }

    /// Set local creation time (timestamp)
    pub fn local_ctime(mut self, value: u64) -> Self {
        self.local_ctime = Some(value);
        self
    }

    /// Set local modification time (timestamp)
    pub fn local_mtime(mut self, value: u64) -> Self {
        self.local_mtime = Some(value);
        self
    }
}
648
649/// Create file response
650#[derive(Debug, Deserialize)]
651pub struct CreateFileResponse {
652    /// Error code (0 indicates success)
653    pub errno: i32,
654    /// File server ID
655    #[serde(rename = "fs_id")]
656    pub fs_id: u64,
657    /// File MD5
658    pub md5: Option<String>,
659    /// Server filename
660    #[serde(rename = "server_filename")]
661    #[serde(default)]
662    pub server_filename: Option<String>,
663    /// File category
664    pub category: i32,
665    /// File path
666    pub path: String,
667    /// File size in bytes
668    pub size: u64,
669    /// Creation time (timestamp)
670    pub ctime: u64,
671    /// Modification time (timestamp)
672    pub mtime: u64,
673    /// Is directory (0 for file, 1 for directory)
674    pub isdir: i32,
675    /// File name
676    #[serde(default)]
677    pub name: Option<String>,
678    /// From type
679    #[serde(rename = "from_type")]
680    #[serde(default)]
681    pub from_type: Option<i32>,
682}
683
/// Map an `errno` from the create-file API to a human-readable message.
fn get_create_error_message(errno: i32) -> String {
    let known = match errno {
        -7 => "File or directory name error or access denied",
        -8 => "File or directory already exists",
        -10 => "Cloud storage capacity full",
        10 => "Failed to create file",
        31190 => "File not found",
        31355 => "Invalid parameter",
        31365 => "Total file size limit exceeded",
        _ => return format!("Unknown error: {}", errno),
    };
    known.to_string()
}
696
/// Map an `errno` from the precreate API to a human-readable message.
fn get_error_message(errno: i32) -> String {
    let known = match errno {
        -7 => "File or directory name error or access denied",
        -10 => "Insufficient capacity",
        _ => return format!("Unknown error: {}", errno),
    };
    known.to_string()
}
704
/// Default chunk size: 4 MiB.
const DEFAULT_CHUNK_SIZE: usize = 4 * 1024 * 1024;
/// Default number of chunks uploaded in parallel.
const DEFAULT_MAX_CONCURRENCY: usize = 10;

/// Options for simple upload methods
///
/// Use the builder pattern to customize upload behavior:
///
/// # Example
///
/// ```
/// use baidu_netdisk_sdk::upload::SimpleUploadOptions;
///
/// let options = SimpleUploadOptions::default()
///     .chunk_size(8 * 1024 * 1024)  // 8MB chunks
///     .max_concurrency(20)         // 20 parallel uploads
///     .r#type(1);                  // file type
/// ```
///
/// # Default Values
///
/// - `chunk_size`: 4MB (4194304 bytes)
/// - `max_concurrency`: 10
/// - `r#type`: 1
#[derive(Debug, Clone)]
pub struct SimpleUploadOptions {
    /// Size of each chunk in bytes (default: 4MB)
    pub chunk_size: usize,
    /// Maximum number of parallel chunk uploads (default: 10)
    pub max_concurrency: usize,
    /// File type hint (default: 1)
    pub r#type: i32,
}

impl Default for SimpleUploadOptions {
    /// 4 MiB chunks, 10 parallel uploads, type 1.
    fn default() -> Self {
        Self {
            chunk_size: DEFAULT_CHUNK_SIZE,
            max_concurrency: DEFAULT_MAX_CONCURRENCY,
            r#type: 1,
        }
    }
}

impl SimpleUploadOptions {
    /// Equivalent to [`SimpleUploadOptions::default`].
    pub fn new() -> Self {
        Self::default()
    }

    /// Set the size of each chunk in bytes.
    pub fn chunk_size(mut self, bytes: usize) -> Self {
        self.chunk_size = bytes;
        self
    }

    /// Set the maximum number of parallel chunk uploads.
    pub fn max_concurrency(mut self, limit: usize) -> Self {
        self.max_concurrency = limit;
        self
    }

    /// Set the file type hint.
    pub fn r#type(mut self, value: i32) -> Self {
        self.r#type = value;
        self
    }
}
768
769impl UploadClient {
770    /// Upload a file from local path (simple API)
771    ///
772    /// This is the simplest way to upload a file. It handles everything automatically:
773    /// - Opens and reads the file
774    /// - Calculates MD5 for each chunk
775    /// - Uploads missing chunks in parallel
776    /// - Creates the final file on the server
777    ///
778    /// # Example
779    ///
780    /// ```no_run
781    /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
782    /// use baidu_netdisk_sdk::BaiduNetDiskClient;
783    ///
784    /// let client = BaiduNetDiskClient::builder().build()?;
785    /// client.load_token_from_env()?;
786    ///
787    /// let response = client.upload()
788    ///     .upload_file("test.txt", "/remote/test.txt")
789    ///     .await?;
790    ///
791    /// println!("Uploaded: {} ({} bytes)", response.path, response.size);
792    /// # Ok(())
793    /// # }
794    /// ```
795    ///
796    /// # See Also
797    ///
798    /// - [`UploadClient::upload_file_with_options`] for custom chunk size and concurrency
799    /// - [`UploadClient::upload_reader`] for streaming upload with custom readers
800    /// - [`UploadClient::upload_bytes`] for uploading data already in memory
801    pub async fn upload_file<P: AsRef<std::path::Path>>(
802        &self,
803        local_path: P,
804        remote_path: &str,
805    ) -> NetDiskResult<CreateFileResponse> {
806        self.upload_file_with_options(local_path, remote_path, SimpleUploadOptions::default())
807            .await
808    }
809
810    /// Upload a file from local path with custom options
811    ///
812    /// Use this method to customize upload behavior:
813    /// - `chunk_size`: Size of each chunk (default: 4MB)
814    /// - `max_concurrency`: Maximum parallel uploads (default: 10)
815    /// - `r#type`: File type hint (default: 1)
816    ///
817    /// # Example
818    ///
819    /// ```no_run
820    /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
821    /// use baidu_netdisk_sdk::{BaiduNetDiskClient, upload::SimpleUploadOptions};
822    ///
823    /// let client = BaiduNetDiskClient::builder().build()?;
824    /// client.load_token_from_env()?;
825    ///
826    /// let options = SimpleUploadOptions::default()
827    ///     .chunk_size(8 * 1024 * 1024)  // 8MB chunks
828    ///     .max_concurrency(20);         // 20 parallel uploads
829    ///
830    /// let response = client.upload()
831    ///     .upload_file_with_options("large_video.mp4", "/remote/video.mp4", options)
832    ///     .await?;
833    ///
834    /// println!("Uploaded: {}", response.path);
835    /// # Ok(())
836    /// # }
837    /// ```
838    pub async fn upload_file_with_options<P: AsRef<std::path::Path>>(
839        &self,
840        local_path: P,
841        remote_path: &str,
842        options: SimpleUploadOptions,
843    ) -> NetDiskResult<CreateFileResponse> {
844        let file = std::fs::File::open(&local_path).map_err(|e| NetDiskError::Unknown {
845            message: format!(
846                "Failed to open file {}: {}",
847                local_path.as_ref().display(),
848                e
849            ),
850        })?;
851
852        let metadata = file.metadata().map_err(|e| NetDiskError::Unknown {
853            message: format!(
854                "Failed to get file metadata {}: {}",
855                local_path.as_ref().display(),
856                e
857            ),
858        })?;
859
860        let file_size = metadata.len();
861        debug!("File opened successfully: {} bytes", file_size);
862
863        let mut reader = std::io::BufReader::new(file);
864        self.upload_reader_with_options(&mut reader, file_size, remote_path, options)
865            .await
866    }
867
868    /// Upload from a Reader with seek support (streaming API)
869    ///
870    /// This is a lower-level API that accepts any `Read + Seek` reader.
871    /// Useful for:
872    /// - Custom file wrapping (e.g., encrypted files)
873    /// - Upload from memory-mapped files
874    /// - Testing with custom readers
875    ///
876    /// # Example
877    ///
878    /// ```no_run
879    /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
880    /// use baidu_netdisk_sdk::BaiduNetDiskClient;
881    /// use std::io::BufReader;
882    ///
883    /// let client = BaiduNetDiskClient::builder().build()?;
884    /// client.load_token_from_env()?;
885    ///
886    /// let file = std::fs::File::open("test.txt")?;
887    /// let metadata = file.metadata()?;
888    /// let file_size = metadata.len();
889    ///
890    /// let reader = BufReader::new(file);
891    /// let mut reader = reader;  // mutable for rewind
892    ///
893    /// let response = client.upload()
894    ///     .upload_reader(&mut reader, file_size, "/remote/test.txt")
895    ///     .await?;
896    ///
897    /// println!("Uploaded: {}", response.path);
898    /// # Ok(())
899    /// # }
900    /// ```
901    ///
902    /// # Memory Usage
903    ///
904    /// Memory is bounded by batch size (`max_concurrency * 2 * chunk_size`),
905    /// approximately 80MB by default, regardless of file size.
906    pub async fn upload_reader<R: std::io::Read + std::io::Seek>(
907        &self,
908        reader: &mut R,
909        file_size: u64,
910        remote_path: &str,
911    ) -> NetDiskResult<CreateFileResponse> {
912        self.upload_reader_with_options(
913            reader,
914            file_size,
915            remote_path,
916            SimpleUploadOptions::default(),
917        )
918        .await
919    }
920
921    /// Upload from a Reader with custom options
922    ///
923    /// # Example
924    ///
925    /// ```no_run
926    /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
927    /// use baidu_netdisk_sdk::{BaiduNetDiskClient, upload::SimpleUploadOptions};
928    ///
929    /// let client = BaiduNetDiskClient::builder().build()?;
930    /// client.load_token_from_env()?;
931    ///
932    /// let options = SimpleUploadOptions::default()
933    ///     .chunk_size(8 * 1024 * 1024)
934    ///     .max_concurrency(20);
935    ///
936    /// let file = std::fs::File::open("test.txt")?;
937    /// let metadata = file.metadata()?;
938    /// let file_size = metadata.len();
939    ///
940    /// let mut reader = std::io::BufReader::new(file);
941    /// let response = client.upload()
942    ///     .upload_reader_with_options(&mut reader, file_size, "/remote/test.txt", options)
943    ///     .await?;
944    ///
945    /// println!("Uploaded: {}", response.path);
946    /// # Ok(())
947    /// # }
948    /// ```
949    pub async fn upload_reader_with_options<R: std::io::Read + std::io::Seek>(
950        &self,
951        reader: &mut R,
952        file_size: u64,
953        remote_path: &str,
954        options: SimpleUploadOptions,
955    ) -> NetDiskResult<CreateFileResponse> {
956        let chunk_size = options.chunk_size;
957        let max_concurrency = options.max_concurrency;
958        let r#type = options.r#type;
959
960        debug!(
961            "upload_reader: file_size={} bytes, chunk_size={}",
962            file_size, chunk_size
963        );
964
965        let mut block_list: Vec<String> = Vec::new();
966        let mut read_chunks = 0usize;
967
968        loop {
969            let mut buffer = vec![0u8; chunk_size];
970            let bytes_read = match reader.read(&mut buffer) {
971                Ok(n) => n,
972                Err(e) => {
973                    debug!("First pass read error: {}", e);
974                    break;
975                }
976            };
977
978            if bytes_read == 0 {
979                break;
980            }
981
982            buffer.truncate(bytes_read);
983            let chunk_md5 = format!("{:x}", md5::compute(&buffer));
984            block_list.push(chunk_md5);
985            read_chunks += 1;
986        }
987
988        debug!("First pass: read {} chunks", read_chunks);
989
990        reader.rewind().map_err(|e| NetDiskError::Unknown {
991            message: format!("Failed to rewind reader: {}", e),
992        })?;
993
994        let precreate_options =
995            PrecreateOptions::new(remote_path, file_size, block_list.clone()).rtype(r#type);
996
997        let precreate_response = self.precreate(precreate_options).await?;
998
999        let missing_blocks: Vec<u32> = precreate_response.block_list;
1000        debug!(
1001            "Server returned {} blocks need upload",
1002            missing_blocks.len()
1003        );
1004
1005        if !missing_blocks.is_empty() {
1006            // Get upload server domain dynamically
1007            let locate_response = self
1008                .locate_upload(remote_path, &precreate_response.uploadid)
1009                .await?;
1010            let upload_server = locate_response.get_first_https_server();
1011            debug!("Located upload server: {:?}", upload_server);
1012
1013            let missing_blocks_set: std::collections::HashSet<u32> =
1014                missing_blocks.into_iter().collect();
1015
1016            let batch_size = max_concurrency * 2;
1017            let mut pending_chunks: Vec<(u32, Vec<u8>)> = Vec::with_capacity(batch_size);
1018            let mut all_chunk_md5s: Vec<(u32, String)> = Vec::new();
1019            let mut partseq = 0u32;
1020
1021            loop {
1022                let mut buffer = vec![0u8; chunk_size];
1023                let bytes_read = match reader.read(&mut buffer) {
1024                    Ok(n) => n,
1025                    Err(e) => {
1026                        debug!("Second pass read error: {}", e);
1027                        break;
1028                    }
1029                };
1030
1031                if bytes_read == 0 {
1032                    break;
1033                }
1034
1035                buffer.truncate(bytes_read);
1036                let chunk_md5 = format!("{:x}", md5::compute(&buffer));
1037
1038                if missing_blocks_set.contains(&partseq) {
1039                    pending_chunks.push((partseq, buffer));
1040                } else {
1041                    all_chunk_md5s.push((partseq, chunk_md5.clone()));
1042                }
1043
1044                if pending_chunks.len() >= batch_size
1045                    || (partseq + 1 == read_chunks as u32 && !pending_chunks.is_empty())
1046                {
1047                    let batch_results = self
1048                        .upload_chunks_parallel(
1049                            remote_path,
1050                            &precreate_response.uploadid,
1051                            std::mem::take(&mut pending_chunks),
1052                            max_concurrency,
1053                            upload_server.as_deref(),
1054                        )
1055                        .await?;
1056
1057                    for (seq, md5) in batch_results {
1058                        all_chunk_md5s.push((seq, md5));
1059                    }
1060                }
1061
1062                partseq += 1;
1063            }
1064
1065            all_chunk_md5s.sort_by_key(|(i, _)| *i);
1066            let new_block_list: Vec<String> =
1067                all_chunk_md5s.into_iter().map(|(_, md5)| md5).collect();
1068
1069            let create_options = CreateFileOptions::new(
1070                remote_path,
1071                file_size,
1072                new_block_list,
1073                &precreate_response.uploadid,
1074            )
1075            .rtype(r#type);
1076
1077            self.create_file(create_options).await
1078        } else {
1079            let create_options = CreateFileOptions::new(
1080                remote_path,
1081                file_size,
1082                block_list,
1083                &precreate_response.uploadid,
1084            )
1085            .rtype(r#type);
1086
1087            self.create_file(create_options).await
1088        }
1089    }
1090
1091    /// Upload bytes from memory (simple API)
1092    ///
1093    /// Use this method when you already have the data in memory.
1094    /// For large data, consider using [`UploadClient::upload_file`] or [`UploadClient::upload_reader`] instead
1095    /// to avoid loading the entire data into memory.
1096    ///
1097    /// # Example
1098    ///
1099    /// ```no_run
1100    /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
1101    /// use baidu_netdisk_sdk::BaiduNetDiskClient;
1102    ///
1103    /// let client = BaiduNetDiskClient::builder().build()?;
1104    /// client.load_token_from_env()?;
1105    ///
1106    /// let data = b"Hello, World!";
1107    /// let response = client.upload()
1108    ///     .upload_bytes(data, "/remote/hello.txt")
1109    ///     .await?;
1110    ///
1111    /// println!("Uploaded: {}", response.path);
1112    /// # Ok(())
1113    /// # }
1114    /// ```
1115    ///
1116    /// # Memory Note
1117    ///
1118    /// The entire `data` slice will be held in memory during upload.
1119    /// For large files, use [`UploadClient::upload_file`] which streams from disk.
1120    pub async fn upload_bytes(
1121        &self,
1122        data: &[u8],
1123        remote_path: &str,
1124    ) -> NetDiskResult<CreateFileResponse> {
1125        self.upload_bytes_with_options(data, remote_path, SimpleUploadOptions::default())
1126            .await
1127    }
1128
1129    /// Upload bytes from memory with custom options
1130    ///
1131    /// # Example
1132    ///
1133    /// ```no_run
1134    /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
1135    /// use baidu_netdisk_sdk::{BaiduNetDiskClient, upload::SimpleUploadOptions};
1136    ///
1137    /// let client = BaiduNetDiskClient::builder().build()?;
1138    /// client.load_token_from_env()?;
1139    ///
1140    /// let options = SimpleUploadOptions::default()
1141    ///     .chunk_size(8 * 1024 * 1024)
1142    ///     .max_concurrency(20);
1143    ///
1144    /// let data = b"Hello, World!";
1145    /// let response = client.upload()
1146    ///     .upload_bytes_with_options(data, "/remote/hello.txt", options)
1147    ///     .await?;
1148    ///
1149    /// println!("Uploaded: {}", response.path);
1150    /// # Ok(())
1151    /// # }
1152    /// ```
1153    pub async fn upload_bytes_with_options(
1154        &self,
1155        data: &[u8],
1156        remote_path: &str,
1157        options: SimpleUploadOptions,
1158    ) -> NetDiskResult<CreateFileResponse> {
1159        let file_size = data.len() as u64;
1160        let chunk_size = options.chunk_size;
1161        let max_concurrency = options.max_concurrency;
1162        let r#type = options.r#type;
1163
1164        let block_list: Vec<String> = data
1165            .chunks(chunk_size)
1166            .map(|chunk| format!("{:x}", md5::compute(chunk)))
1167            .collect();
1168
1169        let precreate_options =
1170            PrecreateOptions::new(remote_path, file_size, block_list.clone()).rtype(r#type);
1171
1172        let precreate_response = self.precreate(precreate_options).await?;
1173
1174        let missing_blocks_set: std::collections::HashSet<u32> =
1175            precreate_response.block_list.into_iter().collect();
1176
1177        let chunks_to_upload: Vec<(u32, Vec<u8>)> = data
1178            .chunks(chunk_size)
1179            .enumerate()
1180            .filter(|(i, _)| missing_blocks_set.contains(&(*i as u32)))
1181            .map(|(i, chunk)| (i as u32, chunk.to_vec()))
1182            .collect();
1183
1184        if !chunks_to_upload.is_empty() {
1185            // Get upload server domain dynamically
1186            let locate_response = self
1187                .locate_upload(remote_path, &precreate_response.uploadid)
1188                .await?;
1189            let upload_server = locate_response.get_first_https_server();
1190            debug!("Located upload server: {:?}", upload_server);
1191
1192            let chunk_results = self
1193                .upload_chunks_parallel(
1194                    remote_path,
1195                    &precreate_response.uploadid,
1196                    chunks_to_upload,
1197                    max_concurrency,
1198                    upload_server.as_deref(),
1199                )
1200                .await?;
1201
1202            let mut sorted_results = chunk_results;
1203            sorted_results.sort_by_key(|(i, _)| *i);
1204            let new_block_list: Vec<String> =
1205                sorted_results.into_iter().map(|(_, md5)| md5).collect();
1206
1207            let create_options = CreateFileOptions::new(
1208                remote_path,
1209                file_size,
1210                new_block_list,
1211                &precreate_response.uploadid,
1212            )
1213            .rtype(r#type);
1214
1215            self.create_file(create_options).await
1216        } else {
1217            let create_options = CreateFileOptions::new(
1218                remote_path,
1219                file_size,
1220                block_list,
1221                &precreate_response.uploadid,
1222            )
1223            .rtype(r#type);
1224
1225            self.create_file(create_options).await
1226        }
1227    }
1228}