1use crate::client::TokenGetter;
58use crate::errors::{NetDiskError, NetDiskResult};
59use crate::http::HttpClient;
60use futures::stream::{self, StreamExt};
61use log::debug;
62use serde::Deserialize;
63use std::sync::Arc;
64
/// Client for the chunked-upload flow: `precreate` → upload chunks → `create`.
///
/// Derives `Clone`; the token source is behind an `Arc`, so clones share it.
#[derive(Debug, Clone)]
pub struct UploadClient {
    // Transport used for every API request.
    http_client: HttpClient,
    // Supplies the access token attached to each request.
    token_getter: Arc<dyn TokenGetter>,
}
71
72impl UploadClient {
73 pub fn new(http_client: HttpClient, token_getter: Arc<dyn TokenGetter>) -> Self {
77 Self {
78 http_client,
79 token_getter,
80 }
81 }
82
83 pub fn http_client(&self) -> &HttpClient {
85 &self.http_client
86 }
87
88 pub async fn precreate(&self, options: PrecreateOptions) -> NetDiskResult<PrecreateResponse> {
93 let token = self.token_getter.get_token().await?;
94 let block_list_json =
95 serde_json::to_string(&options.block_list).map_err(|e| NetDiskError::Unknown {
96 message: format!("Failed to serialize block_list: {}", e),
97 })?;
98
99 let params = vec![
100 ("method", "precreate"),
101 ("access_token", token.access_token.as_str()),
102 ];
103
104 let size_str = options.size.to_string();
105 let isdir_str = options.isdir.to_string();
106 let rtype_str = options.rtype.to_string();
107
108 let form_data = vec![
109 ("path", options.path.as_str()),
110 ("size", size_str.as_str()),
111 ("isdir", isdir_str.as_str()),
112 ("block_list", block_list_json.as_str()),
113 ("autoinit", "1"),
114 ("rtype", rtype_str.as_str()),
115 ];
116
117 debug!(
118 "Precreate upload: path={}, size={}, isdir={}, block_list={:?}",
119 options.path, options.size, options.isdir, options.block_list
120 );
121
122 let response: PrecreateResponse = self
123 .http_client
124 .post_form("/rest/2.0/xpan/file", Some(&form_data), Some(¶ms))
125 .await?;
126
127 if response.errno != 0 {
128 let error_msg = get_error_message(response.errno);
129 return Err(NetDiskError::api_error(response.errno, &error_msg));
130 }
131
132 debug!(
133 "Precreate success: uploadid={}, block_list={:?}",
134 response.uploadid, response.block_list
135 );
136
137 Ok(response)
138 }
139}
140
/// Parameters for the `precreate` API call.
#[derive(Debug, Clone, Default)]
pub struct PrecreateOptions {
    /// Absolute remote path of the target file.
    pub path: String,
    /// Total file size in bytes.
    pub size: u64,
    /// 1 for a directory, 0 for a regular file.
    pub isdir: i32,
    /// MD5 (hex) of each chunk, in order.
    pub block_list: Vec<String>,
    /// Conflict-resolution policy code sent as `rtype`.
    pub rtype: i32,
    /// Existing upload session id — NOTE(review): not currently sent by
    /// `precreate`; confirm intended use.
    pub uploadid: Option<String>,
    /// Whole-file MD5, if known — NOTE(review): not currently sent either.
    pub content_md5: Option<String>,
    /// First-slice MD5, if known — NOTE(review): not currently sent either.
    pub slice_md5: Option<String>,
    /// Local creation time (presumably unix seconds — TODO confirm).
    pub local_ctime: Option<u64>,
    /// Local modification time (presumably unix seconds — TODO confirm).
    pub local_mtime: Option<u64>,
}
165
166impl PrecreateOptions {
167 pub fn new(path: &str, size: u64, block_list: Vec<String>) -> Self {
169 Self {
170 path: path.to_string(),
171 size,
172 isdir: 0,
173 block_list,
174 rtype: 1,
175 uploadid: None,
176 content_md5: None,
177 slice_md5: None,
178 local_ctime: None,
179 local_mtime: None,
180 }
181 }
182
183 pub fn isdir(mut self, isdir: bool) -> Self {
185 self.isdir = if isdir { 1 } else { 0 };
186 self
187 }
188
189 pub fn rtype(mut self, rtype: i32) -> Self {
191 self.rtype = rtype;
192 self
193 }
194
195 pub fn uploadid(mut self, uploadid: &str) -> Self {
197 self.uploadid = Some(uploadid.to_string());
198 self
199 }
200
201 pub fn content_md5(mut self, md5: &str) -> Self {
203 self.content_md5 = Some(md5.to_string());
204 self
205 }
206
207 pub fn slice_md5(mut self, md5: &str) -> Self {
209 self.slice_md5 = Some(md5.to_string());
210 self
211 }
212
213 pub fn local_ctime(mut self, ctime: u64) -> Self {
215 self.local_ctime = Some(ctime);
216 self
217 }
218
219 pub fn local_mtime(mut self, mtime: u64) -> Self {
221 self.local_mtime = Some(mtime);
222 self
223 }
224}
225
226#[derive(Debug, Deserialize)]
228pub struct PrecreateResponse {
229 pub errno: i32,
231 #[serde(default)]
233 pub path: Option<String>,
234 pub uploadid: String,
236 #[serde(rename = "return_type")]
238 pub return_type: i32,
239 #[serde(rename = "block_list")]
241 pub block_list: Vec<u32>,
242}
243
/// One upload-server entry in a `locateupload` response.
#[derive(Debug, Deserialize)]
pub struct LocateUploadServer {
    /// Server URL, scheme included (callers filter on `https://`).
    pub server: String,
}
250
251#[derive(Debug, Deserialize)]
253pub struct LocateUploadResponse {
254 #[serde(default)]
256 pub bak_server: Vec<String>,
257 #[serde(default)]
259 pub bak_servers: Vec<LocateUploadServer>,
260 pub client_ip: String,
262 pub error_code: i32,
264 pub error_msg: String,
266 pub expire: i32,
268 pub host: String,
270 #[serde(default)]
272 pub newno: String,
273 #[serde(default)]
275 pub quic_server: Vec<String>,
276 #[serde(default)]
278 pub quic_servers: Vec<LocateUploadServer>,
279 pub request_id: u64,
281 #[serde(default)]
283 pub server: Vec<String>,
284 pub server_time: u64,
286 pub servers: Vec<LocateUploadServer>,
288 pub sl: i32,
290}
291
292impl LocateUploadResponse {
293 pub fn get_https_servers(&self) -> Vec<String> {
297 self.servers
298 .iter()
299 .filter(|s| s.server.starts_with("https://"))
300 .map(|s| s.server.clone())
301 .collect()
302 }
303
304 pub fn get_first_https_server(&self) -> Option<String> {
311 let https_servers = self.get_https_servers();
312 if https_servers.is_empty() {
313 None
314 } else {
315 Some(https_servers[0].clone())
316 }
317 }
318}
319
impl UploadClient {
    /// Resolve candidate upload servers for an existing upload session via
    /// the PCS `locateupload` API.
    ///
    /// NOTE(review): `appid=250528` is hardcoded in the URL — presumably an
    /// official client app id; confirm.
    ///
    /// # Errors
    /// Fails on token/transport errors or a non-zero `error_code` from the
    /// server.
    pub async fn locate_upload(
        &self,
        path: &str,
        uploadid: &str,
    ) -> NetDiskResult<LocateUploadResponse> {
        let token = self.token_getter.get_token().await?;

        let url = format!(
            "https://d.pcs.baidu.com/rest/2.0/pcs/file?method=locateupload&appid=250528&access_token={}&path={}&uploadid={}&upload_version=2.0",
            urlencoding::encode(&token.access_token),
            urlencoding::encode(path),
            urlencoding::encode(uploadid)
        );

        debug!("Locate upload: path={}, uploadid={}", path, uploadid);

        let response: LocateUploadResponse = self.http_client.get(&url, None).await?;

        if response.error_code != 0 {
            return Err(NetDiskError::api_error(
                response.error_code,
                &response.error_msg,
            ));
        }

        debug!(
            "Locate upload success: host={}, servers={}",
            response.host,
            response.servers.len()
        );

        Ok(response)
    }

    /// Upload a single chunk via the PCS `superfile2` API.
    ///
    /// Uses `server_url` when given, otherwise a default upload host. The
    /// server's reported chunk MD5 is returned but not validated here.
    pub async fn upload_chunk(
        &self,
        options: UploadChunkOptions,
        server_url: Option<&str>,
    ) -> NetDiskResult<UploadChunkResponse> {
        let token = self.token_getter.get_token().await?;

        let server = server_url.unwrap_or("https://c3.pcs.baidu.com");
        let url = format!(
            "{}/rest/2.0/pcs/superfile2?method=upload&access_token={}&type=tmpfile&path={}&uploadid={}&partseq={}",
            server,
            urlencoding::encode(&token.access_token),
            urlencoding::encode(&options.path),
            urlencoding::encode(&options.uploadid),
            options.partseq
        );

        debug!(
            "Upload chunk: path={}, uploadid={}, partseq={}, data_size={}, server={}",
            options.path,
            options.uploadid,
            options.partseq,
            options.data.len(),
            server
        );

        // The chunk travels as the multipart "file" part, named "chunk.dat".
        let response: UploadChunkResponse = self
            .http_client
            .post_multipart(
                &url,
                "file".to_string(),
                "chunk.dat".to_string(),
                options.data,
            )
            .await?;

        Ok(response)
    }

    /// Upload many chunks concurrently, returning `(partseq, md5)` pairs.
    ///
    /// At most `max_concurrency` requests are in flight at once
    /// (`buffer_unordered`). Results arrive in completion order, so callers
    /// needing chunk order must sort the pairs. Fails fast on the first
    /// chunk error.
    pub async fn upload_chunks_parallel(
        &self,
        remote_path: &str,
        uploadid: &str,
        chunks: Vec<(u32, Vec<u8>)>,
        max_concurrency: usize,
        server_url: Option<&str>,
    ) -> NetDiskResult<Vec<(u32, String)>> {
        let token = self.token_getter.get_token().await?;
        let server = server_url.unwrap_or("https://c3.pcs.baidu.com").to_string();

        debug!(
            "Uploading {} chunks in parallel (max_concurrency: {}, server: {})",
            chunks.len(),
            max_concurrency,
            server
        );

        // Owned copies so the `move` closure (and the futures it builds)
        // need not borrow `self`.
        let access_token_str = token.access_token;
        let remote_path_str = remote_path.to_string();
        let uploadid_str = uploadid.to_string();
        let http_client = self.http_client.clone();

        let mut stream = stream::iter(chunks)
            .map(move |(partseq, data)| {
                // Each per-chunk future gets its own clones of the shared state.
                let path = remote_path_str.clone();
                let uid = access_token_str.clone();
                let upid = uploadid_str.clone();
                let client = http_client.clone();
                let server_clone = server.clone();

                async move {
                    let url = format!(
                        "{}/rest/2.0/pcs/superfile2?method=upload&access_token={}&type=tmpfile&path={}&uploadid={}&partseq={}",
                        server_clone,
                        urlencoding::encode(&uid),
                        urlencoding::encode(&path),
                        urlencoding::encode(&upid),
                        partseq
                    );

                    debug!("Uploading chunk {} ({} bytes)", partseq, data.len());

                    let response: UploadChunkResponse = client
                        .post_multipart(&url, "file".to_string(), "chunk.dat".to_string(), data)
                        .await?;

                    Ok((partseq, response.md5))
                }
            })
            .buffer_unordered(max_concurrency);

        let mut chunk_md5s = Vec::new();
        while let Some(result) = stream.next().await {
            match result {
                Ok((partseq, md5)) => {
                    chunk_md5s.push((partseq, md5));
                }
                Err(e) => {
                    // First failure aborts; remaining in-flight futures drop.
                    return Err(e);
                }
            }
        }

        debug!("All chunks uploaded successfully");
        Ok(chunk_md5s)
    }

    /// Finalize an upload session via the `create` API, assembling the
    /// uploaded chunks into the remote file.
    ///
    /// # Errors
    /// Fails on token/transport errors or a non-zero `errno` from the server.
    pub async fn create_file(
        &self,
        options: CreateFileOptions,
    ) -> NetDiskResult<CreateFileResponse> {
        let token = self.token_getter.get_token().await?;
        let block_list_json =
            serde_json::to_string(&options.block_list).map_err(|e| NetDiskError::Unknown {
                message: format!("Failed to serialize block_list: {}", e),
            })?;

        let params = vec![
            ("method", "create"),
            ("access_token", token.access_token.as_str()),
        ];

        // Rendered up front so `form_data` can borrow them.
        let size_str = options.size.to_string();
        let isdir_str = options.isdir.to_string();
        let rtype_str = options.rtype.to_string();

        let mut form_data = vec![
            ("path", options.path.as_str()),
            ("size", size_str.as_str()),
            ("isdir", isdir_str.as_str()),
            ("block_list", block_list_json.as_str()),
            ("uploadid", options.uploadid.as_str()),
            ("rtype", rtype_str.as_str()),
        ];

        let ctime_str = options.local_ctime.map(|t| t.to_string());
        let mtime_str = options.local_mtime.map(|t| t.to_string());

        // Timestamps are optional; only sent when supplied.
        if let Some(ref ctime) = ctime_str {
            form_data.push(("local_ctime", ctime.as_str()));
        }
        if let Some(ref mtime) = mtime_str {
            form_data.push(("local_mtime", mtime.as_str()));
        }

        debug!(
            "Create file: path={}, size={}, isdir={}, uploadid={}",
            options.path, options.size, options.isdir, options.uploadid
        );

        let response: CreateFileResponse = self
            .http_client
            .post_form("/rest/2.0/xpan/file", Some(&form_data), Some(&params))
            .await?;

        if response.errno != 0 {
            let error_msg = get_create_error_message(response.errno);
            return Err(NetDiskError::api_error(response.errno, &error_msg));
        }

        Ok(response)
    }
}
555
/// Parameters for uploading one chunk via `superfile2`.
#[derive(Debug, Clone)]
pub struct UploadChunkOptions {
    /// Absolute remote path of the target file.
    pub path: String,
    /// Upload session id from `precreate`.
    pub uploadid: String,
    /// Zero-based chunk index.
    pub partseq: u32,
    /// Raw chunk bytes.
    pub data: Vec<u8>,
}
568
569impl UploadChunkOptions {
570 pub fn new(path: &str, uploadid: &str, partseq: u32, data: Vec<u8>) -> Self {
572 Self {
573 path: path.to_string(),
574 uploadid: uploadid.to_string(),
575 partseq,
576 data,
577 }
578 }
579}
580
/// Response payload of a chunk upload.
#[derive(Debug, Deserialize)]
pub struct UploadChunkResponse {
    /// MD5 (hex) of the chunk, as reported by the server.
    pub md5: String,
}
587
/// Parameters for the `create` API call that finalizes an upload.
#[derive(Debug, Clone, Default)]
pub struct CreateFileOptions {
    /// Absolute remote path of the target file.
    pub path: String,
    /// Total file size in bytes.
    pub size: u64,
    /// 1 for a directory, 0 for a regular file.
    pub isdir: i32,
    /// MD5 (hex) of each chunk, in order.
    pub block_list: Vec<String>,
    /// Upload session id from `precreate`.
    pub uploadid: String,
    /// Conflict-resolution policy code sent as `rtype`.
    pub rtype: i32,
    /// Local creation time, sent as `local_ctime` when set.
    pub local_ctime: Option<u64>,
    /// Local modification time, sent as `local_mtime` when set.
    pub local_mtime: Option<u64>,
}
608
609impl CreateFileOptions {
610 pub fn new(path: &str, size: u64, block_list: Vec<String>, uploadid: &str) -> Self {
612 Self {
613 path: path.to_string(),
614 size,
615 isdir: 0,
616 block_list,
617 uploadid: uploadid.to_string(),
618 rtype: 1,
619 local_ctime: None,
620 local_mtime: None,
621 }
622 }
623
624 pub fn isdir(mut self, isdir: bool) -> Self {
626 self.isdir = if isdir { 1 } else { 0 };
627 self
628 }
629
630 pub fn rtype(mut self, rtype: i32) -> Self {
632 self.rtype = rtype;
633 self
634 }
635
636 pub fn local_ctime(mut self, ctime: u64) -> Self {
638 self.local_ctime = Some(ctime);
639 self
640 }
641
642 pub fn local_mtime(mut self, mtime: u64) -> Self {
644 self.local_mtime = Some(mtime);
645 self
646 }
647}
648
649#[derive(Debug, Deserialize)]
651pub struct CreateFileResponse {
652 pub errno: i32,
654 #[serde(rename = "fs_id")]
656 pub fs_id: u64,
657 pub md5: Option<String>,
659 #[serde(rename = "server_filename")]
661 #[serde(default)]
662 pub server_filename: Option<String>,
663 pub category: i32,
665 pub path: String,
667 pub size: u64,
669 pub ctime: u64,
671 pub mtime: u64,
673 pub isdir: i32,
675 #[serde(default)]
677 pub name: Option<String>,
678 #[serde(rename = "from_type")]
680 #[serde(default)]
681 pub from_type: Option<i32>,
682}
683
/// Human-readable message for an errno returned by the `create` API.
fn get_create_error_message(errno: i32) -> String {
    let known = match errno {
        -7 => "File or directory name error or access denied",
        -8 => "File or directory already exists",
        -10 => "Cloud storage capacity full",
        10 => "Failed to create file",
        31190 => "File not found",
        31355 => "Invalid parameter",
        31365 => "Total file size limit exceeded",
        other => return format!("Unknown error: {}", other),
    };
    known.to_string()
}
696
/// Human-readable message for an errno returned by the `precreate` API.
fn get_error_message(errno: i32) -> String {
    match errno {
        -7 => String::from("File or directory name error or access denied"),
        -10 => String::from("Insufficient capacity"),
        other => format!("Unknown error: {}", other),
    }
}
704
/// Default size of each upload chunk: 4 MiB.
const DEFAULT_CHUNK_SIZE: usize = 4 * 1024 * 1024;
/// Default number of chunk uploads in flight at once.
const DEFAULT_MAX_CONCURRENCY: usize = 10;
707
/// Tuning knobs for the high-level upload helpers.
#[derive(Debug, Clone)]
pub struct SimpleUploadOptions {
    /// Size of each upload chunk in bytes.
    pub chunk_size: usize,
    /// Maximum number of chunks uploaded concurrently.
    pub max_concurrency: usize,
    /// Conflict-resolution policy code forwarded as `rtype`.
    pub r#type: i32,
}
737
738impl Default for SimpleUploadOptions {
739 fn default() -> Self {
740 Self {
741 chunk_size: DEFAULT_CHUNK_SIZE,
742 max_concurrency: DEFAULT_MAX_CONCURRENCY,
743 r#type: 1,
744 }
745 }
746}
747
748impl SimpleUploadOptions {
749 pub fn new() -> Self {
750 Self::default()
751 }
752
753 pub fn chunk_size(mut self, size: usize) -> Self {
754 self.chunk_size = size;
755 self
756 }
757
758 pub fn max_concurrency(mut self, concurrency: usize) -> Self {
759 self.max_concurrency = concurrency;
760 self
761 }
762
763 pub fn r#type(mut self, r#type: i32) -> Self {
764 self.r#type = r#type;
765 self
766 }
767}
768
769impl UploadClient {
770 pub async fn upload_file<P: AsRef<std::path::Path>>(
802 &self,
803 local_path: P,
804 remote_path: &str,
805 ) -> NetDiskResult<CreateFileResponse> {
806 self.upload_file_with_options(local_path, remote_path, SimpleUploadOptions::default())
807 .await
808 }
809
810 pub async fn upload_file_with_options<P: AsRef<std::path::Path>>(
839 &self,
840 local_path: P,
841 remote_path: &str,
842 options: SimpleUploadOptions,
843 ) -> NetDiskResult<CreateFileResponse> {
844 let file = std::fs::File::open(&local_path).map_err(|e| NetDiskError::Unknown {
845 message: format!(
846 "Failed to open file {}: {}",
847 local_path.as_ref().display(),
848 e
849 ),
850 })?;
851
852 let metadata = file.metadata().map_err(|e| NetDiskError::Unknown {
853 message: format!(
854 "Failed to get file metadata {}: {}",
855 local_path.as_ref().display(),
856 e
857 ),
858 })?;
859
860 let file_size = metadata.len();
861 debug!("File opened successfully: {} bytes", file_size);
862
863 let mut reader = std::io::BufReader::new(file);
864 self.upload_reader_with_options(&mut reader, file_size, remote_path, options)
865 .await
866 }
867
868 pub async fn upload_reader<R: std::io::Read + std::io::Seek>(
907 &self,
908 reader: &mut R,
909 file_size: u64,
910 remote_path: &str,
911 ) -> NetDiskResult<CreateFileResponse> {
912 self.upload_reader_with_options(
913 reader,
914 file_size,
915 remote_path,
916 SimpleUploadOptions::default(),
917 )
918 .await
919 }
920
921 pub async fn upload_reader_with_options<R: std::io::Read + std::io::Seek>(
950 &self,
951 reader: &mut R,
952 file_size: u64,
953 remote_path: &str,
954 options: SimpleUploadOptions,
955 ) -> NetDiskResult<CreateFileResponse> {
956 let chunk_size = options.chunk_size;
957 let max_concurrency = options.max_concurrency;
958 let r#type = options.r#type;
959
960 debug!(
961 "upload_reader: file_size={} bytes, chunk_size={}",
962 file_size, chunk_size
963 );
964
965 let mut block_list: Vec<String> = Vec::new();
966 let mut read_chunks = 0usize;
967
968 loop {
969 let mut buffer = vec![0u8; chunk_size];
970 let bytes_read = match reader.read(&mut buffer) {
971 Ok(n) => n,
972 Err(e) => {
973 debug!("First pass read error: {}", e);
974 break;
975 }
976 };
977
978 if bytes_read == 0 {
979 break;
980 }
981
982 buffer.truncate(bytes_read);
983 let chunk_md5 = format!("{:x}", md5::compute(&buffer));
984 block_list.push(chunk_md5);
985 read_chunks += 1;
986 }
987
988 debug!("First pass: read {} chunks", read_chunks);
989
990 reader.rewind().map_err(|e| NetDiskError::Unknown {
991 message: format!("Failed to rewind reader: {}", e),
992 })?;
993
994 let precreate_options =
995 PrecreateOptions::new(remote_path, file_size, block_list.clone()).rtype(r#type);
996
997 let precreate_response = self.precreate(precreate_options).await?;
998
999 let missing_blocks: Vec<u32> = precreate_response.block_list;
1000 debug!(
1001 "Server returned {} blocks need upload",
1002 missing_blocks.len()
1003 );
1004
1005 if !missing_blocks.is_empty() {
1006 let locate_response = self
1008 .locate_upload(remote_path, &precreate_response.uploadid)
1009 .await?;
1010 let upload_server = locate_response.get_first_https_server();
1011 debug!("Located upload server: {:?}", upload_server);
1012
1013 let missing_blocks_set: std::collections::HashSet<u32> =
1014 missing_blocks.into_iter().collect();
1015
1016 let batch_size = max_concurrency * 2;
1017 let mut pending_chunks: Vec<(u32, Vec<u8>)> = Vec::with_capacity(batch_size);
1018 let mut all_chunk_md5s: Vec<(u32, String)> = Vec::new();
1019 let mut partseq = 0u32;
1020
1021 loop {
1022 let mut buffer = vec![0u8; chunk_size];
1023 let bytes_read = match reader.read(&mut buffer) {
1024 Ok(n) => n,
1025 Err(e) => {
1026 debug!("Second pass read error: {}", e);
1027 break;
1028 }
1029 };
1030
1031 if bytes_read == 0 {
1032 break;
1033 }
1034
1035 buffer.truncate(bytes_read);
1036 let chunk_md5 = format!("{:x}", md5::compute(&buffer));
1037
1038 if missing_blocks_set.contains(&partseq) {
1039 pending_chunks.push((partseq, buffer));
1040 } else {
1041 all_chunk_md5s.push((partseq, chunk_md5.clone()));
1042 }
1043
1044 if pending_chunks.len() >= batch_size
1045 || (partseq + 1 == read_chunks as u32 && !pending_chunks.is_empty())
1046 {
1047 let batch_results = self
1048 .upload_chunks_parallel(
1049 remote_path,
1050 &precreate_response.uploadid,
1051 std::mem::take(&mut pending_chunks),
1052 max_concurrency,
1053 upload_server.as_deref(),
1054 )
1055 .await?;
1056
1057 for (seq, md5) in batch_results {
1058 all_chunk_md5s.push((seq, md5));
1059 }
1060 }
1061
1062 partseq += 1;
1063 }
1064
1065 all_chunk_md5s.sort_by_key(|(i, _)| *i);
1066 let new_block_list: Vec<String> =
1067 all_chunk_md5s.into_iter().map(|(_, md5)| md5).collect();
1068
1069 let create_options = CreateFileOptions::new(
1070 remote_path,
1071 file_size,
1072 new_block_list,
1073 &precreate_response.uploadid,
1074 )
1075 .rtype(r#type);
1076
1077 self.create_file(create_options).await
1078 } else {
1079 let create_options = CreateFileOptions::new(
1080 remote_path,
1081 file_size,
1082 block_list,
1083 &precreate_response.uploadid,
1084 )
1085 .rtype(r#type);
1086
1087 self.create_file(create_options).await
1088 }
1089 }
1090
1091 pub async fn upload_bytes(
1121 &self,
1122 data: &[u8],
1123 remote_path: &str,
1124 ) -> NetDiskResult<CreateFileResponse> {
1125 self.upload_bytes_with_options(data, remote_path, SimpleUploadOptions::default())
1126 .await
1127 }
1128
1129 pub async fn upload_bytes_with_options(
1154 &self,
1155 data: &[u8],
1156 remote_path: &str,
1157 options: SimpleUploadOptions,
1158 ) -> NetDiskResult<CreateFileResponse> {
1159 let file_size = data.len() as u64;
1160 let chunk_size = options.chunk_size;
1161 let max_concurrency = options.max_concurrency;
1162 let r#type = options.r#type;
1163
1164 let block_list: Vec<String> = data
1165 .chunks(chunk_size)
1166 .map(|chunk| format!("{:x}", md5::compute(chunk)))
1167 .collect();
1168
1169 let precreate_options =
1170 PrecreateOptions::new(remote_path, file_size, block_list.clone()).rtype(r#type);
1171
1172 let precreate_response = self.precreate(precreate_options).await?;
1173
1174 let missing_blocks_set: std::collections::HashSet<u32> =
1175 precreate_response.block_list.into_iter().collect();
1176
1177 let chunks_to_upload: Vec<(u32, Vec<u8>)> = data
1178 .chunks(chunk_size)
1179 .enumerate()
1180 .filter(|(i, _)| missing_blocks_set.contains(&(*i as u32)))
1181 .map(|(i, chunk)| (i as u32, chunk.to_vec()))
1182 .collect();
1183
1184 if !chunks_to_upload.is_empty() {
1185 let locate_response = self
1187 .locate_upload(remote_path, &precreate_response.uploadid)
1188 .await?;
1189 let upload_server = locate_response.get_first_https_server();
1190 debug!("Located upload server: {:?}", upload_server);
1191
1192 let chunk_results = self
1193 .upload_chunks_parallel(
1194 remote_path,
1195 &precreate_response.uploadid,
1196 chunks_to_upload,
1197 max_concurrency,
1198 upload_server.as_deref(),
1199 )
1200 .await?;
1201
1202 let mut sorted_results = chunk_results;
1203 sorted_results.sort_by_key(|(i, _)| *i);
1204 let new_block_list: Vec<String> =
1205 sorted_results.into_iter().map(|(_, md5)| md5).collect();
1206
1207 let create_options = CreateFileOptions::new(
1208 remote_path,
1209 file_size,
1210 new_block_list,
1211 &precreate_response.uploadid,
1212 )
1213 .rtype(r#type);
1214
1215 self.create_file(create_options).await
1216 } else {
1217 let create_options = CreateFileOptions::new(
1218 remote_path,
1219 file_size,
1220 block_list,
1221 &precreate_response.uploadid,
1222 )
1223 .rtype(r#type);
1224
1225 self.create_file(create_options).await
1226 }
1227 }
1228}