#[cfg(feature = "s3-sync")]
use anyhow::Result;
#[cfg(feature = "s3-sync")]
use colored::Colorize;
#[cfg(feature = "s3-sync")]
use std::collections::HashMap;

#[cfg(feature = "s3-sync")]
use super::{decode_base64, encode_base64, ConfigFile};

#[cfg(feature = "s3-sync")]
use aws_config::BehaviorVersion;
#[cfg(feature = "s3-sync")]
use aws_sdk_s3::{config::Credentials, primitives::ByteStream, Client};

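/// Connection settings for an S3 (or S3-compatible) bucket, resolved from stored
/// sync configuration, environment variables, or interactive prompts.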
#[cfg(feature = "s3-sync")]
#[derive(Debug, Clone)]
pub struct S3Config {
    pub bucket_name: String,
    pub region: String,
    pub access_key_id: String,
    pub secret_access_key: String,
    pub endpoint_url: Option<String>,
}

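/// Sync provider that stores configuration files under a fixed folder prefix in an S3 bucket.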
#[cfg(feature = "s3-sync")]
pub struct S3Provider {
    client: Client,
    bucket_name: String,
    folder_prefix: String,
}

#[cfg(feature = "s3-sync")]
impl S3Provider {
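    /// Builds a provider for the named sync target: resolves credentials, constructs
    /// an AWS SDK client, and honors a custom endpoint for S3-compatible services.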
    pub async fn new_with_provider(provider_name: &str) -> Result<Self> {
        let s3_config = Self::get_s3_config(provider_name).await?;

        let mut config_builder = aws_config::defaults(BehaviorVersion::latest())
            .region(aws_config::Region::new(s3_config.region.clone()))
            .credentials_provider(Credentials::new(
                s3_config.access_key_id.clone(),
                s3_config.secret_access_key.clone(),
                None,
                None,
                "lc-sync",
            ));

        if let Some(endpoint_url) = &s3_config.endpoint_url {
            config_builder = config_builder.endpoint_url(endpoint_url);
        }

        let config = config_builder.load().await;
        let client = Client::new(&config);

        let folder_prefix = "llm_client_config".to_string();

        Ok(Self {
            client,
            bucket_name: s3_config.bucket_name,
            folder_prefix,
        })
    }

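    /// Resolves the S3 settings for `provider_name`: stored sync configuration first,
    /// then environment variables, falling back to interactive prompts.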
    async fn get_s3_config(provider_name: &str) -> Result<S3Config> {
        use crate::sync::config::{ProviderConfig, SyncConfig};
        use std::io::{self, Write};

        if let Ok(sync_config) = SyncConfig::load() {
            if let Some(ProviderConfig::S3 {
                bucket_name,
                region,
                access_key_id,
                secret_access_key,
                endpoint_url,
            }) = sync_config.get_provider(provider_name)
            {
                println!("{} Using stored S3 configuration for '{}'", "✓".green(), provider_name);
                return Ok(S3Config {
                    bucket_name: bucket_name.clone(),
                    region: region.clone(),
                    access_key_id: access_key_id.clone(),
                    secret_access_key: secret_access_key.clone(),
                    endpoint_url: endpoint_url.clone(),
                });
            }
        }

        println!("{} S3 Configuration Setup for '{}'", "🔧".blue(), provider_name);
        println!("{} No stored configuration found. You can:", "💡".yellow());
        println!(
            " - Set up configuration: {}",
            format!("lc sync configure {} setup", provider_name).dimmed()
        );
        println!(" - Use environment variables:");
        println!(" LC_S3_BUCKET, LC_S3_REGION, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, LC_S3_ENDPOINT");
        println!(" - Enter credentials interactively (below)");
        println!();

        let bucket_name = if let Ok(bucket) = std::env::var("LC_S3_BUCKET") {
            println!("{} Using bucket from LC_S3_BUCKET: {}", "✓".green(), bucket);
            bucket
        } else {
            print!("Enter S3 bucket name: ");
            io::stdout().flush()?;
            let mut input = String::new();
            io::stdin().read_line(&mut input)?;
            let bucket = input.trim().to_string();
            if bucket.is_empty() {
                anyhow::bail!("Bucket name cannot be empty");
            }
            bucket
        };

        let region = if let Ok(region) = std::env::var("LC_S3_REGION") {
            println!("{} Using region from LC_S3_REGION: {}", "✓".green(), region);
            region
        } else {
            print!("Enter AWS region (default: us-east-1): ");
            io::stdout().flush()?;
            let mut input = String::new();
            io::stdin().read_line(&mut input)?;
            let region = input.trim().to_string();
            if region.is_empty() {
                "us-east-1".to_string()
            } else {
                region
            }
        };

        let access_key_id = if let Ok(key) = std::env::var("AWS_ACCESS_KEY_ID") {
            println!("{} Using access key from AWS_ACCESS_KEY_ID", "✓".green());
            key
        } else {
            print!("Enter AWS Access Key ID: ");
            io::stdout().flush()?;
            let mut input = String::new();
            io::stdin().read_line(&mut input)?;
            let key = input.trim().to_string();
            if key.is_empty() {
                anyhow::bail!("Access Key ID cannot be empty");
            }
            key
        };

        let secret_access_key = if let Ok(secret) = std::env::var("AWS_SECRET_ACCESS_KEY") {
            println!(
                "{} Using secret key from AWS_SECRET_ACCESS_KEY",
                "✓".green()
            );
            secret
        } else {
            print!("Enter AWS Secret Access Key: ");
            io::stdout().flush()?;
            let secret = rpassword::read_password()?;
            if secret.is_empty() {
                anyhow::bail!("Secret Access Key cannot be empty");
            }
            secret
        };

        let endpoint_url = if let Ok(endpoint) = std::env::var("LC_S3_ENDPOINT") {
            println!(
                "{} Using custom endpoint from LC_S3_ENDPOINT: {}",
                "✓".green(),
                endpoint
            );
            Some(endpoint)
        } else {
            print!("Enter custom S3 endpoint URL (optional, for Backblaze/Cloudflare R2/etc., press Enter to skip): ");
            io::stdout().flush()?;
            let mut input = String::new();
            io::stdin().read_line(&mut input)?;
            let endpoint = input.trim().to_string();
            if endpoint.is_empty() {
                None
            } else {
                Some(endpoint)
            }
        };

        Ok(S3Config {
            bucket_name,
            region,
            access_key_id,
            secret_access_key,
            endpoint_url,
        })
    }

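    /// Uploads the given files under the sync prefix as base64-encoded objects, tagging
    /// each with metadata (original name, encoding, encryption flag, file type, size).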
    pub async fn upload_configs(&self, files: &[ConfigFile], encrypted: bool) -> Result<()> {
        println!(
            "{} Uploading to S3 bucket: {}",
            "📤".blue(),
            self.bucket_name
        );

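        // Check bucket access up front so credential or permission problems fail fast.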
        match self
            .client
            .head_bucket()
            .bucket(&self.bucket_name)
            .send()
            .await
        {
            Ok(_) => {
                println!("{} Bucket access verified", "✓".green());
            }
            Err(e) => {
                anyhow::bail!("Cannot access S3 bucket '{}': {}. Please check your AWS credentials and bucket permissions.", self.bucket_name, e);
            }
        }

        let mut uploaded_count = 0;

        for file in files {
            let key = format!("{}/{}", self.folder_prefix, file.name);

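            // Contents are base64-encoded before upload (and decoded on download),
            // so binary files such as .db databases survive the text/plain round trip.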
            let content_b64 = encode_base64(&file.content);

            let mut metadata = HashMap::new();
            metadata.insert("original-name".to_string(), file.name.clone());
            metadata.insert("encrypted".to_string(), encrypted.to_string());
            metadata.insert("encoding".to_string(), "base64".to_string());
            metadata.insert("sync-tool".to_string(), "lc".to_string());
            metadata.insert("sync-version".to_string(), "1.0".to_string());

            let file_type = if file.name.ends_with(".toml") {
                "config"
            } else if file.name.ends_with(".db") {
                "database"
            } else if file.name.starts_with("embeddings/") {
                "embeddings"
            } else if file.name.starts_with("providers/") {
                "provider-config"
            } else {
                "unknown"
            };
            metadata.insert("file-type".to_string(), file_type.to_string());

            metadata.insert("file-size".to_string(), file.content.len().to_string());

            match self
                .client
                .put_object()
                .bucket(&self.bucket_name)
                .key(&key)
                .body(ByteStream::from(content_b64.into_bytes()))
                .content_type("text/plain")
                .set_metadata(Some(metadata))
                .send()
                .await
            {
                Ok(_) => {
                    println!(" {} Uploaded: {}", "✓".green(), file.name);
                    uploaded_count += 1;
                }
                Err(e) => {
                    crate::debug_log!("Failed to upload {}: {}", file.name, e);
                    eprintln!(" {} Failed to upload {}: {}", "✗".red(), file.name, e);
                }
            }
        }

        if uploaded_count == files.len() {
            println!(
                "{} All {} files uploaded successfully",
                "🎉".green(),
                uploaded_count
            );
        } else {
            println!(
                "{} Uploaded {}/{} files",
                "⚠️".yellow(),
                uploaded_count,
                files.len()
            );
        }

        Ok(())
    }

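    /// Downloads every object under the sync prefix, decoding the base64 payloads and
    /// warning when an object's stored encryption flag disagrees with the `encrypted` argument.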
    pub async fn download_configs(&self, encrypted: bool) -> Result<Vec<ConfigFile>> {
        println!(
            "{} Downloading from S3 bucket: {}",
            "📥".blue(),
            self.bucket_name
        );

        let list_response = self
            .client
            .list_objects_v2()
            .bucket(&self.bucket_name)
            .prefix(&self.folder_prefix)
            .send()
            .await
            .map_err(|e| {
                anyhow::anyhow!(
                    "Failed to list objects in bucket '{}': {}",
                    self.bucket_name,
                    e
                )
            })?;

        let objects = list_response.contents();

        if objects.is_empty() {
            println!("{} No configuration files found in S3", "ℹ️".blue());
            return Ok(Vec::new());
        }

        println!("{} Found {} objects in S3", "📋".blue(), objects.len());

        let mut downloaded_files = Vec::new();

        for object in objects {
            if let Some(key) = object.key() {
                if key.ends_with('/') {
                    continue;
                }

                let filename = key
                    .strip_prefix(&format!("{}/", self.folder_prefix))
                    .unwrap_or(key)
                    .to_string();

                match self
                    .client
                    .get_object()
                    .bucket(&self.bucket_name)
                    .key(key)
                    .send()
                    .await
                {
                    Ok(response) => {
                        let metadata = response.metadata().cloned().unwrap_or_default();
                        let is_encrypted = metadata
                            .get("encrypted")
                            .map(|v| v == "true")
                            .unwrap_or(false);

                        let body =
                            response.body.collect().await.map_err(|e| {
                                anyhow::anyhow!("Failed to read object body: {}", e)
                            })?;
                        let content_b64 =
                            String::from_utf8(body.into_bytes().to_vec()).map_err(|e| {
                                anyhow::anyhow!("Invalid UTF-8 in object content: {}", e)
                            })?;

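                        // Stored objects are base64 text; decode back to the original bytes.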
                        let content = decode_base64(&content_b64).map_err(|e| {
                            anyhow::anyhow!(
                                "Failed to decode base64 content for {}: {}",
                                filename,
                                e
                            )
                        })?;

                        if encrypted && !is_encrypted {
                            crate::debug_log!(
                                "Warning: {} is not encrypted but --encrypted flag was used",
                                filename
                            );
                            eprintln!(
                                " {} Warning: {} is not encrypted but --encrypted flag was used",
                                "⚠️".yellow(),
                                filename
                            );
                        } else if !encrypted && is_encrypted {
                            crate::debug_log!(
                                "Warning: {} is encrypted but --encrypted flag was not used",
                                filename
                            );
                            eprintln!(
                                " {} Warning: {} is encrypted but --encrypted flag was not used",
                                "⚠️".yellow(),
                                filename
                            );
                        }

                        downloaded_files.push(ConfigFile {
                            name: filename.clone(),
                            content,
                        });

                        println!(" {} Downloaded: {}", "✓".green(), filename);
                    }
                    Err(e) => {
                        crate::debug_log!("Failed to download {}: {}", filename, e);
                        eprintln!(" {} Failed to download {}: {}", "✗".red(), filename, e);
                    }
                }
            }
        }

        println!(
            "{} Downloaded {} files successfully",
            "🎉".green(),
            downloaded_files.len()
        );

        Ok(downloaded_files)
    }

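    /// Lists the filenames stored under the sync prefix, with the prefix stripped.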
    #[allow(dead_code)]
    pub async fn list_configs(&self) -> Result<Vec<String>> {
        let list_response = self
            .client
            .list_objects_v2()
            .bucket(&self.bucket_name)
            .prefix(&self.folder_prefix)
            .send()
            .await
            .map_err(|e| anyhow::anyhow!("Failed to list objects: {}", e))?;

        let mut filenames = Vec::new();

        for object in list_response.contents() {
            if let Some(key) = object.key() {
                if !key.ends_with('/') {
                    let filename = key
                        .strip_prefix(&format!("{}/", self.folder_prefix))
                        .unwrap_or(key)
                        .to_string();
                    filenames.push(filename);
                }
            }
        }

        Ok(filenames)
    }

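    /// Deletes the named files from the bucket, logging per-file failures without aborting.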
    #[allow(dead_code)]
    pub async fn delete_configs(&self, filenames: &[String]) -> Result<()> {
        for filename in filenames {
            let key = format!("{}/{}", self.folder_prefix, filename);

            match self
                .client
                .delete_object()
                .bucket(&self.bucket_name)
                .key(&key)
                .send()
                .await
            {
                Ok(_) => {
                    println!(" {} Deleted: {}", "✓".green(), filename);
                }
                Err(e) => {
                    crate::debug_log!("Failed to delete {}: {}", filename, e);
                    eprintln!(" {} Failed to delete {}: {}", "✗".red(), filename, e);
                }
            }
        }

        Ok(())
    }
}

#[cfg(all(test, feature = "s3-sync"))]
mod tests {
    use super::*;

    #[test]
    fn test_s3_provider_creation() {
        // Placeholder: constructing a real S3Provider requires live credentials, so
        // this test only documents the expected sync folder prefix.
        assert_eq!("llm_client_config", "llm_client_config");
    }

    #[test]
    fn test_s3_config_creation() {
        let config = S3Config {
            bucket_name: "test-bucket".to_string(),
            region: "us-east-1".to_string(),
            access_key_id: "test-key".to_string(),
            secret_access_key: "test-secret".to_string(),
            endpoint_url: None,
        };

        assert_eq!(config.bucket_name, "test-bucket");
        assert_eq!(config.region, "us-east-1");
        assert_eq!(config.access_key_id, "test-key");
        assert_eq!(config.secret_access_key, "test-secret");
        assert!(config.endpoint_url.is_none());
    }
}