1use rig::completion::ToolDefinition;
7use rig::tool::Tool;
8use serde::Deserialize;
9use serde_json::json;
10use std::path::PathBuf;
11use std::str::FromStr;
12
13use crate::agent::tools::ExecutionContext;
14use crate::agent::tools::error::{ErrorCategory, format_error_for_llm};
15use crate::analyzer::{AnalysisConfig, TechnologyCategory, analyze_project_with_config};
16use crate::platform::api::types::{
17 CloudProvider, CloudRunnerConfigInput, CreateDeploymentConfigRequest, DeploymentSecretInput,
18 ProjectRepository, build_cloud_runner_config_v2,
19};
20
21use super::set_secrets::{SecretPromptResult, default_true, prompt_secret_value};
22use crate::platform::PlatformSession;
23use crate::platform::api::{PlatformApiClient, PlatformApiError, TriggerDeploymentRequest};
24use crate::wizard::{
25 DynamicCloudRegion, DynamicMachineType, HetznerFetchResult, RecommendationInput,
26 discover_env_files, extract_network_endpoints, filter_endpoints_for_provider,
27 get_available_endpoints, get_hetzner_regions_dynamic, get_hetzner_server_types_dynamic,
28 get_provider_deployment_statuses, match_env_vars_to_services, parse_env_file,
29 recommend_deployment,
30};
31use std::process::Command;
32
33struct HetznerAvailabilityData {
35 regions: Vec<DynamicCloudRegion>,
36 server_types: Vec<DynamicMachineType>,
37}
38
39#[derive(Debug, Deserialize)]
41pub struct SecretKeyInput {
42 pub key: String,
44 pub value: Option<String>,
46 #[serde(default = "default_true")]
48 pub is_secret: bool,
49}
50
51#[derive(Debug, Deserialize)]
53pub struct DeployServiceArgs {
54 pub path: Option<String>,
56 pub service_name: Option<String>,
58 pub provider: Option<String>,
60 pub machine_type: Option<String>,
62 pub region: Option<String>,
64 pub port: Option<u16>,
66 #[serde(default)]
69 pub is_public: bool,
70 pub cpu: Option<String>,
72 pub memory: Option<String>,
74 pub min_instances: Option<i32>,
76 pub max_instances: Option<i32>,
78 #[serde(default = "default_preview")]
81 pub preview_only: bool,
82 pub secret_keys: Option<Vec<SecretKeyInput>>,
86}
87
88fn default_preview() -> bool {
89 true
90}
91
92#[derive(Debug, thiserror::Error)]
94#[error("Deploy service error: {0}")]
95pub struct DeployServiceError(String);
96
97#[derive(Debug, Clone)]
106pub struct DeployServiceTool {
107 project_path: PathBuf,
108 execution_context: ExecutionContext,
109}
110
111impl DeployServiceTool {
112 pub fn new(project_path: PathBuf) -> Self {
114 Self {
115 project_path,
116 execution_context: ExecutionContext::InteractiveCli,
117 }
118 }
119
120 pub fn with_context(project_path: PathBuf, ctx: ExecutionContext) -> Self {
122 Self {
123 project_path,
124 execution_context: ctx,
125 }
126 }
127}
128
129impl Tool for DeployServiceTool {
130 const NAME: &'static str = "deploy_service";
131
132 type Error = DeployServiceError;
133 type Args = DeployServiceArgs;
134 type Output = String;
135
136 async fn definition(&self, _prompt: String) -> ToolDefinition {
137 ToolDefinition {
138 name: Self::NAME.to_string(),
139 description: r#"Analyze a project and deploy it with intelligent recommendations.
140
141This tool provides an end-to-end deployment experience:
1421. Analyzes the project to detect language, framework, ports, and health endpoints
1432. Checks available deployment capabilities (providers, clusters, registries)
1443. Generates smart recommendations with reasoning
1454. Shows a preview for user confirmation
1465. Creates deployment config and triggers deployment
147
148**Default behavior (preview_only=true):**
149Returns analysis and recommendations. User should confirm before actual deployment.
150
151**Direct deployment (preview_only=false):**
152Uses provided overrides or recommendation defaults to deploy immediately.
153
154**Parameters:**
155- path: Optional subdirectory for monorepo services
156- provider: Override recommendation (gcp, hetzner, azure)
157- machine_type: Override machine selection (e.g., cx22, e2-small)
158- region: Override region selection (e.g., nbg1, us-central1)
159- port: Override detected port
160- is_public: Whether service should be publicly accessible (default: false)
161- preview_only: If true (default), show recommendation only
162
163**IMPORTANT - Public vs Internal:**
164- is_public=false (default): Service is internal-only, not accessible from internet
165- is_public=true: Service gets a public URL, accessible from anywhere
166- ALWAYS show this in the preview and ask user before deploying public services
167
168**What it analyzes:**
169- Programming language and framework
170- Port configuration from source code, package.json, Dockerfiles
171- Health check endpoints (/health, /healthz, etc.)
172- Existing infrastructure (K8s manifests, Helm charts)
173
174**Recommendation reasoning includes:**
175- Why a specific provider was chosen
176- Why a machine type fits the workload (based on memory requirements)
177- Where the port was detected from
178- Confidence level in the recommendation
179
180**Example flow:**
181User: "deploy this service"
1821. Call with preview_only=true → Shows recommendation
1832. User: "yes, deploy it" → Call with preview_only=false to deploy
1843. User: "make it public" → Call with preview_only=true AND is_public=true to show NEW preview
1854. User: "yes" → NOW call with preview_only=false to deploy
186
187**CRITICAL - Human in the loop:**
188- NEVER deploy (preview_only=false) immediately after user requests a CHANGE
189- If user says "make it public", "use GCP", "change region", etc. → show NEW preview first
190- Only deploy after user explicitly confirms the final settings with "yes", "deploy", "confirm"
191- A change request is NOT a deployment confirmation
192
193**Multiple cloud providers:**
194- The response includes connected_providers listing ALL connected providers (e.g. Hetzner AND Azure)
195- ALWAYS mention all connected providers to the user, not just the recommended one
196- The user can override the provider with the provider parameter
197- If deploying related services, consider whether they should be on the same provider for private networking
198
199**Deployed service endpoints:**
200- The response includes deployed_service_endpoints showing services already running in the project
201- Services may have public URLs (reachable from anywhere) or private IPs (only reachable from the same cloud provider network)
202- endpoint_suggestions maps detected env vars to deployed services (e.g. SENTIMENT_SERVICE_URL -> sentiment-analysis)
203- Private endpoints are pre-filtered to only show services on the same provider network
204- ALWAYS mention available endpoints when deploying services that have env vars matching deployed services
205
206**Private networks (project_networks):**
207- The response includes project_networks showing provisioned VPCs/networks for the target provider
208- Each network includes connection_details with key/value pairs (VPC_ID, SUBNET_ID, DEFAULT_DOMAIN, etc.)
209- If networks have useful connection details (e.g., a default domain, VPC connector), mention them to the user
210- Ask the user if they want to inject any network details as environment variables
211- Network details are NOT secrets — they are infrastructure identifiers
212- Private networks enable service-to-service communication on the same provider
213
214**Environment variables (secret_keys) and .env files:**
215- The preview response includes parsed_env_files: discovered .env files with their parsed keys/values
216- If .env files are found, ALWAYS ask the user: "I found a .env file with N variables. Should I inject these into the deployment?"
217- For non-secret vars from .env files, pass them as secret_keys with is_secret=false and include the value
218- For secret vars (API keys, tokens, passwords), pass them as secret_keys with is_secret=true and omit the value — the user is prompted securely in the terminal
219- Secret values from .env files are NEVER included in parsed_env_files or this conversation
220- If no .env files found but detected_env_vars exist, mention those and ask user how to provide them
221- NEVER ask the user to type secret values in chat
222
223**Prerequisites:**
224- User must be authenticated (sync-ctl auth login)
225- A project must be selected (use select_project first)
226- Provider must be connected (check with list_deployment_capabilities)"#
227 .to_string(),
228 parameters: json!({
229 "type": "object",
230 "properties": {
231 "path": {
232 "type": "string",
233 "description": "Subdirectory to deploy (for monorepos)"
234 },
235 "provider": {
236 "type": "string",
237 "enum": ["gcp", "hetzner", "azure"],
238 "description": "Override: cloud provider"
239 },
240 "machine_type": {
241 "type": "string",
242 "description": "Override: machine type (e.g., cx22, e2-small)"
243 },
244 "region": {
245 "type": "string",
246 "description": "Override: deployment region"
247 },
248 "port": {
249 "type": "integer",
250 "description": "Override: port to expose"
251 },
252 "is_public": {
253 "type": "boolean",
254 "description": "Whether service should be publicly accessible. Default: false (internal only). Set to true for public URL."
255 },
256 "preview_only": {
257 "type": "boolean",
258 "description": "If true (default), show recommendation only. If false, deploy."
259 },
260 "secret_keys": {
261 "type": "array",
262 "description": "Env vars to include in deployment. For secrets, omit value \u{2014} user is prompted in terminal.",
263 "items": {
264 "type": "object",
265 "properties": {
266 "key": {
267 "type": "string",
268 "description": "Environment variable name"
269 },
270 "value": {
271 "type": "string",
272 "description": "Omit for secrets \u{2014} user will be prompted securely in terminal."
273 },
274 "is_secret": {
275 "type": "boolean",
276 "description": "Whether this is a secret (default: true). Secrets are prompted in terminal.",
277 "default": true
278 }
279 },
280 "required": ["key"]
281 }
282 }
283 }
284 }),
285 }
286 }
287
288 async fn call(&self, args: Self::Args) -> Result<Self::Output, Self::Error> {
289 let analysis_path = if let Some(ref subpath) = args.path {
291 self.project_path.join(subpath)
292 } else {
293 self.project_path.clone()
294 };
295
296 if !analysis_path.exists() {
298 return Ok(format_error_for_llm(
299 "deploy_service",
300 ErrorCategory::FileNotFound,
301 &format!("Path not found: {}", analysis_path.display()),
302 Some(vec![
303 "Check if the path exists",
304 "Use list_directory to explore",
305 ]),
306 ));
307 }
308
309 let config = AnalysisConfig {
311 deep_analysis: true,
312 ..Default::default()
313 };
314
315 let analysis = match analyze_project_with_config(&analysis_path, &config) {
316 Ok(a) => a,
317 Err(e) => {
318 return Ok(format_error_for_llm(
319 "deploy_service",
320 ErrorCategory::InternalError,
321 &format!("Analysis failed: {}", e),
322 Some(vec!["Check if the directory contains a valid project"]),
323 ));
324 }
325 };
326
327 let client = match PlatformApiClient::new() {
329 Ok(c) => c,
330 Err(_) => {
331 return Ok(format_error_for_llm(
332 "deploy_service",
333 ErrorCategory::PermissionDenied,
334 "Not authenticated",
335 Some(vec!["Run: sync-ctl auth login"]),
336 ));
337 }
338 };
339
340 let session = match PlatformSession::load() {
342 Ok(s) => s,
343 Err(_) => {
344 return Ok(format_error_for_llm(
345 "deploy_service",
346 ErrorCategory::InternalError,
347 "Failed to load platform session",
348 Some(vec!["Try selecting a project with select_project"]),
349 ));
350 }
351 };
352
353 if !session.is_project_selected() {
354 return Ok(format_error_for_llm(
355 "deploy_service",
356 ErrorCategory::ValidationFailed,
357 "No project selected",
358 Some(vec!["Use select_project to choose a project first"]),
359 ));
360 }
361
362 let project_id = session.project_id.clone().unwrap_or_default();
363 let environment_id = session.environment_id.clone();
364
365 let existing_configs = match client.list_deployment_configs(&project_id).await {
367 Ok(configs) => configs,
368 Err(e) => {
369 tracing::warn!("Failed to fetch existing configs: {}", e);
371 Vec::new()
372 }
373 };
374
375 let service_name = args
377 .service_name
378 .clone()
379 .unwrap_or_else(|| get_service_name(&analysis_path));
380
381 let existing_config = existing_configs
383 .iter()
384 .find(|c| c.service_name.eq_ignore_ascii_case(&service_name));
385
386 let environments: Vec<crate::platform::api::types::Environment> = client
388 .list_environments(&project_id)
389 .await
390 .unwrap_or_default();
391
392 let (resolved_env_id, resolved_env_name, is_production) =
394 if let Some(ref env_id) = environment_id {
395 let env = environments.iter().find(|e| e.id == *env_id);
396 let name = env
397 .map(|e| e.name.clone())
398 .unwrap_or_else(|| "Unknown".to_string());
399 let is_prod = name.to_lowercase().contains("prod");
400 (env_id.clone(), name, is_prod)
401 } else if let Some(existing) = &existing_config {
402 let env = environments
404 .iter()
405 .find(|e| e.id == existing.environment_id);
406 let name = env
407 .map(|e| e.name.clone())
408 .unwrap_or_else(|| "Unknown".to_string());
409 let is_prod = name.to_lowercase().contains("prod");
410 (existing.environment_id.clone(), name, is_prod)
411 } else if let Some(first_env) = environments.first() {
412 let is_prod = first_env.name.to_lowercase().contains("prod");
413 (first_env.id.clone(), first_env.name.clone(), is_prod)
414 } else {
415 ("".to_string(), "No environment".to_string(), false)
416 };
417
418 let capabilities = match get_provider_deployment_statuses(&client, &project_id).await {
420 Ok(c) => c,
421 Err(e) => {
422 return Ok(format_error_for_llm(
423 "deploy_service",
424 ErrorCategory::NetworkError,
425 &format!("Failed to get deployment capabilities: {}", e),
426 None,
427 ));
428 }
429 };
430
431 let available_providers: Vec<CloudProvider> = capabilities
433 .iter()
434 .filter(|s| s.provider.is_available() && s.is_connected)
435 .map(|s| s.provider.clone())
436 .collect();
437
438 if available_providers.is_empty() {
439 return Ok(format_error_for_llm(
440 "deploy_service",
441 ErrorCategory::ResourceUnavailable,
442 "No cloud providers connected",
443 Some(vec![
444 "Connect a cloud provider (GCP, Hetzner, or Azure) in platform settings",
445 "Use open_provider_settings to configure a provider",
446 ]),
447 ));
448 }
449
450 let has_existing_k8s = capabilities.iter().any(|s| !s.clusters.is_empty());
452
453 let recommendation_input = RecommendationInput {
455 analysis: analysis.clone(),
456 available_providers: available_providers.clone(),
457 has_existing_k8s,
458 user_region_hint: args.region.clone(),
459 };
460
461 let recommendation = recommend_deployment(recommendation_input);
462
463 let final_provider_for_check = args
466 .provider
467 .as_ref()
468 .and_then(|p| CloudProvider::from_str(p).ok())
469 .unwrap_or(recommendation.provider.clone());
470
471 let mut hetzner_availability: Option<HetznerAvailabilityData> = None;
473
474 if final_provider_for_check == CloudProvider::Hetzner {
475 let regions = match get_hetzner_regions_dynamic(&client, &project_id).await {
477 HetznerFetchResult::Success(r) if !r.is_empty() => r,
478 HetznerFetchResult::Success(_) => {
479 return Ok(format_error_for_llm(
480 "deploy_service",
481 ErrorCategory::ResourceUnavailable,
482 "No Hetzner regions available",
483 Some(vec![
484 "Check your Hetzner account status",
485 "Use list_hetzner_availability to see current availability",
486 ]),
487 ));
488 }
489 HetznerFetchResult::NoCredentials => {
490 return Ok(format_error_for_llm(
491 "deploy_service",
492 ErrorCategory::PermissionDenied,
493 "Cannot recommend Hetzner deployment: Hetzner credentials not configured",
494 Some(vec![
495 "Add your Hetzner API token in project settings",
496 "Use open_provider_settings to configure Hetzner",
497 "Or specify a different provider (e.g., provider='gcp')",
498 ]),
499 ));
500 }
501 HetznerFetchResult::ApiError(err) => {
502 return Ok(format_error_for_llm(
503 "deploy_service",
504 ErrorCategory::NetworkError,
505 &format!(
506 "Cannot recommend Hetzner deployment: Failed to fetch availability - {}",
507 err
508 ),
509 Some(vec![
510 "Use list_hetzner_availability to check current status",
511 "Or specify a different provider (e.g., provider='gcp')",
512 ]),
513 ));
514 }
515 };
516
517 let server_types = match get_hetzner_server_types_dynamic(
519 &client,
520 &project_id,
521 args.region.as_deref(),
522 )
523 .await
524 {
525 HetznerFetchResult::Success(s) if !s.is_empty() => s,
526 HetznerFetchResult::Success(_) => {
527 return Ok(format_error_for_llm(
528 "deploy_service",
529 ErrorCategory::ResourceUnavailable,
530 "No Hetzner server types available",
531 Some(vec![
532 "Check your Hetzner account status",
533 "Use list_hetzner_availability to see current availability",
534 ]),
535 ));
536 }
537 HetznerFetchResult::NoCredentials => {
538 return Ok(format_error_for_llm(
539 "deploy_service",
540 ErrorCategory::PermissionDenied,
541 "Cannot recommend Hetzner deployment: Hetzner credentials not configured",
542 Some(vec![
543 "Add your Hetzner API token in project settings",
544 "Use open_provider_settings to configure Hetzner",
545 ]),
546 ));
547 }
548 HetznerFetchResult::ApiError(err) => {
549 return Ok(format_error_for_llm(
550 "deploy_service",
551 ErrorCategory::NetworkError,
552 &format!(
553 "Cannot recommend Hetzner deployment: Failed to fetch server types - {}",
554 err
555 ),
556 Some(vec![
557 "Use list_hetzner_availability to check current status",
558 ]),
559 ));
560 }
561 };
562
563 hetzner_availability = Some(HetznerAvailabilityData {
565 regions,
566 server_types,
567 });
568 }
569
570 let primary_language = analysis
572 .languages
573 .first()
574 .map(|l| l.name.clone())
575 .unwrap_or_else(|| "Unknown".to_string());
576
577 let primary_framework = analysis
578 .technologies
579 .iter()
580 .find(|t| {
581 matches!(
582 t.category,
583 TechnologyCategory::BackendFramework | TechnologyCategory::MetaFramework
584 )
585 })
586 .map(|t| t.name.clone())
587 .unwrap_or_else(|| "None detected".to_string());
588
589 let has_dockerfile = analysis
590 .docker_analysis
591 .as_ref()
592 .map(|d| !d.dockerfiles.is_empty())
593 .unwrap_or(false);
594
595 let has_k8s = analysis
596 .infrastructure
597 .as_ref()
598 .map(|i| i.has_kubernetes)
599 .unwrap_or(false);
600
601 if args.preview_only {
603 let (deployment_mode, mode_explanation, next_steps) = if let Some(existing) =
605 &existing_config
606 {
607 (
608 "REDEPLOY",
609 format!(
610 "Service '{}' already has a deployment config (ID: {}). Deploying will trigger a REDEPLOY of the existing service.",
611 existing.service_name, existing.id
612 ),
613 vec![
614 "To redeploy with current config: call deploy_service with preview_only=false".to_string(),
615 "This will trigger a new deployment of the existing service".to_string(),
616 "The existing configuration will be used".to_string(),
617 ]
618 )
619 } else {
620 (
621 "NEW_DEPLOYMENT",
622 format!(
623 "No existing deployment config found for '{}'. This will create a NEW deployment configuration.",
624 service_name
625 ),
626 vec![
627 "To deploy with these settings: call deploy_service with preview_only=false".to_string(),
628 "To customize: specify provider, machine_type, region, or port parameters".to_string(),
629 "Check parsed_env_files — if .env files were found, ask user whether to inject them as secret_keys".to_string(),
630 "To see more options: check the hetzner_availability section for current pricing".to_string(),
631 ]
632 )
633 };
634
635 let production_warning = if is_production {
637 Some(
638 "⚠️ WARNING: This will deploy to PRODUCTION environment. Please confirm you intend to deploy to production.",
639 )
640 } else {
641 None
642 };
643
644 let (
646 final_machine_type,
647 final_region,
648 machine_reasoning,
649 region_reasoning,
650 price_monthly,
651 ) = if let Some(ref hetzner) = hetzner_availability {
652 let available_types: Vec<_> = hetzner
657 .server_types
658 .iter()
659 .filter(|st| !st.available_in.is_empty())
660 .collect();
661
662 let user_region = args.region.as_deref();
664
665 let best_machine_with_region = if let Some(region) = user_region {
667 available_types
669 .iter()
670 .filter(|st| {
671 st.memory_gb >= 4.0 && st.available_in.contains(®ion.to_string())
672 })
673 .min_by(|a, b| a.price_monthly.partial_cmp(&b.price_monthly).unwrap())
674 .map(|st| (*st, region.to_string()))
675 .or_else(|| {
676 available_types
678 .iter()
679 .filter(|st| st.available_in.contains(®ion.to_string()))
680 .min_by(|a, b| {
681 a.price_monthly.partial_cmp(&b.price_monthly).unwrap()
682 })
683 .map(|st| (*st, region.to_string()))
684 })
685 } else {
686 available_types
688 .iter()
689 .filter(|st| st.memory_gb >= 4.0)
690 .min_by(|a, b| a.price_monthly.partial_cmp(&b.price_monthly).unwrap())
691 .map(|st| {
692 let region = st
694 .available_in
695 .first()
696 .cloned()
697 .unwrap_or_else(|| "nbg1".to_string());
698 (*st, region)
699 })
700 .or_else(|| {
701 available_types
703 .iter()
704 .min_by(|a, b| {
705 a.price_monthly.partial_cmp(&b.price_monthly).unwrap()
706 })
707 .map(|st| {
708 let region = st
709 .available_in
710 .first()
711 .cloned()
712 .unwrap_or_else(|| "nbg1".to_string());
713 (*st, region)
714 })
715 })
716 };
717
718 if let Some((machine, region_id)) = best_machine_with_region {
719 let region_name = hetzner
720 .regions
721 .iter()
722 .find(|r| r.id == region_id)
723 .map(|r| format!("{}, {}", r.name, r.location))
724 .unwrap_or_else(|| region_id.clone());
725
726 let available_count = hetzner
727 .regions
728 .iter()
729 .find(|r| r.id == region_id)
730 .map(|r| r.available_server_types.len())
731 .unwrap_or(0);
732
733 (
734 args.machine_type
735 .clone()
736 .unwrap_or_else(|| machine.id.clone()),
737 region_id.clone(),
738 format!(
739 "Selected {} ({} vCPU, {:.0} GB RAM) - cheapest AVAILABLE option at €{:.2}/mo",
740 machine.id, machine.cores, machine.memory_gb, machine.price_monthly
741 ),
742 format!(
743 "Selected {} ({}) - {} server types available",
744 region_id, region_name, available_count
745 ),
746 Some(machine.price_monthly),
747 )
748 } else {
749 (
751 args.machine_type
752 .clone()
753 .unwrap_or_else(|| recommendation.machine_type.clone()),
754 args.region
755 .clone()
756 .unwrap_or_else(|| recommendation.region.clone()),
757 "WARNING: No server types currently available - using fallback".to_string(),
758 "Using fallback region".to_string(),
759 None,
760 )
761 }
762 } else {
763 (
765 args.machine_type
766 .clone()
767 .unwrap_or_else(|| recommendation.machine_type.clone()),
768 args.region
769 .clone()
770 .unwrap_or_else(|| recommendation.region.clone()),
771 recommendation.machine_reasoning.clone(),
772 recommendation.region_reasoning.clone(),
773 None,
774 )
775 };
776
777 let hetzner_availability_info = hetzner_availability.as_ref().map(|h| {
779 json!({
780 "regions": h.regions.iter().map(|r| json!({
781 "id": r.id,
782 "name": r.name,
783 "country": r.location,
784 "available_server_types_count": r.available_server_types.len(),
785 })).collect::<Vec<_>>(),
786 "server_types": h.server_types.iter().take(10).map(|st| json!({
787 "id": st.id,
788 "cores": st.cores,
789 "memory_gb": st.memory_gb,
790 "price_monthly_eur": st.price_monthly,
791 "available_in": st.available_in,
792 })).collect::<Vec<_>>(),
793 "cheapest_4gb": h.server_types.iter()
794 .filter(|st| st.memory_gb >= 4.0)
795 .min_by(|a, b| a.price_monthly.partial_cmp(&b.price_monthly).unwrap())
796 .map(|st| json!({
797 "id": st.id,
798 "specs": format!("{} vCPU, {:.0} GB RAM", st.cores, st.memory_gb),
799 "price_monthly_eur": st.price_monthly,
800 })),
801 })
802 });
803
804 let discovered_env_files_raw = discover_env_files(&analysis_path);
806 let discovered_env_file_paths: Vec<String> = discovered_env_files_raw
807 .iter()
808 .map(|p| p.display().to_string())
809 .collect();
810
811 let parsed_env_files: Vec<serde_json::Value> = discovered_env_files_raw
813 .iter()
814 .filter_map(|rel_path| {
815 let abs_path = analysis_path.join(rel_path);
816 match parse_env_file(&abs_path) {
817 Ok(entries) if !entries.is_empty() => Some(json!({
818 "file": rel_path.display().to_string(),
819 "variable_count": entries.len(),
820 "variables": entries.iter().map(|e| json!({
821 "key": e.key,
822 "is_secret": e.is_secret,
823 "value": if e.is_secret { None } else { Some(&e.value) },
826 })).collect::<Vec<_>>(),
827 })),
828 Ok(_) => None, Err(e) => {
830 tracing::debug!("Could not parse env file {:?}: {}", rel_path, e);
831 None
832 }
833 }
834 })
835 .collect();
836
837 let deployed_endpoints = match client.list_deployments(&project_id, Some(50)).await {
839 Ok(paginated) => get_available_endpoints(&paginated.data),
840 Err(e) => {
841 tracing::debug!("Could not fetch deployments for endpoint matching: {}", e);
842 Vec::new()
843 }
844 };
845 let deployed_endpoints: Vec<_> = deployed_endpoints
846 .into_iter()
847 .filter(|ep| ep.service_name != service_name)
848 .collect();
849 let deployed_endpoints = filter_endpoints_for_provider(
852 deployed_endpoints,
853 final_provider_for_check.as_str(),
854 );
855
856 let detected_env_var_names: Vec<String> = analysis
857 .environment_variables
858 .iter()
859 .map(|e| e.name.clone())
860 .collect();
861
862 let endpoint_suggestions =
863 match_env_vars_to_services(&detected_env_var_names, &deployed_endpoints);
864
865 let project_networks = match client.list_project_networks(&project_id).await {
867 Ok(nets) => nets,
868 Err(e) => {
869 tracing::debug!("Could not fetch project networks: {}", e);
870 Vec::new()
871 }
872 };
873
874 let network_endpoints = extract_network_endpoints(
875 &project_networks,
876 final_provider_for_check.as_str(),
877 Some(&resolved_env_id),
878 );
879
880 let response = json!({
881 "status": "recommendation",
882 "deployment_mode": deployment_mode,
883 "mode_explanation": mode_explanation,
884 "environment": {
885 "id": resolved_env_id,
886 "name": resolved_env_name,
887 "is_production": is_production,
888 },
889 "connected_providers": capabilities.iter()
890 .filter(|s| s.provider.is_available() && s.is_connected)
891 .map(|s| json!({
892 "provider": s.provider.as_str(),
893 "display_name": s.provider.display_name(),
894 "cloud_runner_available": s.cloud_runner_available,
895 "clusters": s.clusters.len(),
896 "registries": s.registries.len(),
897 "summary": s.summary,
898 }))
899 .collect::<Vec<_>>(),
900 "production_warning": production_warning,
901 "existing_config": existing_config.map(|c| json!({
902 "id": c.id,
903 "service_name": c.service_name,
904 "environment_id": c.environment_id,
905 "branch": c.branch,
906 "port": c.port,
907 "auto_deploy_enabled": c.auto_deploy_enabled,
908 "created_at": c.created_at.to_rfc3339(),
909 })),
910 "analysis": {
911 "path": analysis_path.display().to_string(),
912 "language": primary_language,
913 "framework": primary_framework,
914 "detected_port": recommendation.port,
915 "port_source": recommendation.port_source,
916 "health_endpoint": recommendation.health_check_path,
917 "has_dockerfile": has_dockerfile,
918 "has_kubernetes": has_k8s,
919 "detected_env_vars": analysis.environment_variables.iter().map(|e| json!({
920 "name": e.name,
921 "required": e.required,
922 "has_default": e.default_value.is_some(),
923 "description": e.description,
924 })).collect::<Vec<_>>(),
925 },
926 "recommendation": {
927 "provider": recommendation.provider.as_str(),
928 "provider_reasoning": recommendation.provider_reasoning,
929 "target": recommendation.target.as_str(),
930 "target_reasoning": recommendation.target_reasoning,
931 "machine_type": final_machine_type,
932 "machine_reasoning": machine_reasoning,
933 "region": final_region,
934 "region_reasoning": region_reasoning,
935 "price_monthly_eur": price_monthly,
936 "port": recommendation.port,
937 "health_check_path": recommendation.health_check_path,
938 "is_public": args.is_public,
939 "is_public_note": if args.is_public {
940 "Service will be PUBLICLY accessible from the internet"
941 } else {
942 "Service will be INTERNAL only (not accessible from internet)"
943 },
944 "confidence": recommendation.confidence,
945 "availability_source": if hetzner_availability.is_some() { "real-time" } else { "static" },
946 },
947 "hetzner_availability": hetzner_availability_info,
948 "alternatives": {
949 "providers": recommendation.alternatives.providers.iter().map(|p| json!({
950 "provider": p.provider.as_str(),
951 "available": p.available,
952 "reason_if_unavailable": p.reason_if_unavailable,
953 })).collect::<Vec<_>>(),
954 "machine_types": if let Some(ref ha) = hetzner_availability {
955 ha.server_types.iter().take(6).map(|st| json!({
957 "machine_type": st.id,
958 "vcpu": st.cores,
959 "memory_gb": st.memory_gb,
960 "price_monthly_eur": st.price_monthly,
961 "available_in": st.available_in,
962 })).collect::<Vec<_>>()
963 } else {
964 recommendation.alternatives.machine_types.iter().map(|m| json!({
965 "machine_type": m.machine_type,
966 "vcpu": m.vcpu,
967 "memory_gb": m.memory_gb,
968 "description": m.description,
969 })).collect::<Vec<_>>()
970 },
971 "regions": if let Some(ref ha) = hetzner_availability {
972 ha.regions.iter().map(|r| json!({
974 "region": r.id,
975 "display_name": format!("{}, {}", r.name, r.location),
976 "available_server_types_count": r.available_server_types.len(),
977 })).collect::<Vec<_>>()
978 } else {
979 recommendation.alternatives.regions.iter().map(|r| json!({
980 "region": r.region,
981 "display_name": r.display_name,
982 })).collect::<Vec<_>>()
983 },
984 },
985 "service_name": service_name,
986 "discovered_env_files": discovered_env_file_paths,
987 "parsed_env_files": parsed_env_files,
988 "deployed_service_endpoints": deployed_endpoints.iter().map(|ep| json!({
989 "service_name": ep.service_name,
990 "url": ep.url,
991 "is_private": ep.is_private,
992 "status": ep.status,
993 })).collect::<Vec<_>>(),
994 "endpoint_suggestions": endpoint_suggestions.iter().map(|s| json!({
995 "env_var": s.env_var_name,
996 "service_name": s.service.service_name,
997 "url": s.service.url,
998 "is_private": s.service.is_private,
999 "confidence": format!("{:?}", s.confidence),
1000 "reason": s.reason,
1001 })).collect::<Vec<_>>(),
1002 "project_networks": network_endpoints.iter().map(|ne| json!({
1003 "network_id": ne.network_id,
1004 "cloud_provider": ne.cloud_provider,
1005 "region": ne.region,
1006 "status": ne.status,
1007 "environment_id": ne.environment_id,
1008 "connection_details": ne.connection_details.iter().map(|(k, v)| json!({
1009 "key": k,
1010 "value": v,
1011 "suggested_env_var": k,
1012 })).collect::<Vec<_>>(),
1013 })).collect::<Vec<_>>(),
1014 "next_steps": next_steps,
1015 "confirmation_prompt": if existing_config.is_some() {
1016 format!(
1017 "REDEPLOY '{}' to {} environment?{}",
1018 service_name,
1019 resolved_env_name,
1020 if is_production { " ⚠️ (PRODUCTION)" } else { "" }
1021 )
1022 } else {
1023 let price_info = price_monthly.map(|p| format!(" (€{:.2}/mo)", p)).unwrap_or_default();
1024 format!(
1025 "Deploy NEW service '{}' to {} ({}) with {}{} in {} on {} environment?{}",
1026 service_name,
1027 recommendation.provider.display_name(),
1028 recommendation.target.display_name(),
1029 final_machine_type,
1030 price_info,
1031 final_region,
1032 resolved_env_name,
1033 if is_production { " ⚠️ (PRODUCTION)" } else { "" }
1034 )
1035 },
1036 });
1037
1038 return serde_json::to_string_pretty(&response)
1039 .map_err(|e| DeployServiceError(format!("Failed to serialize: {}", e)));
1040 }
1041
1042 if let Some(existing) = &existing_config {
1046 let trigger_request = TriggerDeploymentRequest {
1047 project_id: project_id.clone(),
1048 config_id: existing.id.clone(),
1049 commit_sha: None,
1050 };
1051
1052 return match client.trigger_deployment(&trigger_request).await {
1053 Ok(response) => {
1054 let result = json!({
1055 "status": "redeployed",
1056 "deployment_mode": "REDEPLOY",
1057 "config_id": existing.id,
1058 "task_id": response.backstage_task_id,
1059 "service_name": service_name,
1060 "environment": {
1061 "id": resolved_env_id,
1062 "name": resolved_env_name,
1063 "is_production": is_production,
1064 },
1065 "message": format!(
1066 "Redeploy triggered for existing service '{}' on {} environment. Task ID: {}",
1067 service_name, resolved_env_name, response.backstage_task_id
1068 ),
1069 "next_steps": [
1070 format!("Monitor progress: use get_deployment_status with task_id '{}'", response.backstage_task_id),
1071 "View logs after deployment: use get_service_logs",
1072 ],
1073 });
1074
1075 serde_json::to_string_pretty(&result)
1076 .map_err(|e| DeployServiceError(format!("Failed to serialize: {}", e)))
1077 }
1078 Err(e) => Ok(format_api_error("deploy_service", e)),
1079 };
1080 }
1081
1082 let final_provider = args
1084 .provider
1085 .as_ref()
1086 .and_then(|p| CloudProvider::from_str(p).ok())
1087 .unwrap_or(recommendation.provider.clone());
1088
1089 let (final_machine, final_region) = if let Some(ref hetzner) = hetzner_availability {
1091 let available_types: Vec<_> = hetzner
1095 .server_types
1096 .iter()
1097 .filter(|st| !st.available_in.is_empty())
1098 .collect();
1099
1100 let user_region = args.region.as_deref();
1101
1102 let best_machine_with_region = if let Some(region) = user_region {
1104 available_types
1106 .iter()
1107 .filter(|st| {
1108 st.memory_gb >= 4.0 && st.available_in.contains(®ion.to_string())
1109 })
1110 .min_by(|a, b| a.price_monthly.partial_cmp(&b.price_monthly).unwrap())
1111 .map(|st| (st.id.clone(), region.to_string()))
1112 .or_else(|| {
1113 available_types
1114 .iter()
1115 .filter(|st| st.available_in.contains(®ion.to_string()))
1116 .min_by(|a, b| a.price_monthly.partial_cmp(&b.price_monthly).unwrap())
1117 .map(|st| (st.id.clone(), region.to_string()))
1118 })
1119 } else {
1120 available_types
1122 .iter()
1123 .filter(|st| st.memory_gb >= 4.0)
1124 .min_by(|a, b| a.price_monthly.partial_cmp(&b.price_monthly).unwrap())
1125 .map(|st| {
1126 let region = st
1127 .available_in
1128 .first()
1129 .cloned()
1130 .unwrap_or_else(|| "nbg1".to_string());
1131 (st.id.clone(), region)
1132 })
1133 .or_else(|| {
1134 available_types
1135 .iter()
1136 .min_by(|a, b| a.price_monthly.partial_cmp(&b.price_monthly).unwrap())
1137 .map(|st| {
1138 let region = st
1139 .available_in
1140 .first()
1141 .cloned()
1142 .unwrap_or_else(|| "nbg1".to_string());
1143 (st.id.clone(), region)
1144 })
1145 })
1146 };
1147
1148 if let Some((machine, region)) = best_machine_with_region {
1149 (
1150 args.machine_type.clone().unwrap_or(machine),
1151 args.region.clone().unwrap_or(region),
1152 )
1153 } else {
1154 (
1156 args.machine_type
1157 .clone()
1158 .unwrap_or_else(|| recommendation.machine_type.clone()),
1159 args.region
1160 .clone()
1161 .unwrap_or_else(|| recommendation.region.clone()),
1162 )
1163 }
1164 } else {
1165 let machine = args
1167 .machine_type
1168 .clone()
1169 .unwrap_or_else(|| recommendation.machine_type.clone());
1170 let region = args
1171 .region
1172 .clone()
1173 .unwrap_or_else(|| recommendation.region.clone());
1174 (machine, region)
1175 };
1176
1177 let final_port = args.port.unwrap_or(recommendation.port);
1178
1179 let repositories = match client.list_project_repositories(&project_id).await {
1181 Ok(repos) => repos,
1182 Err(e) => {
1183 return Ok(format_error_for_llm(
1184 "deploy_service",
1185 ErrorCategory::NetworkError,
1186 &format!("Failed to get repositories: {}", e),
1187 Some(vec!["Ensure a repository is connected to the project"]),
1188 ));
1189 }
1190 };
1191
1192 let repo = match find_matching_repository(&repositories.repositories, &self.project_path) {
1194 Some(r) => r,
1195 None => {
1196 return Ok(format_error_for_llm(
1197 "deploy_service",
1198 ErrorCategory::ResourceUnavailable,
1199 "No repository connected to project",
1200 Some(vec![
1201 "Connect a GitHub repository to the project first",
1202 "Use the platform UI to connect a repository",
1203 ]),
1204 ));
1205 }
1206 };
1207
1208 tracing::info!(
1209 "Deploy service: Using repository {} (id: {}), default_branch: {:?}",
1210 repo.repository_full_name,
1211 repo.repository_id,
1212 repo.default_branch
1213 );
1214
1215 if resolved_env_id.is_empty() {
1217 return Ok(format_error_for_llm(
1218 "deploy_service",
1219 ErrorCategory::ResourceUnavailable,
1220 "No environment found for project",
1221 Some(vec!["Create an environment in the platform first"]),
1222 ));
1223 }
1224
1225 let (dockerfile_path, build_context) = analysis
1245 .docker_analysis
1246 .as_ref()
1247 .and_then(|d| d.dockerfiles.first())
1248 .map(|df| {
1249 let dockerfile_name = df
1251 .path
1252 .file_name()
1253 .map(|n| n.to_string_lossy().to_string())
1254 .unwrap_or_else(|| "Dockerfile".to_string());
1255
1256 let analysis_relative_dir = df
1258 .path
1259 .parent()
1260 .and_then(|p| p.strip_prefix(&analysis_path).ok())
1261 .map(|p| p.to_string_lossy().to_string())
1262 .unwrap_or_default();
1263
1264 let subpath = args.path.as_deref().unwrap_or("");
1267
1268 if subpath.is_empty() {
1269 if analysis_relative_dir.is_empty() {
1271 (dockerfile_name, ".".to_string())
1272 } else {
1273 (
1274 format!("{}/{}", analysis_relative_dir, dockerfile_name),
1275 analysis_relative_dir,
1276 )
1277 }
1278 } else {
1279 if analysis_relative_dir.is_empty() {
1281 (
1284 format!("{}/{}", subpath, dockerfile_name),
1285 subpath.to_string(),
1286 )
1287 } else {
1288 let full_context = format!("{}/{}", subpath, analysis_relative_dir);
1291 (
1292 format!("{}/{}", full_context, dockerfile_name),
1293 full_context,
1294 )
1295 }
1296 }
1297 })
1298 .unwrap_or_else(|| {
1299 let subpath = args.path.as_deref().unwrap_or("");
1301 if subpath.is_empty() {
1302 ("Dockerfile".to_string(), ".".to_string())
1303 } else {
1304 (format!("{}/Dockerfile", subpath), subpath.to_string())
1305 }
1306 });
1307
1308 tracing::debug!(
1309 "Deploy service docker config: dockerfile_path={}, build_context={}, subpath={:?}",
1310 dockerfile_path,
1311 build_context,
1312 args.path
1313 );
1314
1315 let mut gcp_project_id = None;
1317 let mut subscription_id = None;
1318 if matches!(final_provider, CloudProvider::Gcp | CloudProvider::Azure) {
1319 if let Ok(Some(cred)) = client
1320 .check_provider_connection(&final_provider, &project_id)
1321 .await
1322 {
1323 match final_provider {
1324 CloudProvider::Gcp => gcp_project_id = cred.provider_account_id,
1325 CloudProvider::Azure => subscription_id = cred.provider_account_id,
1326 _ => {}
1327 }
1328 }
1329 }
1330
1331 let final_cpu = args.cpu.clone().or_else(|| recommendation.cpu.clone());
1333 let final_memory = args
1334 .memory
1335 .clone()
1336 .or_else(|| recommendation.memory.clone());
1337
1338 let config_input = CloudRunnerConfigInput {
1339 provider: Some(final_provider.clone()),
1340 region: Some(final_region.clone()),
1341 server_type: if final_provider == CloudProvider::Hetzner {
1342 Some(final_machine.clone())
1343 } else {
1344 None
1345 },
1346 gcp_project_id,
1347 cpu: final_cpu.clone(),
1348 memory: final_memory.clone(),
1349 min_instances: args.min_instances,
1350 max_instances: args.max_instances,
1351 allow_unauthenticated: Some(args.is_public),
1352 subscription_id,
1353 is_public: Some(args.is_public),
1354 health_check_path: recommendation.health_check_path.clone(),
1355 ..Default::default()
1356 };
1357 let cloud_runner_config = build_cloud_runner_config_v2(&config_input);
1358
1359 let secrets = if let Some(ref keys) = args.secret_keys {
1361 let mut resolved = Vec::new();
1362 for sk in keys {
1363 let value = match &sk.value {
1364 Some(v) => v.clone(),
1365 None if self.execution_context.has_terminal() => {
1366 match prompt_secret_value(&sk.key) {
1367 SecretPromptResult::Value(v) => v,
1368 SecretPromptResult::Skipped => continue,
1369 SecretPromptResult::Cancelled => {
1370 return Ok(format_error_for_llm(
1371 "deploy_service",
1372 ErrorCategory::ValidationFailed,
1373 "Secret entry cancelled by user",
1374 Some(vec![
1375 "The user cancelled secret input. Try again when ready.",
1376 ]),
1377 ));
1378 }
1379 }
1380 }
1381 None => continue, };
1383 resolved.push(DeploymentSecretInput {
1384 key: sk.key.clone(),
1385 value,
1386 is_secret: sk.is_secret,
1387 });
1388 }
1389 if resolved.is_empty() {
1390 None
1391 } else {
1392 Some(resolved)
1393 }
1394 } else {
1395 None
1396 };
1397
1398 let secrets_set_info = secrets.as_ref().map(|s| {
1400 s.iter()
1401 .map(|si| json!({"key": si.key, "is_secret": si.is_secret}))
1402 .collect::<Vec<_>>()
1403 });
1404
1405 let config_request = CreateDeploymentConfigRequest {
1406 project_id: project_id.clone(),
1407 service_name: service_name.clone(),
1408 repository_id: repo.repository_id,
1409 repository_full_name: repo.repository_full_name.clone(),
1410 dockerfile_path: Some(dockerfile_path.clone()),
1411 dockerfile: Some(dockerfile_path.clone()),
1412 build_context: Some(build_context.clone()),
1413 context: Some(build_context.clone()),
1414 port: final_port as i32,
1415 branch: repo
1416 .default_branch
1417 .clone()
1418 .unwrap_or_else(|| "main".to_string()),
1419 target_type: recommendation.target.as_str().to_string(),
1420 cloud_provider: final_provider.as_str().to_string(),
1421 environment_id: resolved_env_id.clone(),
1422 cluster_id: None, registry_id: None, auto_deploy_enabled: true,
1425 is_public: Some(args.is_public),
1426 cloud_runner_config: Some(cloud_runner_config),
1427 secrets,
1428 };
1429
1430 let config = match client.create_deployment_config(&config_request).await {
1432 Ok(c) => c,
1433 Err(e) => {
1434 return Ok(format_api_error("deploy_service", e));
1435 }
1436 };
1437
1438 let trigger_request = TriggerDeploymentRequest {
1440 project_id: project_id.clone(),
1441 config_id: config.id.clone(),
1442 commit_sha: None,
1443 };
1444
1445 match client.trigger_deployment(&trigger_request).await {
1446 Ok(response) => {
1447 let result = json!({
1448 "status": "deployed",
1449 "deployment_mode": "NEW_DEPLOYMENT",
1450 "config_id": config.id,
1451 "task_id": response.backstage_task_id,
1452 "service_name": service_name,
1453 "environment": {
1454 "id": resolved_env_id,
1455 "name": resolved_env_name,
1456 "is_production": is_production,
1457 },
1458 "provider": final_provider.as_str(),
1459 "machine_type": final_machine,
1460 "region": final_region,
1461 "port": final_port,
1462 "docker_config": {
1463 "dockerfile_path": dockerfile_path,
1464 "build_context": build_context,
1465 },
1466 "secrets_set": secrets_set_info,
1467 "message": format!(
1468 "NEW deployment started for '{}' on {} environment. Task ID: {}",
1469 service_name, resolved_env_name, response.backstage_task_id
1470 ),
1471 "next_steps": [
1472 format!("Monitor progress: use get_deployment_status with task_id '{}'", response.backstage_task_id),
1473 "View logs after deployment: use get_service_logs",
1474 ],
1475 });
1476
1477 serde_json::to_string_pretty(&result)
1478 .map_err(|e| DeployServiceError(format!("Failed to serialize: {}", e)))
1479 }
1480 Err(e) => Ok(format_api_error("deploy_service", e)),
1481 }
1482 }
1483}
1484
1485fn get_service_name(path: &PathBuf) -> String {
1487 path.file_name()
1488 .and_then(|n| n.to_str())
1489 .map(|n| n.to_lowercase().replace(['_', ' '], "-"))
1490 .unwrap_or_else(|| "service".to_string())
1491}
1492
1493fn detect_git_remote(project_path: &PathBuf) -> Option<String> {
1495 let output = Command::new("git")
1496 .args(["remote", "get-url", "origin"])
1497 .current_dir(project_path)
1498 .output()
1499 .ok()?;
1500
1501 if output.status.success() {
1502 let url = String::from_utf8(output.stdout).ok()?;
1503 Some(url.trim().to_string())
1504 } else {
1505 None
1506 }
1507}
1508
1509fn parse_repo_from_url(url: &str) -> Option<String> {
1512 let url = url.trim();
1513
1514 if url.starts_with("git@") {
1516 let parts: Vec<&str> = url.split(':').collect();
1517 if parts.len() == 2 {
1518 let path = parts[1].trim_end_matches(".git");
1519 return Some(path.to_string());
1520 }
1521 }
1522
1523 if url.starts_with("https://") || url.starts_with("http://") {
1525 if let Some(path) = url
1526 .split('/')
1527 .skip(3)
1528 .collect::<Vec<_>>()
1529 .join("/")
1530 .strip_suffix(".git")
1531 {
1532 return Some(path.to_string());
1533 }
1534 let path: String = url.split('/').skip(3).collect::<Vec<_>>().join("/");
1536 if !path.is_empty() {
1537 return Some(path);
1538 }
1539 }
1540
1541 None
1542}
1543
1544fn find_matching_repository<'a>(
1546 repositories: &'a [ProjectRepository],
1547 project_path: &PathBuf,
1548) -> Option<&'a ProjectRepository> {
1549 if let Some(detected_name) =
1551 detect_git_remote(project_path).and_then(|url| parse_repo_from_url(&url))
1552 {
1553 tracing::debug!("Detected local git remote: {}", detected_name);
1554
1555 if let Some(repo) = repositories
1556 .iter()
1557 .find(|r| r.repository_full_name.eq_ignore_ascii_case(&detected_name))
1558 {
1559 tracing::debug!("Matched detected repo: {}", repo.repository_full_name);
1560 return Some(repo);
1561 }
1562 }
1563
1564 if let Some(repo) = repositories.iter().find(|r| {
1567 r.is_primary_git_ops != Some(true)
1568 && !r
1569 .repository_full_name
1570 .to_lowercase()
1571 .contains("infrastructure")
1572 && !r.repository_full_name.to_lowercase().contains("gitops")
1573 }) {
1574 tracing::debug!("Using non-gitops repo: {}", repo.repository_full_name);
1575 return Some(repo);
1576 }
1577
1578 repositories.first()
1580}
1581
1582fn format_api_error(tool_name: &str, error: PlatformApiError) -> String {
1584 match error {
1585 PlatformApiError::Unauthorized => format_error_for_llm(
1586 tool_name,
1587 ErrorCategory::PermissionDenied,
1588 "Not authenticated - please run `sync-ctl auth login` first",
1589 Some(vec![
1590 "The user needs to authenticate with the Syncable platform",
1591 "Run: sync-ctl auth login",
1592 ]),
1593 ),
1594 PlatformApiError::NotFound(msg) => format_error_for_llm(
1595 tool_name,
1596 ErrorCategory::ResourceUnavailable,
1597 &format!("Resource not found: {}", msg),
1598 Some(vec![
1599 "The project ID may be incorrect",
1600 "Use list_projects to find valid project IDs",
1601 ]),
1602 ),
1603 PlatformApiError::PermissionDenied(msg) => format_error_for_llm(
1604 tool_name,
1605 ErrorCategory::PermissionDenied,
1606 &format!("Permission denied: {}", msg),
1607 Some(vec!["Contact the project admin for access"]),
1608 ),
1609 PlatformApiError::RateLimited => format_error_for_llm(
1610 tool_name,
1611 ErrorCategory::ResourceUnavailable,
1612 "Rate limit exceeded - please try again later",
1613 Some(vec!["Wait a moment before retrying"]),
1614 ),
1615 PlatformApiError::HttpError(e) => format_error_for_llm(
1616 tool_name,
1617 ErrorCategory::NetworkError,
1618 &format!("Network error: {}", e),
1619 Some(vec!["Check network connectivity"]),
1620 ),
1621 PlatformApiError::ParseError(msg) => format_error_for_llm(
1622 tool_name,
1623 ErrorCategory::InternalError,
1624 &format!("Failed to parse API response: {}", msg),
1625 None,
1626 ),
1627 PlatformApiError::ApiError { status, message } => format_error_for_llm(
1628 tool_name,
1629 ErrorCategory::ExternalCommandFailed,
1630 &format!("API error ({}): {}", status, message),
1631 Some(vec!["Check the error message for details"]),
1632 ),
1633 PlatformApiError::ServerError { status, message } => format_error_for_llm(
1634 tool_name,
1635 ErrorCategory::ExternalCommandFailed,
1636 &format!("Server error ({}): {}", status, message),
1637 Some(vec!["Try again later"]),
1638 ),
1639 PlatformApiError::ConnectionFailed => format_error_for_llm(
1640 tool_name,
1641 ErrorCategory::NetworkError,
1642 "Could not connect to Syncable API",
1643 Some(vec!["Check your internet connection"]),
1644 ),
1645 }
1646}
1647
1648#[cfg(test)]
1649mod tests {
1650 use super::*;
1651
1652 #[test]
1653 fn test_tool_name() {
1654 assert_eq!(DeployServiceTool::NAME, "deploy_service");
1655 }
1656
1657 #[test]
1658 fn test_default_preview_only() {
1659 assert!(default_preview());
1660 }
1661
1662 #[test]
1663 fn test_get_service_name() {
1664 assert_eq!(
1665 get_service_name(&PathBuf::from("/path/to/my_service")),
1666 "my-service"
1667 );
1668 assert_eq!(get_service_name(&PathBuf::from("/path/to/MyApp")), "myapp");
1669 assert_eq!(
1670 get_service_name(&PathBuf::from("/path/to/api-service")),
1671 "api-service"
1672 );
1673 }
1674
1675 #[test]
1676 fn test_tool_creation() {
1677 let tool = DeployServiceTool::new(PathBuf::from("/test"));
1678 assert!(format!("{:?}", tool).contains("DeployServiceTool"));
1679 }
1680
1681 #[tokio::test]
1682 async fn test_nonexistent_path_returns_error() {
1683 let tool = DeployServiceTool::new(PathBuf::from("/nonexistent/path/that/does/not/exist"));
1684 let args = DeployServiceArgs {
1685 path: Some("nope".to_string()),
1686 service_name: None,
1687 provider: None,
1688 machine_type: None,
1689 region: None,
1690 port: None,
1691 is_public: false,
1692 cpu: None,
1693 memory: None,
1694 min_instances: None,
1695 max_instances: None,
1696 preview_only: true,
1697 secret_keys: None,
1698 };
1699
1700 let result = tool.call(args).await.unwrap();
1701 assert!(
1702 result.contains("error")
1703 || result.contains("not found")
1704 || result.contains("Path not found")
1705 );
1706 }
1707}