Skip to main content

syncable_cli/agent/tools/platform/
deploy_service.rs

1//! Deploy service tool for the agent
2//!
3//! A compound tool that enables conversational deployment with intelligent recommendations.
4//! Analyzes the project, provides recommendations with reasoning, and executes deployment.
5
6use rig::completion::ToolDefinition;
7use rig::tool::Tool;
8use serde::Deserialize;
9use serde_json::json;
10use std::path::PathBuf;
11use std::str::FromStr;
12
13use crate::agent::tools::ExecutionContext;
14use crate::agent::tools::error::{ErrorCategory, format_error_for_llm};
15use crate::analyzer::{AnalysisConfig, TechnologyCategory, analyze_project_with_config};
16use crate::platform::api::types::{
17    CloudProvider, CloudRunnerConfigInput, CreateDeploymentConfigRequest, DeploymentSecretInput,
18    ProjectRepository, build_cloud_runner_config_v2,
19};
20
21use super::set_secrets::{SecretPromptResult, default_true, prompt_secret_value};
22use crate::platform::PlatformSession;
23use crate::platform::api::{PlatformApiClient, PlatformApiError, TriggerDeploymentRequest};
24use crate::wizard::{
25    DynamicCloudRegion, DynamicMachineType, HetznerFetchResult, RecommendationInput,
26    discover_env_files, extract_network_endpoints, filter_endpoints_for_provider,
27    get_available_endpoints, get_hetzner_regions_dynamic, get_hetzner_server_types_dynamic,
28    get_provider_deployment_statuses, match_env_vars_to_services, parse_env_file,
29    recommend_deployment,
30};
31use std::process::Command;
32
/// Cached Hetzner availability data for smart recommendations
///
/// Populated once per call (only when the target provider is Hetzner) from
/// real-time API fetches, then reused when picking the best region/machine.
struct HetznerAvailabilityData {
    /// Regions fetched live from the Hetzner API (used for id -> "name, location" display).
    regions: Vec<DynamicCloudRegion>,
    /// Server types fetched live, including monthly pricing, memory size, and
    /// the per-region availability list (`available_in`).
    server_types: Vec<DynamicMachineType>,
}
38
/// A single secret/env var key input for the deploy tool
///
/// Mirrors the `secret_keys` items in the tool's JSON schema. Secret values
/// are never passed through the LLM conversation: when `is_secret` is true the
/// value is collected via a terminal prompt instead.
#[derive(Debug, Deserialize)]
pub struct SecretKeyInput {
    /// Environment variable name
    pub key: String,
    /// Value to set. OMIT for secrets — user will be prompted in terminal.
    pub value: Option<String>,
    /// Whether this is a secret (default: true for safety)
    // `default_true` is shared with the set_secrets tool (see super::set_secrets).
    #[serde(default = "default_true")]
    pub is_secret: bool,
}
50
/// Arguments for the deploy service tool
///
/// Deserialized from the LLM's tool call; every field is optional or has a
/// safe default so a bare call produces a preview-only recommendation.
#[derive(Debug, Deserialize)]
pub struct DeployServiceArgs {
    /// Optional: specific subdirectory/service to deploy (for monorepos)
    // Joined onto the tool's project_path before analysis.
    pub path: Option<String>,
    /// Optional: override recommended provider (gcp, hetzner, azure)
    pub provider: Option<String>,
    /// Optional: override machine type selection
    pub machine_type: Option<String>,
    /// Optional: override region selection
    pub region: Option<String>,
    /// Optional: override detected port
    pub port: Option<u16>,
    /// Whether to make the service publicly accessible (default: false for safety)
    /// Internal services can only be accessed within the cluster/network
    #[serde(default)]
    pub is_public: bool,
    /// Optional: CPU allocation (for GCP Cloud Run / Azure ACA)
    pub cpu: Option<String>,
    /// Optional: Memory allocation (for GCP Cloud Run / Azure ACA)
    pub memory: Option<String>,
    /// Optional: min instances/replicas
    pub min_instances: Option<i32>,
    /// Optional: max instances/replicas
    pub max_instances: Option<i32>,
    /// If true (default), show recommendation but don't deploy yet
    /// If false with settings, deploy immediately
    // Defaulting to preview keeps a human in the loop; deployment requires an
    // explicit preview_only=false call.
    #[serde(default = "default_preview")]
    pub preview_only: bool,
    /// Optional: environment variable keys to set during deployment.
    /// For secrets (is_secret=true), values are collected via terminal prompt.
    /// For non-secrets, include the value directly.
    pub secret_keys: Option<Vec<SecretKeyInput>>,
}
85
/// Serde default for `DeployServiceArgs::preview_only`: previews are opt-out,
/// so an argument payload that omits the flag never deploys immediately.
fn default_preview() -> bool {
    true
}
89
/// Error type for deploy service operations
///
/// NOTE(review): in the visible `call` body, expected failures are reported to
/// the LLM as formatted `Ok` strings via `format_error_for_llm`; this type
/// appears reserved for hard failures — confirm against the truncated tail.
#[derive(Debug, thiserror::Error)]
#[error("Deploy service error: {0}")]
pub struct DeployServiceError(String);
94
/// Tool to analyze a project and deploy it with intelligent recommendations
///
/// Provides an end-to-end deployment experience:
/// 1. Analyzes the project (language, framework, ports, health endpoints)
/// 2. Checks available deployment capabilities
/// 3. Generates smart recommendations with reasoning
/// 4. Shows a preview for user confirmation
/// 5. Creates deployment config and triggers deployment
#[derive(Debug, Clone)]
pub struct DeployServiceTool {
    /// Root of the project; `DeployServiceArgs::path` is joined onto this.
    project_path: PathBuf,
    /// How the tool is being invoked; defaults to `InteractiveCli` in `new`.
    // NOTE(review): consumed in the truncated part of `call` (likely gating
    // terminal secret prompts) — confirm in the full file.
    execution_context: ExecutionContext,
}
108
109impl DeployServiceTool {
110    /// Create a new DeployServiceTool (defaults to InteractiveCli)
111    pub fn new(project_path: PathBuf) -> Self {
112        Self {
113            project_path,
114            execution_context: ExecutionContext::InteractiveCli,
115        }
116    }
117
118    /// Create with explicit execution context
119    pub fn with_context(project_path: PathBuf, ctx: ExecutionContext) -> Self {
120        Self {
121            project_path,
122            execution_context: ctx,
123        }
124    }
125}
126
127impl Tool for DeployServiceTool {
    /// Tool name the LLM uses to dispatch calls to this tool.
    const NAME: &'static str = "deploy_service";

    // Hard-failure error type; most recoverable errors are returned as
    // formatted Ok strings so the LLM can act on them.
    type Error = DeployServiceError;
    // Arguments deserialized from the model's JSON tool call.
    type Args = DeployServiceArgs;
    // Human/LLM-readable result text.
    type Output = String;
133
134    async fn definition(&self, _prompt: String) -> ToolDefinition {
135        ToolDefinition {
136            name: Self::NAME.to_string(),
137            description: r#"Analyze a project and deploy it with intelligent recommendations.
138
139This tool provides an end-to-end deployment experience:
1401. Analyzes the project to detect language, framework, ports, and health endpoints
1412. Checks available deployment capabilities (providers, clusters, registries)
1423. Generates smart recommendations with reasoning
1434. Shows a preview for user confirmation
1445. Creates deployment config and triggers deployment
145
146**Default behavior (preview_only=true):**
147Returns analysis and recommendations. User should confirm before actual deployment.
148
149**Direct deployment (preview_only=false):**
150Uses provided overrides or recommendation defaults to deploy immediately.
151
152**Parameters:**
153- path: Optional subdirectory for monorepo services
154- provider: Override recommendation (gcp, hetzner, azure)
155- machine_type: Override machine selection (e.g., cx22, e2-small)
156- region: Override region selection (e.g., nbg1, us-central1)
157- port: Override detected port
158- is_public: Whether service should be publicly accessible (default: false)
159- preview_only: If true (default), show recommendation only
160
161**IMPORTANT - Public vs Internal:**
162- is_public=false (default): Service is internal-only, not accessible from internet
163- is_public=true: Service gets a public URL, accessible from anywhere
164- ALWAYS show this in the preview and ask user before deploying public services
165
166**What it analyzes:**
167- Programming language and framework
168- Port configuration from source code, package.json, Dockerfiles
169- Health check endpoints (/health, /healthz, etc.)
170- Existing infrastructure (K8s manifests, Helm charts)
171
172**Recommendation reasoning includes:**
173- Why a specific provider was chosen
174- Why a machine type fits the workload (based on memory requirements)
175- Where the port was detected from
176- Confidence level in the recommendation
177
178**Example flow:**
179User: "deploy this service"
1801. Call with preview_only=true → Shows recommendation
1812. User: "yes, deploy it" → Call with preview_only=false to deploy
1823. User: "make it public" → Call with preview_only=true AND is_public=true to show NEW preview
1834. User: "yes" → NOW call with preview_only=false to deploy
184
185**CRITICAL - Human in the loop:**
186- NEVER deploy (preview_only=false) immediately after user requests a CHANGE
187- If user says "make it public", "use GCP", "change region", etc. → show NEW preview first
188- Only deploy after user explicitly confirms the final settings with "yes", "deploy", "confirm"
189- A change request is NOT a deployment confirmation
190
191**Multiple cloud providers:**
192- The response includes connected_providers listing ALL connected providers (e.g. Hetzner AND Azure)
193- ALWAYS mention all connected providers to the user, not just the recommended one
194- The user can override the provider with the provider parameter
195- If deploying related services, consider whether they should be on the same provider for private networking
196
197**Deployed service endpoints:**
198- The response includes deployed_service_endpoints showing services already running in the project
199- Services may have public URLs (reachable from anywhere) or private IPs (only reachable from the same cloud provider network)
200- endpoint_suggestions maps detected env vars to deployed services (e.g. SENTIMENT_SERVICE_URL -> sentiment-analysis)
201- Private endpoints are pre-filtered to only show services on the same provider network
202- ALWAYS mention available endpoints when deploying services that have env vars matching deployed services
203
204**Private networks (project_networks):**
205- The response includes project_networks showing provisioned VPCs/networks for the target provider
206- Each network includes connection_details with key/value pairs (VPC_ID, SUBNET_ID, DEFAULT_DOMAIN, etc.)
207- If networks have useful connection details (e.g., a default domain, VPC connector), mention them to the user
208- Ask the user if they want to inject any network details as environment variables
209- Network details are NOT secrets — they are infrastructure identifiers
210- Private networks enable service-to-service communication on the same provider
211
212**Environment variables (secret_keys) and .env files:**
213- The preview response includes parsed_env_files: discovered .env files with their parsed keys/values
214- If .env files are found, ALWAYS ask the user: "I found a .env file with N variables. Should I inject these into the deployment?"
215- For non-secret vars from .env files, pass them as secret_keys with is_secret=false and include the value
216- For secret vars (API keys, tokens, passwords), pass them as secret_keys with is_secret=true and omit the value — the user is prompted securely in the terminal
217- Secret values from .env files are NEVER included in parsed_env_files or this conversation
218- If no .env files found but detected_env_vars exist, mention those and ask user how to provide them
219- NEVER ask the user to type secret values in chat
220
221**Prerequisites:**
222- User must be authenticated (sync-ctl auth login)
223- A project must be selected (use select_project first)
224- Provider must be connected (check with list_deployment_capabilities)"#
225                .to_string(),
226            parameters: json!({
227                "type": "object",
228                "properties": {
229                    "path": {
230                        "type": "string",
231                        "description": "Subdirectory to deploy (for monorepos)"
232                    },
233                    "provider": {
234                        "type": "string",
235                        "enum": ["gcp", "hetzner", "azure"],
236                        "description": "Override: cloud provider"
237                    },
238                    "machine_type": {
239                        "type": "string",
240                        "description": "Override: machine type (e.g., cx22, e2-small)"
241                    },
242                    "region": {
243                        "type": "string",
244                        "description": "Override: deployment region"
245                    },
246                    "port": {
247                        "type": "integer",
248                        "description": "Override: port to expose"
249                    },
250                    "is_public": {
251                        "type": "boolean",
252                        "description": "Whether service should be publicly accessible. Default: false (internal only). Set to true for public URL."
253                    },
254                    "preview_only": {
255                        "type": "boolean",
256                        "description": "If true (default), show recommendation only. If false, deploy."
257                    },
258                    "secret_keys": {
259                        "type": "array",
260                        "description": "Env vars to include in deployment. For secrets, omit value \u{2014} user is prompted in terminal.",
261                        "items": {
262                            "type": "object",
263                            "properties": {
264                                "key": {
265                                    "type": "string",
266                                    "description": "Environment variable name"
267                                },
268                                "value": {
269                                    "type": "string",
270                                    "description": "Omit for secrets \u{2014} user will be prompted securely in terminal."
271                                },
272                                "is_secret": {
273                                    "type": "boolean",
274                                    "description": "Whether this is a secret (default: true). Secrets are prompted in terminal.",
275                                    "default": true
276                                }
277                            },
278                            "required": ["key"]
279                        }
280                    }
281                }
282            }),
283        }
284    }
285
286    async fn call(&self, args: Self::Args) -> Result<Self::Output, Self::Error> {
287        // 1. Determine analysis path
288        let analysis_path = if let Some(ref subpath) = args.path {
289            self.project_path.join(subpath)
290        } else {
291            self.project_path.clone()
292        };
293
294        // Validate path exists
295        if !analysis_path.exists() {
296            return Ok(format_error_for_llm(
297                "deploy_service",
298                ErrorCategory::FileNotFound,
299                &format!("Path not found: {}", analysis_path.display()),
300                Some(vec![
301                    "Check if the path exists",
302                    "Use list_directory to explore",
303                ]),
304            ));
305        }
306
307        // 2. Run project analysis
308        let config = AnalysisConfig {
309            deep_analysis: true,
310            ..Default::default()
311        };
312
313        let analysis = match analyze_project_with_config(&analysis_path, &config) {
314            Ok(a) => a,
315            Err(e) => {
316                return Ok(format_error_for_llm(
317                    "deploy_service",
318                    ErrorCategory::InternalError,
319                    &format!("Analysis failed: {}", e),
320                    Some(vec!["Check if the directory contains a valid project"]),
321                ));
322            }
323        };
324
325        // 3. Get API client and context
326        let client = match PlatformApiClient::new() {
327            Ok(c) => c,
328            Err(_) => {
329                return Ok(format_error_for_llm(
330                    "deploy_service",
331                    ErrorCategory::PermissionDenied,
332                    "Not authenticated",
333                    Some(vec!["Run: sync-ctl auth login"]),
334                ));
335            }
336        };
337
338        // Load platform session for context
339        let session = match PlatformSession::load() {
340            Ok(s) => s,
341            Err(_) => {
342                return Ok(format_error_for_llm(
343                    "deploy_service",
344                    ErrorCategory::InternalError,
345                    "Failed to load platform session",
346                    Some(vec!["Try selecting a project with select_project"]),
347                ));
348            }
349        };
350
351        if !session.is_project_selected() {
352            return Ok(format_error_for_llm(
353                "deploy_service",
354                ErrorCategory::ValidationFailed,
355                "No project selected",
356                Some(vec!["Use select_project to choose a project first"]),
357            ));
358        }
359
360        let project_id = session.project_id.clone().unwrap_or_default();
361        let environment_id = session.environment_id.clone();
362
363        // 4. Check for existing deployment configs (duplicate detection)
364        let existing_configs = match client.list_deployment_configs(&project_id).await {
365            Ok(configs) => configs,
366            Err(e) => {
367                // Non-fatal - continue without duplicate detection
368                tracing::warn!("Failed to fetch existing configs: {}", e);
369                Vec::new()
370            }
371        };
372
373        // Get service name early to check for duplicates
374        let service_name = get_service_name(&analysis_path);
375
376        // Find existing config with same service name
377        let existing_config = existing_configs
378            .iter()
379            .find(|c| c.service_name.eq_ignore_ascii_case(&service_name));
380
381        // 5. Get environment info for display
382        let environments: Vec<crate::platform::api::types::Environment> = client
383            .list_environments(&project_id)
384            .await
385            .unwrap_or_default();
386
387        // Resolve environment name for display
388        let (resolved_env_id, resolved_env_name, is_production) =
389            if let Some(ref env_id) = environment_id {
390                let env = environments.iter().find(|e| e.id == *env_id);
391                let name = env
392                    .map(|e| e.name.clone())
393                    .unwrap_or_else(|| "Unknown".to_string());
394                let is_prod = name.to_lowercase().contains("prod");
395                (env_id.clone(), name, is_prod)
396            } else if let Some(existing) = &existing_config {
397                // Use the environment from existing config
398                let env = environments
399                    .iter()
400                    .find(|e| e.id == existing.environment_id);
401                let name = env
402                    .map(|e| e.name.clone())
403                    .unwrap_or_else(|| "Unknown".to_string());
404                let is_prod = name.to_lowercase().contains("prod");
405                (existing.environment_id.clone(), name, is_prod)
406            } else if let Some(first_env) = environments.first() {
407                let is_prod = first_env.name.to_lowercase().contains("prod");
408                (first_env.id.clone(), first_env.name.clone(), is_prod)
409            } else {
410                ("".to_string(), "No environment".to_string(), false)
411            };
412
413        // 6. Get available providers
414        let capabilities = match get_provider_deployment_statuses(&client, &project_id).await {
415            Ok(c) => c,
416            Err(e) => {
417                return Ok(format_error_for_llm(
418                    "deploy_service",
419                    ErrorCategory::NetworkError,
420                    &format!("Failed to get deployment capabilities: {}", e),
421                    None,
422                ));
423            }
424        };
425
426        // Check if any provider is available
427        let available_providers: Vec<CloudProvider> = capabilities
428            .iter()
429            .filter(|s| s.provider.is_available() && s.is_connected)
430            .map(|s| s.provider.clone())
431            .collect();
432
433        if available_providers.is_empty() {
434            return Ok(format_error_for_llm(
435                "deploy_service",
436                ErrorCategory::ResourceUnavailable,
437                "No cloud providers connected",
438                Some(vec![
439                    "Connect a cloud provider (GCP, Hetzner, or Azure) in platform settings",
440                    "Use open_provider_settings to configure a provider",
441                ]),
442            ));
443        }
444
445        // 5. Check for existing K8s clusters
446        let has_existing_k8s = capabilities.iter().any(|s| !s.clusters.is_empty());
447
448        // 6. Generate recommendation
449        let recommendation_input = RecommendationInput {
450            analysis: analysis.clone(),
451            available_providers: available_providers.clone(),
452            has_existing_k8s,
453            user_region_hint: args.region.clone(),
454        };
455
456        let recommendation = recommend_deployment(recommendation_input);
457
458        // 6.5. For Hetzner deployments, fetch real-time availability and update recommendations
459        // We require real-time data - no static fallback allowed
460        let final_provider_for_check = args
461            .provider
462            .as_ref()
463            .and_then(|p| CloudProvider::from_str(p).ok())
464            .unwrap_or(recommendation.provider.clone());
465
466        // Store Hetzner availability data for smart recommendations
467        let mut hetzner_availability: Option<HetznerAvailabilityData> = None;
468
469        if final_provider_for_check == CloudProvider::Hetzner {
470            // Fetch real-time Hetzner regions and server types
471            let regions = match get_hetzner_regions_dynamic(&client, &project_id).await {
472                HetznerFetchResult::Success(r) if !r.is_empty() => r,
473                HetznerFetchResult::Success(_) => {
474                    return Ok(format_error_for_llm(
475                        "deploy_service",
476                        ErrorCategory::ResourceUnavailable,
477                        "No Hetzner regions available",
478                        Some(vec![
479                            "Check your Hetzner account status",
480                            "Use list_hetzner_availability to see current availability",
481                        ]),
482                    ));
483                }
484                HetznerFetchResult::NoCredentials => {
485                    return Ok(format_error_for_llm(
486                        "deploy_service",
487                        ErrorCategory::PermissionDenied,
488                        "Cannot recommend Hetzner deployment: Hetzner credentials not configured",
489                        Some(vec![
490                            "Add your Hetzner API token in project settings",
491                            "Use open_provider_settings to configure Hetzner",
492                            "Or specify a different provider (e.g., provider='gcp')",
493                        ]),
494                    ));
495                }
496                HetznerFetchResult::ApiError(err) => {
497                    return Ok(format_error_for_llm(
498                        "deploy_service",
499                        ErrorCategory::NetworkError,
500                        &format!(
501                            "Cannot recommend Hetzner deployment: Failed to fetch availability - {}",
502                            err
503                        ),
504                        Some(vec![
505                            "Use list_hetzner_availability to check current status",
506                            "Or specify a different provider (e.g., provider='gcp')",
507                        ]),
508                    ));
509                }
510            };
511
512            // Fetch server types with optional location filter
513            let server_types = match get_hetzner_server_types_dynamic(
514                &client,
515                &project_id,
516                args.region.as_deref(),
517            )
518            .await
519            {
520                HetznerFetchResult::Success(s) if !s.is_empty() => s,
521                HetznerFetchResult::Success(_) => {
522                    return Ok(format_error_for_llm(
523                        "deploy_service",
524                        ErrorCategory::ResourceUnavailable,
525                        "No Hetzner server types available",
526                        Some(vec![
527                            "Check your Hetzner account status",
528                            "Use list_hetzner_availability to see current availability",
529                        ]),
530                    ));
531                }
532                HetznerFetchResult::NoCredentials => {
533                    return Ok(format_error_for_llm(
534                        "deploy_service",
535                        ErrorCategory::PermissionDenied,
536                        "Cannot recommend Hetzner deployment: Hetzner credentials not configured",
537                        Some(vec![
538                            "Add your Hetzner API token in project settings",
539                            "Use open_provider_settings to configure Hetzner",
540                        ]),
541                    ));
542                }
543                HetznerFetchResult::ApiError(err) => {
544                    return Ok(format_error_for_llm(
545                        "deploy_service",
546                        ErrorCategory::NetworkError,
547                        &format!(
548                            "Cannot recommend Hetzner deployment: Failed to fetch server types - {}",
549                            err
550                        ),
551                        Some(vec![
552                            "Use list_hetzner_availability to check current status",
553                        ]),
554                    ));
555                }
556            };
557
558            // Store for later use in recommendations
559            hetzner_availability = Some(HetznerAvailabilityData {
560                regions,
561                server_types,
562            });
563        }
564
565        // 7. Extract analysis summary
566        let primary_language = analysis
567            .languages
568            .first()
569            .map(|l| l.name.clone())
570            .unwrap_or_else(|| "Unknown".to_string());
571
572        let primary_framework = analysis
573            .technologies
574            .iter()
575            .find(|t| {
576                matches!(
577                    t.category,
578                    TechnologyCategory::BackendFramework | TechnologyCategory::MetaFramework
579                )
580            })
581            .map(|t| t.name.clone())
582            .unwrap_or_else(|| "None detected".to_string());
583
584        let has_dockerfile = analysis
585            .docker_analysis
586            .as_ref()
587            .map(|d| !d.dockerfiles.is_empty())
588            .unwrap_or(false);
589
590        let has_k8s = analysis
591            .infrastructure
592            .as_ref()
593            .map(|i| i.has_kubernetes)
594            .unwrap_or(false);
595
596        // 10. If preview_only, return recommendation
597        if args.preview_only {
598            // Build the deployment mode info
599            let (deployment_mode, mode_explanation, next_steps) = if let Some(existing) =
600                &existing_config
601            {
602                (
603                    "REDEPLOY",
604                    format!(
605                        "Service '{}' already has a deployment config (ID: {}). Deploying will trigger a REDEPLOY of the existing service.",
606                        existing.service_name, existing.id
607                    ),
608                    vec![
609                        "To redeploy with current config: call deploy_service with preview_only=false".to_string(),
610                        "This will trigger a new deployment of the existing service".to_string(),
611                        "The existing configuration will be used".to_string(),
612                    ]
613                )
614            } else {
615                (
616                    "NEW_DEPLOYMENT",
617                    format!(
618                        "No existing deployment config found for '{}'. This will create a NEW deployment configuration.",
619                        service_name
620                    ),
621                    vec![
622                        "To deploy with these settings: call deploy_service with preview_only=false".to_string(),
623                        "To customize: specify provider, machine_type, region, or port parameters".to_string(),
624                        "Check parsed_env_files — if .env files were found, ask user whether to inject them as secret_keys".to_string(),
625                        "To see more options: check the hetzner_availability section for current pricing".to_string(),
626                    ]
627                )
628            };
629
630            // Production warning
631            let production_warning = if is_production {
632                Some(
633                    "⚠️  WARNING: This will deploy to PRODUCTION environment. Please confirm you intend to deploy to production.",
634                )
635            } else {
636                None
637            };
638
639            // For Hetzner, use real-time availability to select best options
640            let (
641                final_machine_type,
642                final_region,
643                machine_reasoning,
644                region_reasoning,
645                price_monthly,
646            ) = if let Some(ref hetzner) = hetzner_availability {
647                // SMART SELECTION: Find the best region + machine combination
648                // Strategy: Find cheapest machine with 4GB+ that's actually available somewhere
649
                // First, find all server types that are actually available (non-empty available_in)
                let available_types: Vec<_> = hetzner
                    .server_types
                    .iter()
                    .filter(|st| !st.available_in.is_empty())
                    .collect();

                // If user specified a region, check if anything is available there
                let user_region = args.region.as_deref();

                // Find best machine: cheapest with 4GB+ that's available.
                // Yields Some((server type, region id)) or None when nothing is available.
                // NOTE(review): every `min_by` below uses `partial_cmp(..).unwrap()`,
                // which panics if any price is NaN — assumes the Hetzner API never
                // returns NaN prices; confirm or switch to `f64::total_cmp`.
                let best_machine_with_region = if let Some(region) = user_region {
                    // User specified region - find best machine for that region
                    available_types
                        .iter()
                        .filter(|st| {
                            st.memory_gb >= 4.0 && st.available_in.contains(&region.to_string())
                        })
                        .min_by(|a, b| a.price_monthly.partial_cmp(&b.price_monthly).unwrap())
                        .map(|st| (*st, region.to_string()))
                        .or_else(|| {
                            // No 4GB+ available in that region, try any machine
                            available_types
                                .iter()
                                .filter(|st| st.available_in.contains(&region.to_string()))
                                .min_by(|a, b| {
                                    a.price_monthly.partial_cmp(&b.price_monthly).unwrap()
                                })
                                .map(|st| (*st, region.to_string()))
                        })
                } else {
                    // No region specified - find globally cheapest 4GB+ machine and use its best region
                    available_types
                        .iter()
                        .filter(|st| st.memory_gb >= 4.0)
                        .min_by(|a, b| a.price_monthly.partial_cmp(&b.price_monthly).unwrap())
                        .map(|st| {
                            // Pick the first available region for this machine
                            let region = st
                                .available_in
                                .first()
                                .cloned()
                                // Hard-coded fallback region id when the list is
                                // unexpectedly empty (shouldn't occur after the
                                // non-empty filter above).
                                .unwrap_or_else(|| "nbg1".to_string());
                            (*st, region)
                        })
                        .or_else(|| {
                            // No 4GB+ available anywhere, find any cheapest machine
                            available_types
                                .iter()
                                .min_by(|a, b| {
                                    a.price_monthly.partial_cmp(&b.price_monthly).unwrap()
                                })
                                .map(|st| {
                                    let region = st
                                        .available_in
                                        .first()
                                        .cloned()
                                        .unwrap_or_else(|| "nbg1".to_string());
                                    (*st, region)
                                })
                        })
                };

                // Each arm below yields the 5-tuple:
                // (machine_type, region, machine_reasoning, region_reasoning, price_monthly)
                // consumed by the recommendation payload further down.
                if let Some((machine, region_id)) = best_machine_with_region {
                    // Human-readable region label; falls back to the raw id when
                    // the region metadata is missing.
                    let region_name = hetzner
                        .regions
                        .iter()
                        .find(|r| r.id == region_id)
                        .map(|r| format!("{}, {}", r.name, r.location))
                        .unwrap_or_else(|| region_id.clone());

                    let available_count = hetzner
                        .regions
                        .iter()
                        .find(|r| r.id == region_id)
                        .map(|r| r.available_server_types.len())
                        .unwrap_or(0);

                    (
                        // User-supplied machine_type wins over the auto-selection.
                        // NOTE(review): when args.machine_type is Some, the
                        // reasoning string and price below still describe the
                        // auto-selected `machine`, not the user's override —
                        // confirm this mismatch is intended.
                        args.machine_type
                            .clone()
                            .unwrap_or_else(|| machine.id.clone()),
                        region_id.clone(),
                        format!(
                            "Selected {} ({} vCPU, {:.0} GB RAM) - cheapest AVAILABLE option at €{:.2}/mo",
                            machine.id, machine.cores, machine.memory_gb, machine.price_monthly
                        ),
                        format!(
                            "Selected {} ({}) - {} server types available",
                            region_id, region_name, available_count
                        ),
                        Some(machine.price_monthly),
                    )
                } else {
                    // No server types available anywhere - this shouldn't happen if we passed validation
                    (
                        args.machine_type
                            .clone()
                            .unwrap_or_else(|| recommendation.machine_type.clone()),
                        args.region
                            .clone()
                            .unwrap_or_else(|| recommendation.region.clone()),
                        "WARNING: No server types currently available - using fallback".to_string(),
                        "Using fallback region".to_string(),
                        None,
                    )
                }
            } else {
                // Non-Hetzner provider - use static recommendation
                // (no real-time availability data, so no price estimate).
                (
                    args.machine_type
                        .clone()
                        .unwrap_or_else(|| recommendation.machine_type.clone()),
                    args.region
                        .clone()
                        .unwrap_or_else(|| recommendation.region.clone()),
                    recommendation.machine_reasoning.clone(),
                    recommendation.region_reasoning.clone(),
                    None,
                )
            };
771
            // Build availability info for response.
            // Summarizes the live Hetzner data (regions, a sample of server types,
            // and the cheapest 4GB+ option) so the LLM can present alternatives.
            // None when no real-time availability was fetched (non-Hetzner path).
            let hetzner_availability_info = hetzner_availability.as_ref().map(|h| {
                json!({
                    "regions": h.regions.iter().map(|r| json!({
                        "id": r.id,
                        "name": r.name,
                        // NOTE(review): the "country" key is populated from
                        // `r.location` — confirm location is actually a country.
                        "country": r.location,
                        "available_server_types_count": r.available_server_types.len(),
                    })).collect::<Vec<_>>(),
                    // Only the first 10 server types to keep the payload small.
                    "server_types": h.server_types.iter().take(10).map(|st| json!({
                        "id": st.id,
                        "cores": st.cores,
                        "memory_gb": st.memory_gb,
                        "price_monthly_eur": st.price_monthly,
                        "available_in": st.available_in,
                    })).collect::<Vec<_>>(),
                    // Cheapest option with at least 4 GB RAM; null if none exists.
                    // Panics if a price is NaN (same assumption as the selection logic).
                    "cheapest_4gb": h.server_types.iter()
                        .filter(|st| st.memory_gb >= 4.0)
                        .min_by(|a, b| a.price_monthly.partial_cmp(&b.price_monthly).unwrap())
                        .map(|st| json!({
                            "id": st.id,
                            "specs": format!("{} vCPU, {:.0} GB RAM", st.cores, st.memory_gb),
                            "price_monthly_eur": st.price_monthly,
                        })),
                })
            });
798
            // Discover .env files and parse their contents for agent surfacing.
            // Paths returned by discover_env_files are relative to analysis_path.
            let discovered_env_files_raw = discover_env_files(&analysis_path);
            let discovered_env_file_paths: Vec<String> = discovered_env_files_raw
                .iter()
                .map(|p| p.display().to_string())
                .collect();

            // Parse each .env file so the LLM can present keys to the user.
            // Files that are empty or fail to parse are silently dropped
            // (failures are only logged at debug level).
            let parsed_env_files: Vec<serde_json::Value> = discovered_env_files_raw
                .iter()
                .filter_map(|rel_path| {
                    // Re-anchor the relative path to get an absolute path for reading.
                    let abs_path = analysis_path.join(rel_path);
                    match parse_env_file(&abs_path) {
                        Ok(entries) if !entries.is_empty() => Some(json!({
                            "file": rel_path.display().to_string(),
                            "variable_count": entries.len(),
                            "variables": entries.iter().map(|e| json!({
                                "key": e.key,
                                "is_secret": e.is_secret,
                                // Only include values for non-secret vars — secrets are
                                // never exposed to the LLM conversation.
                                "value": if e.is_secret { None } else { Some(&e.value) },
                            })).collect::<Vec<_>>(),
                        })),
                        Ok(_) => None, // empty file
                        Err(e) => {
                            tracing::debug!("Could not parse env file {:?}: {}", rel_path, e);
                            None
                        }
                    }
                })
                .collect();
831
            // Fetch deployed services and compute endpoint suggestions.
            // Network/API failures degrade gracefully to an empty list — endpoint
            // matching is a best-effort enrichment, not a hard requirement.
            let deployed_endpoints = match client.list_deployments(&project_id, Some(50)).await {
                Ok(paginated) => get_available_endpoints(&paginated.data),
                Err(e) => {
                    tracing::debug!("Could not fetch deployments for endpoint matching: {}", e);
                    Vec::new()
                }
            };
            // Exclude the service being deployed — we only want *other* services
            // that this one could connect to.
            let deployed_endpoints: Vec<_> = deployed_endpoints
                .into_iter()
                .filter(|ep| ep.service_name != service_name)
                .collect();
            // Only show private endpoints from the same cloud provider — private
            // IPs are not reachable across different provider networks.
            let deployed_endpoints = filter_endpoints_for_provider(
                deployed_endpoints,
                final_provider_for_check.as_str(),
            );

            // Env var names detected by project analysis, used to heuristically
            // match variables like FOO_SERVICE_URL to deployed services.
            let detected_env_var_names: Vec<String> = analysis
                .environment_variables
                .iter()
                .map(|e| e.name.clone())
                .collect();

            let endpoint_suggestions =
                match_env_vars_to_services(&detected_env_var_names, &deployed_endpoints);
859
            // Fetch project networks for the target provider.
            // Like the deployments fetch above, failure is non-fatal and only
            // logged at debug level — we fall back to an empty list.
            let project_networks = match client.list_project_networks(&project_id).await {
                Ok(nets) => nets,
                Err(e) => {
                    tracing::debug!("Could not fetch project networks: {}", e);
                    Vec::new()
                }
            };

            // Filter the networks down to endpoints for the chosen provider and
            // the resolved environment.
            let network_endpoints = extract_network_endpoints(
                &project_networks,
                final_provider_for_check.as_str(),
                Some(&resolved_env_id),
            );
874
            // Build the full "recommendation" preview payload returned to the LLM.
            // This is the dry-run branch: nothing is deployed; the agent is expected
            // to present this to the user and call the tool again with confirm=true
            // (see next_steps / confirmation_prompt).
            let response = json!({
                "status": "recommendation",
                "deployment_mode": deployment_mode,
                "mode_explanation": mode_explanation,
                "environment": {
                    "id": resolved_env_id,
                    "name": resolved_env_name,
                    "is_production": is_production,
                },
                // Only providers that are both supported and actually connected.
                "connected_providers": capabilities.iter()
                    .filter(|s| s.provider.is_available() && s.is_connected)
                    .map(|s| json!({
                        "provider": s.provider.as_str(),
                        "display_name": s.provider.display_name(),
                        "cloud_runner_available": s.cloud_runner_available,
                        "clusters": s.clusters.len(),
                        "registries": s.registries.len(),
                        "summary": s.summary,
                    }))
                    .collect::<Vec<_>>(),
                "production_warning": production_warning,
                // Present when this service already has a deployment config
                // (i.e. the REDEPLOY case); null for first-time deployments.
                "existing_config": existing_config.map(|c| json!({
                    "id": c.id,
                    "service_name": c.service_name,
                    "environment_id": c.environment_id,
                    "branch": c.branch,
                    "port": c.port,
                    "auto_deploy_enabled": c.auto_deploy_enabled,
                    "created_at": c.created_at.to_rfc3339(),
                })),
                // Summary of the local project analysis that fed the recommendation.
                "analysis": {
                    "path": analysis_path.display().to_string(),
                    "language": primary_language,
                    "framework": primary_framework,
                    "detected_port": recommendation.port,
                    "port_source": recommendation.port_source,
                    "health_endpoint": recommendation.health_check_path,
                    "has_dockerfile": has_dockerfile,
                    "has_kubernetes": has_k8s,
                    "detected_env_vars": analysis.environment_variables.iter().map(|e| json!({
                        "name": e.name,
                        "required": e.required,
                        "has_default": e.default_value.is_some(),
                        "description": e.description,
                    })).collect::<Vec<_>>(),
                },
                // The concrete recommendation, merging static analysis with the
                // real-time availability selection computed above.
                "recommendation": {
                    "provider": recommendation.provider.as_str(),
                    "provider_reasoning": recommendation.provider_reasoning,
                    "target": recommendation.target.as_str(),
                    "target_reasoning": recommendation.target_reasoning,
                    "machine_type": final_machine_type,
                    "machine_reasoning": machine_reasoning,
                    "region": final_region,
                    "region_reasoning": region_reasoning,
                    "price_monthly_eur": price_monthly,
                    "port": recommendation.port,
                    "health_check_path": recommendation.health_check_path,
                    "is_public": args.is_public,
                    "is_public_note": if args.is_public {
                        "Service will be PUBLICLY accessible from the internet"
                    } else {
                        "Service will be INTERNAL only (not accessible from internet)"
                    },
                    "confidence": recommendation.confidence,
                    // Tells the LLM whether machine/region came from live Hetzner
                    // data or from the static recommendation tables.
                    "availability_source": if hetzner_availability.is_some() { "real-time" } else { "static" },
                },
                "hetzner_availability": hetzner_availability_info,
                "alternatives": {
                    "providers": recommendation.alternatives.providers.iter().map(|p| json!({
                        "provider": p.provider.as_str(),
                        "available": p.available,
                        "reason_if_unavailable": p.reason_if_unavailable,
                    })).collect::<Vec<_>>(),
                    "machine_types": if hetzner_availability.is_some() {
                        // Use real-time data for Hetzner
                        hetzner_availability.as_ref().unwrap().server_types.iter().take(6).map(|st| json!({
                            "machine_type": st.id,
                            "vcpu": st.cores,
                            "memory_gb": st.memory_gb,
                            "price_monthly_eur": st.price_monthly,
                            "available_in": st.available_in,
                        })).collect::<Vec<_>>()
                    } else {
                        recommendation.alternatives.machine_types.iter().map(|m| json!({
                            "machine_type": m.machine_type,
                            "vcpu": m.vcpu,
                            "memory_gb": m.memory_gb,
                            "description": m.description,
                        })).collect::<Vec<_>>()
                    },
                    "regions": if hetzner_availability.is_some() {
                        // Use real-time data for Hetzner
                        hetzner_availability.as_ref().unwrap().regions.iter().map(|r| json!({
                            "region": r.id,
                            "display_name": format!("{}, {}", r.name, r.location),
                            "available_server_types_count": r.available_server_types.len(),
                        })).collect::<Vec<_>>()
                    } else {
                        recommendation.alternatives.regions.iter().map(|r| json!({
                            "region": r.region,
                            "display_name": r.display_name,
                        })).collect::<Vec<_>>()
                    },
                },
                "service_name": service_name,
                "discovered_env_files": discovered_env_file_paths,
                "parsed_env_files": parsed_env_files,
                // Other already-deployed services this one could talk to.
                "deployed_service_endpoints": deployed_endpoints.iter().map(|ep| json!({
                    "service_name": ep.service_name,
                    "url": ep.url,
                    "is_private": ep.is_private,
                    "status": ep.status,
                })).collect::<Vec<_>>(),
                // Heuristic env-var → deployed-service matches for wiring URLs.
                "endpoint_suggestions": endpoint_suggestions.iter().map(|s| json!({
                    "env_var": s.env_var_name,
                    "service_name": s.service.service_name,
                    "url": s.service.url,
                    "is_private": s.service.is_private,
                    "confidence": format!("{:?}", s.confidence),
                    "reason": s.reason,
                })).collect::<Vec<_>>(),
                "project_networks": network_endpoints.iter().map(|ne| json!({
                    "network_id": ne.network_id,
                    "cloud_provider": ne.cloud_provider,
                    "region": ne.region,
                    "status": ne.status,
                    "environment_id": ne.environment_id,
                    "connection_details": ne.connection_details.iter().map(|(k, v)| json!({
                        "key": k,
                        "value": v,
                        "suggested_env_var": k,
                    })).collect::<Vec<_>>(),
                })).collect::<Vec<_>>(),
                "next_steps": next_steps,
                // Question the agent should pose verbatim; redeploys get a short
                // prompt, new deployments include machine/price/region details.
                "confirmation_prompt": if existing_config.is_some() {
                    format!(
                        "REDEPLOY '{}' to {} environment?{}",
                        service_name,
                        resolved_env_name,
                        if is_production { " ⚠️  (PRODUCTION)" } else { "" }
                    )
                } else {
                    let price_info = price_monthly.map(|p| format!(" (€{:.2}/mo)", p)).unwrap_or_default();
                    format!(
                        "Deploy NEW service '{}' to {} ({}) with {}{} in {} on {} environment?{}",
                        service_name,
                        recommendation.provider.display_name(),
                        recommendation.target.display_name(),
                        final_machine_type,
                        price_info,
                        final_region,
                        resolved_env_name,
                        if is_production { " ⚠️  (PRODUCTION)" } else { "" }
                    )
                },
            });

            // Early return: the preview branch ends here without deploying.
            return serde_json::to_string_pretty(&response)
                .map_err(|e| DeployServiceError(format!("Failed to serialize: {}", e)));
        } // end of recommendation preview branch
1036
        // 11. Execute deployment - EITHER redeploy existing OR create new

        // If existing config found, trigger redeploy instead of creating new config.
        // This reuses the stored config as-is: commit_sha None means the backend
        // deploys the latest commit on the configured branch.
        if let Some(existing) = &existing_config {
            let trigger_request = TriggerDeploymentRequest {
                project_id: project_id.clone(),
                config_id: existing.id.clone(),
                commit_sha: None,
            };

            // Either outcome ends the tool call: success returns a structured
            // "redeployed" payload, API failure returns an LLM-formatted error
            // (as Ok, so the agent can read and relay it).
            return match client.trigger_deployment(&trigger_request).await {
                Ok(response) => {
                    let result = json!({
                        "status": "redeployed",
                        "deployment_mode": "REDEPLOY",
                        "config_id": existing.id,
                        "task_id": response.backstage_task_id,
                        "service_name": service_name,
                        "environment": {
                            "id": resolved_env_id,
                            "name": resolved_env_name,
                            "is_production": is_production,
                        },
                        "message": format!(
                            "Redeploy triggered for existing service '{}' on {} environment. Task ID: {}",
                            service_name, resolved_env_name, response.backstage_task_id
                        ),
                        // Follow-up tool calls the agent should suggest/perform.
                        "next_steps": [
                            format!("Monitor progress: use get_deployment_status with task_id '{}'", response.backstage_task_id),
                            "View logs after deployment: use get_service_logs",
                        ],
                    });

                    serde_json::to_string_pretty(&result)
                        .map_err(|e| DeployServiceError(format!("Failed to serialize: {}", e)))
                }
                Err(e) => Ok(format_api_error("deploy_service", e)),
            };
        }
1076
        // NEW DEPLOYMENT PATH - no existing config found.
        // Resolve the provider: explicit arg wins, otherwise the recommendation.
        // NOTE(review): an unparsable provider string silently falls back to the
        // recommended provider (from_str errors are discarded via .ok()) —
        // confirm the agent is told elsewhere when its provider arg is invalid.
        let final_provider = args
            .provider
            .as_ref()
            .and_then(|p| CloudProvider::from_str(p).ok())
            .unwrap_or(recommendation.provider.clone());
1083
        // For Hetzner, use real-time availability data to select best options.
        // NOTE(review): this duplicates the selection logic from the preview
        // branch above (which returns the richer 5-tuple with reasoning/price);
        // the two copies must be kept in sync or extracted into a helper.
        let (final_machine, final_region) = if let Some(ref hetzner) = hetzner_availability {
            // SMART SELECTION: Same logic as preview

            // Find all server types that are actually available (non-empty available_in)
            let available_types: Vec<_> = hetzner
                .server_types
                .iter()
                .filter(|st| !st.available_in.is_empty())
                .collect();

            let user_region = args.region.as_deref();

            // Find best machine: cheapest with 4GB+ that's available.
            // Same NaN-panic caveat as the preview: partial_cmp().unwrap()
            // assumes prices are never NaN.
            let best_machine_with_region = if let Some(region) = user_region {
                // User specified region - find best machine for that region
                available_types
                    .iter()
                    .filter(|st| {
                        st.memory_gb >= 4.0 && st.available_in.contains(&region.to_string())
                    })
                    .min_by(|a, b| a.price_monthly.partial_cmp(&b.price_monthly).unwrap())
                    .map(|st| (st.id.clone(), region.to_string()))
                    .or_else(|| {
                        // Relax the 4GB+ requirement if nothing qualifies there.
                        available_types
                            .iter()
                            .filter(|st| st.available_in.contains(&region.to_string()))
                            .min_by(|a, b| a.price_monthly.partial_cmp(&b.price_monthly).unwrap())
                            .map(|st| (st.id.clone(), region.to_string()))
                    })
            } else {
                // No region specified - find globally cheapest 4GB+ machine
                available_types
                    .iter()
                    .filter(|st| st.memory_gb >= 4.0)
                    .min_by(|a, b| a.price_monthly.partial_cmp(&b.price_monthly).unwrap())
                    .map(|st| {
                        let region = st
                            .available_in
                            .first()
                            .cloned()
                            .unwrap_or_else(|| "nbg1".to_string());
                        (st.id.clone(), region)
                    })
                    .or_else(|| {
                        // No 4GB+ anywhere: fall back to the cheapest machine at all.
                        available_types
                            .iter()
                            .min_by(|a, b| a.price_monthly.partial_cmp(&b.price_monthly).unwrap())
                            .map(|st| {
                                let region = st
                                    .available_in
                                    .first()
                                    .cloned()
                                    .unwrap_or_else(|| "nbg1".to_string());
                                (st.id.clone(), region)
                            })
                    })
            };

            if let Some((machine, region)) = best_machine_with_region {
                // Explicit args still win over the availability-based pick.
                // NOTE(review): a user-supplied machine_type is not validated
                // against availability in the chosen region here — confirm the
                // backend rejects unavailable combinations.
                (
                    args.machine_type.clone().unwrap_or(machine),
                    args.region.clone().unwrap_or(region),
                )
            } else {
                // Fallback to static defaults
                (
                    args.machine_type
                        .clone()
                        .unwrap_or_else(|| recommendation.machine_type.clone()),
                    args.region
                        .clone()
                        .unwrap_or_else(|| recommendation.region.clone()),
                )
            }
        } else {
            // Non-Hetzner or no availability data - use static defaults
            let machine = args
                .machine_type
                .clone()
                .unwrap_or_else(|| recommendation.machine_type.clone());
            let region = args
                .region
                .clone()
                .unwrap_or_else(|| recommendation.region.clone());
            (machine, region)
        };
1171
        // Port: explicit arg wins over the analyzed/recommended port.
        let final_port = args.port.unwrap_or(recommendation.port);

        // Get repository info. Unlike the best-effort fetches in the preview
        // branch, a failure here aborts the deployment with an LLM-readable error.
        let repositories = match client.list_project_repositories(&project_id).await {
            Ok(repos) => repos,
            Err(e) => {
                return Ok(format_error_for_llm(
                    "deploy_service",
                    ErrorCategory::NetworkError,
                    &format!("Failed to get repositories: {}", e),
                    Some(vec!["Ensure a repository is connected to the project"]),
                ));
            }
        };
1186
        // Smart repository selection: match local git remote or find non-gitops repo.
        // No match is fatal for a new deployment — there is nothing to build from.
        let repo = match find_matching_repository(&repositories.repositories, &self.project_path) {
            Some(r) => r,
            None => {
                return Ok(format_error_for_llm(
                    "deploy_service",
                    ErrorCategory::ResourceUnavailable,
                    "No repository connected to project",
                    Some(vec![
                        "Connect a GitHub repository to the project first",
                        "Use the platform UI to connect a repository",
                    ]),
                ));
            }
        };

        tracing::info!(
            "Deploy service: Using repository {} (id: {}), default_branch: {:?}",
            repo.repository_full_name,
            repo.repository_id,
            repo.default_branch
        );
1209
        // Use resolved environment ID from earlier.
        // Guard: an empty id means environment resolution (done before the
        // preview branch) produced nothing usable — abort with guidance.
        if resolved_env_id.is_empty() {
            return Ok(format_error_for_llm(
                "deploy_service",
                ErrorCategory::ResourceUnavailable,
                "No environment found for project",
                Some(vec!["Create an environment in the platform first"]),
            ));
        }
1219
        // Build deployment config request
        // Derive dockerfile path and build context from DockerfileInfo
        //
        // IMPORTANT: Paths must be relative to the REPO ROOT for Cloud Runner.
        // Cloud Runner clones the GitHub repo and builds from there.
        //
        // Example: User analyzes path="services/contact-intelligence" which has a Dockerfile.
        // The GitHub repo structure is:
        //   repo-root/
        //     services/
        //       contact-intelligence/
        //         Dockerfile
        //
        // Cloud Runner needs:
        //   dockerfile: "services/contact-intelligence/Dockerfile"
        //   context: "services/contact-intelligence"
        //
        // NOT:
        //   dockerfile: "Dockerfile", context: "."  (would look at repo root)
        //
        // NOTE(review): only the FIRST Dockerfile found by the analyzer is used;
        // multi-Dockerfile projects get no choice here — confirm that ordering
        // is deterministic and sensible.
        let (dockerfile_path, build_context) = analysis
            .docker_analysis
            .as_ref()
            .and_then(|d| d.dockerfiles.first())
            .map(|df| {
                // Get dockerfile filename (e.g., "Dockerfile" or "Dockerfile.prod")
                let dockerfile_name = df
                    .path
                    .file_name()
                    .map(|n| n.to_string_lossy().to_string())
                    .unwrap_or_else(|| "Dockerfile".to_string());

                // Derive dockerfile's directory relative to analysis_path.
                // NOTE(review): if df.path is not prefixed by analysis_path
                // (e.g. already relative), strip_prefix fails and this silently
                // degrades to "" — presumably df.path is always absolute; verify.
                let analysis_relative_dir = df
                    .path
                    .parent()
                    .and_then(|p| p.strip_prefix(&analysis_path).ok())
                    .map(|p| p.to_string_lossy().to_string())
                    .unwrap_or_default();

                // Build paths relative to REPO ROOT by prepending args.path (the subdirectory)
                // This ensures Cloud Runner finds the Dockerfile in the cloned repo
                let subpath = args.path.as_deref().unwrap_or("");

                if subpath.is_empty() {
                    // Analyzing repo root - use paths as-is
                    if analysis_relative_dir.is_empty() {
                        (dockerfile_name, ".".to_string())
                    } else {
                        (
                            format!("{}/{}", analysis_relative_dir, dockerfile_name),
                            analysis_relative_dir,
                        )
                    }
                } else {
                    // Analyzing a subdirectory - prepend subpath to make repo-root-relative
                    if analysis_relative_dir.is_empty() {
                        // Dockerfile at root of analyzed subdir
                        // e.g., subpath="services/contact-intelligence" -> dockerfile="services/contact-intelligence/Dockerfile"
                        (
                            format!("{}/{}", subpath, dockerfile_name),
                            subpath.to_string(),
                        )
                    } else {
                        // Dockerfile in nested dir within analyzed subdir
                        // e.g., subpath="services", analysis_relative_dir="contact-intelligence"
                        let full_context = format!("{}/{}", subpath, analysis_relative_dir);
                        (
                            format!("{}/{}", full_context, dockerfile_name),
                            full_context,
                        )
                    }
                }
            })
            .unwrap_or_else(|| {
                // No dockerfile found - use subpath as context if provided, else root.
                // The backend will fail the build if this default path doesn't exist.
                let subpath = args.path.as_deref().unwrap_or("");
                if subpath.is_empty() {
                    ("Dockerfile".to_string(), ".".to_string())
                } else {
                    (format!("{}/Dockerfile", subpath), subpath.to_string())
                }
            });

        tracing::debug!(
            "Deploy service docker config: dockerfile_path={}, build_context={}, subpath={:?}",
            dockerfile_path,
            build_context,
            args.path
        );
1309
        // Fetch provider_account_id from credentials for GCP/Azure.
        // GCP needs a project id and Azure a subscription id to target resources;
        // both come from the stored provider connection. Lookup failures are
        // silently ignored (ids stay None) — the backend presumably validates.
        let mut gcp_project_id = None;
        let mut subscription_id = None;
        if matches!(final_provider, CloudProvider::Gcp | CloudProvider::Azure) {
            if let Ok(Some(cred)) = client
                .check_provider_connection(&final_provider, &project_id)
                .await
            {
                match final_provider {
                    CloudProvider::Gcp => gcp_project_id = cred.provider_account_id,
                    CloudProvider::Azure => subscription_id = cred.provider_account_id,
                    _ => {}
                }
            }
        }

        // Determine CPU/memory from args or recommendation (explicit args win;
        // both may be None, leaving sizing to provider defaults).
        let final_cpu = args.cpu.clone().or_else(|| recommendation.cpu.clone());
        let final_memory = args
            .memory
            .clone()
            .or_else(|| recommendation.memory.clone());
1332
1333        let config_input = CloudRunnerConfigInput {
1334            provider: Some(final_provider.clone()),
1335            region: Some(final_region.clone()),
1336            server_type: if final_provider == CloudProvider::Hetzner {
1337                Some(final_machine.clone())
1338            } else {
1339                None
1340            },
1341            gcp_project_id,
1342            cpu: final_cpu.clone(),
1343            memory: final_memory.clone(),
1344            min_instances: args.min_instances,
1345            max_instances: args.max_instances,
1346            allow_unauthenticated: Some(args.is_public),
1347            subscription_id,
1348            is_public: Some(args.is_public),
1349            health_check_path: recommendation.health_check_path.clone(),
1350            ..Default::default()
1351        };
1352        let cloud_runner_config = build_cloud_runner_config_v2(&config_input);
1353
1354        // Resolve secrets if provided
1355        let secrets = if let Some(ref keys) = args.secret_keys {
1356            let mut resolved = Vec::new();
1357            for sk in keys {
1358                let value = match &sk.value {
1359                    Some(v) => v.clone(),
1360                    None if self.execution_context.has_terminal() => {
1361                        match prompt_secret_value(&sk.key) {
1362                            SecretPromptResult::Value(v) => v,
1363                            SecretPromptResult::Skipped => continue,
1364                            SecretPromptResult::Cancelled => {
1365                                return Ok(format_error_for_llm(
1366                                    "deploy_service",
1367                                    ErrorCategory::ValidationFailed,
1368                                    "Secret entry cancelled by user",
1369                                    Some(vec![
1370                                        "The user cancelled secret input. Try again when ready.",
1371                                    ]),
1372                                ));
1373                            }
1374                        }
1375                    }
1376                    None => continue, // server mode, skip secrets without values
1377                };
1378                resolved.push(DeploymentSecretInput {
1379                    key: sk.key.clone(),
1380                    value,
1381                    is_secret: sk.is_secret,
1382                });
1383            }
1384            if resolved.is_empty() {
1385                None
1386            } else {
1387                Some(resolved)
1388            }
1389        } else {
1390            None
1391        };
1392
1393        // SECURITY: Pre-compute response info (keys only, no values) before moving secrets
1394        let secrets_set_info = secrets.as_ref().map(|s| {
1395            s.iter()
1396                .map(|si| json!({"key": si.key, "is_secret": si.is_secret}))
1397                .collect::<Vec<_>>()
1398        });
1399
1400        let config_request = CreateDeploymentConfigRequest {
1401            project_id: project_id.clone(),
1402            service_name: service_name.clone(),
1403            repository_id: repo.repository_id,
1404            repository_full_name: repo.repository_full_name.clone(),
1405            dockerfile_path: Some(dockerfile_path.clone()),
1406            dockerfile: Some(dockerfile_path.clone()),
1407            build_context: Some(build_context.clone()),
1408            context: Some(build_context.clone()),
1409            port: final_port as i32,
1410            branch: repo
1411                .default_branch
1412                .clone()
1413                .unwrap_or_else(|| "main".to_string()),
1414            target_type: recommendation.target.as_str().to_string(),
1415            cloud_provider: final_provider.as_str().to_string(),
1416            environment_id: resolved_env_id.clone(),
1417            cluster_id: None,  // Cloud Runner doesn't need cluster
1418            registry_id: None, // Auto-provision
1419            auto_deploy_enabled: true,
1420            is_public: Some(args.is_public),
1421            cloud_runner_config: Some(cloud_runner_config),
1422            secrets,
1423        };
1424
1425        // Create config
1426        let config = match client.create_deployment_config(&config_request).await {
1427            Ok(c) => c,
1428            Err(e) => {
1429                return Ok(format_api_error("deploy_service", e));
1430            }
1431        };
1432
1433        // Trigger deployment
1434        let trigger_request = TriggerDeploymentRequest {
1435            project_id: project_id.clone(),
1436            config_id: config.id.clone(),
1437            commit_sha: None,
1438        };
1439
1440        match client.trigger_deployment(&trigger_request).await {
1441            Ok(response) => {
1442                let result = json!({
1443                    "status": "deployed",
1444                    "deployment_mode": "NEW_DEPLOYMENT",
1445                    "config_id": config.id,
1446                    "task_id": response.backstage_task_id,
1447                    "service_name": service_name,
1448                    "environment": {
1449                        "id": resolved_env_id,
1450                        "name": resolved_env_name,
1451                        "is_production": is_production,
1452                    },
1453                    "provider": final_provider.as_str(),
1454                    "machine_type": final_machine,
1455                    "region": final_region,
1456                    "port": final_port,
1457                    "docker_config": {
1458                        "dockerfile_path": dockerfile_path,
1459                        "build_context": build_context,
1460                    },
1461                    "secrets_set": secrets_set_info,
1462                    "message": format!(
1463                        "NEW deployment started for '{}' on {} environment. Task ID: {}",
1464                        service_name, resolved_env_name, response.backstage_task_id
1465                    ),
1466                    "next_steps": [
1467                        format!("Monitor progress: use get_deployment_status with task_id '{}'", response.backstage_task_id),
1468                        "View logs after deployment: use get_service_logs",
1469                    ],
1470                });
1471
1472                serde_json::to_string_pretty(&result)
1473                    .map_err(|e| DeployServiceError(format!("Failed to serialize: {}", e)))
1474            }
1475            Err(e) => Ok(format_api_error("deploy_service", e)),
1476        }
1477    }
1478}
1479
/// Extract service name from path
///
/// Derives a deployable service name from the final path component:
/// lowercased, with underscores and spaces replaced by hyphens
/// (e.g. `my_service` -> `my-service`). Falls back to `"service"` when
/// the path has no usable final component (e.g. `/` or `..`).
///
/// Takes `&Path` rather than `&PathBuf` (clippy `ptr_arg`); existing
/// `&PathBuf` call sites still work via deref coercion.
fn get_service_name(path: &std::path::Path) -> String {
    path.file_name()
        .and_then(|n| n.to_str())
        .map(|n| n.to_lowercase().replace(['_', ' '], "-"))
        .unwrap_or_else(|| "service".to_string())
}
1487
/// Detect the git remote URL from a directory
///
/// Runs `git remote get-url origin` in `project_path` and returns the
/// trimmed URL. Returns `None` when git is not installed, the directory is
/// not a repository, no `origin` remote is configured, the output is not
/// valid UTF-8, or the URL is empty (an empty `Some("")` would be useless
/// to callers matching repositories).
///
/// Takes `&Path` rather than `&PathBuf` (clippy `ptr_arg`); existing
/// `&PathBuf` call sites still work via deref coercion.
fn detect_git_remote(project_path: &std::path::Path) -> Option<String> {
    let output = Command::new("git")
        .args(["remote", "get-url", "origin"])
        .current_dir(project_path)
        .output()
        .ok()?;

    if !output.status.success() {
        return None;
    }

    let stdout = String::from_utf8(output.stdout).ok()?;
    let url = stdout.trim();
    // Treat an empty URL the same as "no remote detected".
    (!url.is_empty()).then(|| url.to_string())
}
1503
/// Parse repository full name from git remote URL
/// Handles both SSH (git@github.com:owner/repo.git) and HTTPS (https://github.com/owner/repo.git)
///
/// Returns `Some("owner/repo")`, with any trailing `/` and `.git` suffix
/// stripped, or `None` for unrecognized schemes or URLs with an empty
/// repository path (the previous version returned `Some("")` for
/// `git@host:` and kept trailing slashes, which broke repo-name matching).
fn parse_repo_from_url(url: &str) -> Option<String> {
    // Trailing slashes never carry meaning in a remote URL; normalize first.
    let url = url.trim().trim_end_matches('/');

    // SSH format: git@github.com:owner/repo.git
    if let Some(rest) = url.strip_prefix("git@") {
        let (_host, path) = rest.split_once(':')?;
        let path = path.trim_end_matches(".git");
        if path.is_empty() {
            return None;
        }
        return Some(path.to_string());
    }

    // HTTPS format: https://github.com/owner/repo.git
    if url.starts_with("https://") || url.starts_with("http://") {
        // skip(3) drops "<scheme>:", "" (from "//") and the host,
        // leaving "owner/repo[.git]".
        let path: String = url.split('/').skip(3).collect::<Vec<_>>().join("/");
        let path = path.trim_end_matches(".git");
        if !path.is_empty() {
            return Some(path.to_string());
        }
    }

    None
}
1538
1539/// Find repository matching local git remote, or fall back to non-gitops repo
1540fn find_matching_repository<'a>(
1541    repositories: &'a [ProjectRepository],
1542    project_path: &PathBuf,
1543) -> Option<&'a ProjectRepository> {
1544    // First, try to detect from local git remote
1545    if let Some(detected_name) =
1546        detect_git_remote(project_path).and_then(|url| parse_repo_from_url(&url))
1547    {
1548        tracing::debug!("Detected local git remote: {}", detected_name);
1549
1550        if let Some(repo) = repositories
1551            .iter()
1552            .find(|r| r.repository_full_name.eq_ignore_ascii_case(&detected_name))
1553        {
1554            tracing::debug!("Matched detected repo: {}", repo.repository_full_name);
1555            return Some(repo);
1556        }
1557    }
1558
1559    // Fall back: find first non-GitOps repository
1560    // GitOps repos are typically infrastructure/config repos, not application repos
1561    if let Some(repo) = repositories.iter().find(|r| {
1562        r.is_primary_git_ops != Some(true)
1563            && !r
1564                .repository_full_name
1565                .to_lowercase()
1566                .contains("infrastructure")
1567            && !r.repository_full_name.to_lowercase().contains("gitops")
1568    }) {
1569        tracing::debug!("Using non-gitops repo: {}", repo.repository_full_name);
1570        return Some(repo);
1571    }
1572
1573    // Last resort: first repo
1574    repositories.first()
1575}
1576
1577/// Format a PlatformApiError for LLM consumption
1578fn format_api_error(tool_name: &str, error: PlatformApiError) -> String {
1579    match error {
1580        PlatformApiError::Unauthorized => format_error_for_llm(
1581            tool_name,
1582            ErrorCategory::PermissionDenied,
1583            "Not authenticated - please run `sync-ctl auth login` first",
1584            Some(vec![
1585                "The user needs to authenticate with the Syncable platform",
1586                "Run: sync-ctl auth login",
1587            ]),
1588        ),
1589        PlatformApiError::NotFound(msg) => format_error_for_llm(
1590            tool_name,
1591            ErrorCategory::ResourceUnavailable,
1592            &format!("Resource not found: {}", msg),
1593            Some(vec![
1594                "The project ID may be incorrect",
1595                "Use list_projects to find valid project IDs",
1596            ]),
1597        ),
1598        PlatformApiError::PermissionDenied(msg) => format_error_for_llm(
1599            tool_name,
1600            ErrorCategory::PermissionDenied,
1601            &format!("Permission denied: {}", msg),
1602            Some(vec!["Contact the project admin for access"]),
1603        ),
1604        PlatformApiError::RateLimited => format_error_for_llm(
1605            tool_name,
1606            ErrorCategory::ResourceUnavailable,
1607            "Rate limit exceeded - please try again later",
1608            Some(vec!["Wait a moment before retrying"]),
1609        ),
1610        PlatformApiError::HttpError(e) => format_error_for_llm(
1611            tool_name,
1612            ErrorCategory::NetworkError,
1613            &format!("Network error: {}", e),
1614            Some(vec!["Check network connectivity"]),
1615        ),
1616        PlatformApiError::ParseError(msg) => format_error_for_llm(
1617            tool_name,
1618            ErrorCategory::InternalError,
1619            &format!("Failed to parse API response: {}", msg),
1620            None,
1621        ),
1622        PlatformApiError::ApiError { status, message } => format_error_for_llm(
1623            tool_name,
1624            ErrorCategory::ExternalCommandFailed,
1625            &format!("API error ({}): {}", status, message),
1626            Some(vec!["Check the error message for details"]),
1627        ),
1628        PlatformApiError::ServerError { status, message } => format_error_for_llm(
1629            tool_name,
1630            ErrorCategory::ExternalCommandFailed,
1631            &format!("Server error ({}): {}", status, message),
1632            Some(vec!["Try again later"]),
1633        ),
1634        PlatformApiError::ConnectionFailed => format_error_for_llm(
1635            tool_name,
1636            ErrorCategory::NetworkError,
1637            "Could not connect to Syncable API",
1638            Some(vec!["Check your internet connection"]),
1639        ),
1640    }
1641}
1642
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_tool_name() {
        assert_eq!(DeployServiceTool::NAME, "deploy_service");
    }

    #[test]
    fn test_default_preview_only() {
        assert!(default_preview());
    }

    #[test]
    fn test_get_service_name() {
        // (input path, expected normalized service name)
        let cases = [
            ("/path/to/my_service", "my-service"),
            ("/path/to/MyApp", "myapp"),
            ("/path/to/api-service", "api-service"),
        ];
        for (input, expected) in cases {
            assert_eq!(get_service_name(&PathBuf::from(input)), expected);
        }
    }

    #[test]
    fn test_tool_creation() {
        let tool = DeployServiceTool::new(PathBuf::from("/test"));
        let debug_repr = format!("{:?}", tool);
        assert!(debug_repr.contains("DeployServiceTool"));
    }

    #[tokio::test]
    async fn test_nonexistent_path_returns_error() {
        let tool = DeployServiceTool::new(PathBuf::from("/nonexistent/path/that/does/not/exist"));
        let args = DeployServiceArgs {
            path: Some("nope".to_string()),
            provider: None,
            machine_type: None,
            region: None,
            port: None,
            is_public: false,
            cpu: None,
            memory: None,
            min_instances: None,
            max_instances: None,
            preview_only: true,
            secret_keys: None,
        };

        // Tool errors are returned as LLM-readable strings, not Err values.
        let output = tool.call(args).await.unwrap();
        let error_signals = ["error", "not found", "Path not found"];
        assert!(error_signals.iter().any(|s| output.contains(s)));
    }
}