use std::path::{Path, PathBuf};
use std::process::Command;
use std::str::FromStr;

use rig::completion::ToolDefinition;
use rig::tool::Tool;
use serde::{Deserialize, Serialize};
use serde_json::json;

use crate::agent::tools::error::{ErrorCategory, format_error_for_llm};
use crate::analyzer::{AnalysisConfig, TechnologyCategory, analyze_project_with_config};
use crate::platform::PlatformSession;
use crate::platform::api::types::{
    CloudProvider, CreateDeploymentConfigRequest, ProjectRepository, build_cloud_runner_config,
};
use crate::platform::api::{PlatformApiClient, PlatformApiError, TriggerDeploymentRequest};
use crate::wizard::{RecommendationInput, get_provider_deployment_statuses, recommend_deployment};
24
/// Arguments the LLM supplies when invoking the `deploy_service` tool.
///
/// All override fields are optional; when absent, the values produced by the
/// recommendation engine are used instead.
#[derive(Debug, Deserialize)]
pub struct DeployServiceArgs {
    // Optional subdirectory to analyze/deploy (monorepo services).
    pub path: Option<String>,
    // Override for the recommended cloud provider ("gcp" or "hetzner").
    pub provider: Option<String>,
    // Override for the recommended machine type (e.g. "cx22", "e2-small").
    pub machine_type: Option<String>,
    // Override for the recommended region (e.g. "nbg1", "us-central1").
    pub region: Option<String>,
    // Override for the detected service port.
    pub port: Option<u16>,
    // Whether the service gets a public URL; defaults to false (internal only)
    // when omitted from the JSON payload.
    #[serde(default)]
    pub is_public: bool,
    // Safety switch: true (the default, via `default_preview`) returns a
    // recommendation preview instead of actually deploying.
    #[serde(default = "default_preview")]
    pub preview_only: bool,
}
47
/// Serde default for `DeployServiceArgs::preview_only`.
///
/// Deployment is opt-in: when the LLM omits `preview_only`, the tool must
/// show a preview rather than deploy, so the default is `true`.
fn default_preview() -> bool {
    true
}
51
/// Error type returned by `DeployServiceTool`.
///
/// A thin newtype over a human-readable message; most recoverable failures
/// are instead reported to the LLM as formatted `Ok(String)` payloads, so
/// this is only produced for serialization failures and similar internal
/// errors.
#[derive(Debug, thiserror::Error)]
#[error("Deploy service error: {0}")]
pub struct DeployServiceError(String);
56
/// Agent tool that analyzes a local project and deploys it through the
/// Syncable platform API with recommendation-driven defaults.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DeployServiceTool {
    // Root directory of the local project checkout; `args.path` (if given)
    // is joined onto this for monorepo subservices.
    project_path: PathBuf,
}
69
70impl DeployServiceTool {
71 pub fn new(project_path: PathBuf) -> Self {
73 Self { project_path }
74 }
75}
76
impl Tool for DeployServiceTool {
    const NAME: &'static str = "deploy_service";

    type Error = DeployServiceError;
    type Args = DeployServiceArgs;
    type Output = String;

    // Tool schema advertised to the LLM. The long description deliberately
    // spells out the preview-then-confirm workflow so the model never deploys
    // without an explicit user confirmation.
    async fn definition(&self, _prompt: String) -> ToolDefinition {
        ToolDefinition {
            name: Self::NAME.to_string(),
            description: r#"Analyze a project and deploy it with intelligent recommendations.

This tool provides an end-to-end deployment experience:
1. Analyzes the project to detect language, framework, ports, and health endpoints
2. Checks available deployment capabilities (providers, clusters, registries)
3. Generates smart recommendations with reasoning
4. Shows a preview for user confirmation
5. Creates deployment config and triggers deployment

**Default behavior (preview_only=true):**
Returns analysis and recommendations. User should confirm before actual deployment.

**Direct deployment (preview_only=false):**
Uses provided overrides or recommendation defaults to deploy immediately.

**Parameters:**
- path: Optional subdirectory for monorepo services
- provider: Override recommendation (gcp, hetzner)
- machine_type: Override machine selection (e.g., cx22, e2-small)
- region: Override region selection (e.g., nbg1, us-central1)
- port: Override detected port
- is_public: Whether service should be publicly accessible (default: false)
- preview_only: If true (default), show recommendation only

**IMPORTANT - Public vs Internal:**
- is_public=false (default): Service is internal-only, not accessible from internet
- is_public=true: Service gets a public URL, accessible from anywhere
- ALWAYS show this in the preview and ask user before deploying public services

**What it analyzes:**
- Programming language and framework
- Port configuration from source code, package.json, Dockerfiles
- Health check endpoints (/health, /healthz, etc.)
- Existing infrastructure (K8s manifests, Helm charts)

**Recommendation reasoning includes:**
- Why a specific provider was chosen
- Why a machine type fits the workload (based on memory requirements)
- Where the port was detected from
- Confidence level in the recommendation

**Example flow:**
User: "deploy this service"
1. Call with preview_only=true → Shows recommendation
2. User: "yes, deploy it" → Call with preview_only=false to deploy
3. User: "make it public" → Call with preview_only=true AND is_public=true to show NEW preview
4. User: "yes" → NOW call with preview_only=false to deploy

**CRITICAL - Human in the loop:**
- NEVER deploy (preview_only=false) immediately after user requests a CHANGE
- If user says "make it public", "use GCP", "change region", etc. → show NEW preview first
- Only deploy after user explicitly confirms the final settings with "yes", "deploy", "confirm"
- A change request is NOT a deployment confirmation

**Prerequisites:**
- User must be authenticated (sync-ctl auth login)
- A project must be selected (use select_project first)
- Provider must be connected (check with list_deployment_capabilities)"#
                .to_string(),
            // JSON Schema for the arguments; mirrors `DeployServiceArgs`.
            parameters: json!({
                "type": "object",
                "properties": {
                    "path": {
                        "type": "string",
                        "description": "Subdirectory to deploy (for monorepos)"
                    },
                    "provider": {
                        "type": "string",
                        "enum": ["gcp", "hetzner"],
                        "description": "Override: cloud provider"
                    },
                    "machine_type": {
                        "type": "string",
                        "description": "Override: machine type (e.g., cx22, e2-small)"
                    },
                    "region": {
                        "type": "string",
                        "description": "Override: deployment region"
                    },
                    "port": {
                        "type": "integer",
                        "description": "Override: port to expose"
                    },
                    "is_public": {
                        "type": "boolean",
                        "description": "Whether service should be publicly accessible. Default: false (internal only). Set to true for public URL."
                    },
                    "preview_only": {
                        "type": "boolean",
                        "description": "If true (default), show recommendation only. If false, deploy."
                    }
                }
            }),
        }
    }

    // Orchestrates the whole flow:
    //   analyze project → load session & capabilities → build recommendation →
    //   then either (a) return a preview, (b) redeploy an existing config, or
    //   (c) create a new deployment config and trigger it.
    //
    // NOTE: recoverable failures are returned as `Ok(formatted error string)`
    // so the LLM can read and act on them; `Err(...)` is reserved for
    // serialization failures.
    async fn call(&self, args: Self::Args) -> Result<Self::Output, Self::Error> {
        // Resolve the directory to analyze (optional monorepo subpath).
        let analysis_path = if let Some(ref subpath) = args.path {
            self.project_path.join(subpath)
        } else {
            self.project_path.clone()
        };

        if !analysis_path.exists() {
            return Ok(format_error_for_llm(
                "deploy_service",
                ErrorCategory::FileNotFound,
                &format!("Path not found: {}", analysis_path.display()),
                Some(vec!["Check if the path exists", "Use list_directory to explore"]),
            ));
        }

        // Deep analysis: needed to pick up ports, health endpoints, Dockerfiles.
        let config = AnalysisConfig {
            deep_analysis: true,
            ..Default::default()
        };

        let analysis = match analyze_project_with_config(&analysis_path, &config) {
            Ok(a) => a,
            Err(e) => {
                return Ok(format_error_for_llm(
                    "deploy_service",
                    ErrorCategory::InternalError,
                    &format!("Analysis failed: {}", e),
                    Some(vec!["Check if the directory contains a valid project"]),
                ));
            }
        };

        // Platform client construction fails when the user is not logged in.
        let client = match PlatformApiClient::new() {
            Ok(c) => c,
            Err(_) => {
                return Ok(format_error_for_llm(
                    "deploy_service",
                    ErrorCategory::PermissionDenied,
                    "Not authenticated",
                    Some(vec!["Run: sync-ctl auth login"]),
                ));
            }
        };

        // Session carries the currently selected project/environment.
        let session = match PlatformSession::load() {
            Ok(s) => s,
            Err(_) => {
                return Ok(format_error_for_llm(
                    "deploy_service",
                    ErrorCategory::InternalError,
                    "Failed to load platform session",
                    Some(vec!["Try selecting a project with select_project"]),
                ));
            }
        };

        if !session.is_project_selected() {
            return Ok(format_error_for_llm(
                "deploy_service",
                ErrorCategory::ValidationFailed,
                "No project selected",
                Some(vec!["Use select_project to choose a project first"]),
            ));
        }

        let project_id = session.project_id.clone().unwrap_or_default();
        let environment_id = session.environment_id.clone();

        // Existing configs decide REDEPLOY vs NEW_DEPLOYMENT below.
        // Best-effort: a fetch failure degrades to "no existing configs".
        let existing_configs = match client.list_deployment_configs(&project_id).await {
            Ok(configs) => configs,
            Err(e) => {
                tracing::warn!("Failed to fetch existing configs: {}", e);
                Vec::new()
            }
        };

        // Service name is derived from the analyzed directory name.
        let service_name = get_service_name(&analysis_path);

        let existing_config = existing_configs
            .iter()
            .find(|c| c.service_name.eq_ignore_ascii_case(&service_name));

        // Environments are only used for display/production detection;
        // failure degrades to an empty list.
        let environments = match client.list_environments(&project_id).await {
            Ok(envs) => envs,
            Err(_) => Vec::new(),
        };

        // Environment resolution priority:
        //   1. environment selected in the session,
        //   2. environment of the existing config (redeploy case),
        //   3. first environment of the project,
        //   4. none (empty id; rejected later before a NEW deployment).
        // "Production" is detected by substring match on the environment name.
        let (resolved_env_id, resolved_env_name, is_production) = if let Some(ref env_id) = environment_id {
            let env = environments.iter().find(|e| e.id == *env_id);
            let name = env.map(|e| e.name.clone()).unwrap_or_else(|| "Unknown".to_string());
            let is_prod = name.to_lowercase().contains("prod");
            (env_id.clone(), name, is_prod)
        } else if let Some(existing) = &existing_config {
            let env = environments.iter().find(|e| e.id == existing.environment_id);
            let name = env.map(|e| e.name.clone()).unwrap_or_else(|| "Unknown".to_string());
            let is_prod = name.to_lowercase().contains("prod");
            (existing.environment_id.clone(), name, is_prod)
        } else if let Some(first_env) = environments.first() {
            let is_prod = first_env.name.to_lowercase().contains("prod");
            (first_env.id.clone(), first_env.name.clone(), is_prod)
        } else {
            ("".to_string(), "No environment".to_string(), false)
        };

        let capabilities = match get_provider_deployment_statuses(&client, &project_id).await {
            Ok(c) => c,
            Err(e) => {
                return Ok(format_error_for_llm(
                    "deploy_service",
                    ErrorCategory::NetworkError,
                    &format!("Failed to get deployment capabilities: {}", e),
                    None,
                ));
            }
        };

        // Only providers that are both supported and actually connected.
        let available_providers: Vec<CloudProvider> = capabilities
            .iter()
            .filter(|s| s.provider.is_available() && s.is_connected)
            .map(|s| s.provider.clone())
            .collect();

        if available_providers.is_empty() {
            return Ok(format_error_for_llm(
                "deploy_service",
                ErrorCategory::ResourceUnavailable,
                "No cloud providers connected",
                Some(vec![
                    "Connect GCP or Hetzner in platform settings",
                    "Use open_provider_settings to configure a provider",
                ]),
            ));
        }

        let has_existing_k8s = capabilities.iter().any(|s| !s.clusters.is_empty());

        // Feed analysis + capability facts into the recommendation engine.
        let recommendation_input = RecommendationInput {
            analysis: analysis.clone(),
            available_providers: available_providers.clone(),
            has_existing_k8s,
            user_region_hint: args.region.clone(),
        };

        let recommendation = recommend_deployment(recommendation_input);

        // Summarize analysis facts for the preview payload.
        let primary_language = analysis.languages.first()
            .map(|l| l.name.clone())
            .unwrap_or_else(|| "Unknown".to_string());

        let primary_framework = analysis.technologies.iter()
            .find(|t| matches!(t.category, TechnologyCategory::BackendFramework | TechnologyCategory::MetaFramework))
            .map(|t| t.name.clone())
            .unwrap_or_else(|| "None detected".to_string());

        let has_dockerfile = analysis.docker_analysis
            .as_ref()
            .map(|d| !d.dockerfiles.is_empty())
            .unwrap_or(false);

        let has_k8s = analysis.infrastructure
            .as_ref()
            .map(|i| i.has_kubernetes)
            .unwrap_or(false);

        // ---- Branch (a): preview only — no side effects, return the plan. ----
        if args.preview_only {
            let (deployment_mode, mode_explanation, next_steps) = if let Some(existing) = &existing_config {
                (
                    "REDEPLOY",
                    format!(
                        "Service '{}' already has a deployment config (ID: {}). Deploying will trigger a REDEPLOY of the existing service.",
                        existing.service_name, existing.id
                    ),
                    vec![
                        "To redeploy with current config: call deploy_service with preview_only=false".to_string(),
                        "This will trigger a new deployment of the existing service".to_string(),
                        "The existing configuration will be used".to_string(),
                    ]
                )
            } else {
                (
                    "NEW_DEPLOYMENT",
                    format!(
                        "No existing deployment config found for '{}'. This will create a NEW deployment configuration.",
                        service_name
                    ),
                    vec![
                        "To deploy with these settings: call deploy_service with preview_only=false".to_string(),
                        "To customize: specify provider, machine_type, region, or port parameters".to_string(),
                        "To see more options: check the alternatives section above".to_string(),
                    ]
                )
            };

            let production_warning = if is_production {
                Some("⚠️ WARNING: This will deploy to PRODUCTION environment. Please confirm you intend to deploy to production.")
            } else {
                None
            };

            // Structured preview: analysis facts, the recommendation with its
            // reasoning, alternatives, and an explicit confirmation prompt.
            let response = json!({
                "status": "recommendation",
                "deployment_mode": deployment_mode,
                "mode_explanation": mode_explanation,
                "environment": {
                    "id": resolved_env_id,
                    "name": resolved_env_name,
                    "is_production": is_production,
                },
                "production_warning": production_warning,
                "existing_config": existing_config.map(|c| json!({
                    "id": c.id,
                    "service_name": c.service_name,
                    "environment_id": c.environment_id,
                    "branch": c.branch,
                    "port": c.port,
                    "auto_deploy_enabled": c.auto_deploy_enabled,
                    "created_at": c.created_at.to_rfc3339(),
                })),
                "analysis": {
                    "path": analysis_path.display().to_string(),
                    "language": primary_language,
                    "framework": primary_framework,
                    "detected_port": recommendation.port,
                    "port_source": recommendation.port_source,
                    "health_endpoint": recommendation.health_check_path,
                    "has_dockerfile": has_dockerfile,
                    "has_kubernetes": has_k8s,
                },
                "recommendation": {
                    "provider": recommendation.provider.as_str(),
                    "provider_reasoning": recommendation.provider_reasoning,
                    "target": recommendation.target.as_str(),
                    "target_reasoning": recommendation.target_reasoning,
                    "machine_type": recommendation.machine_type,
                    "machine_reasoning": recommendation.machine_reasoning,
                    "region": recommendation.region,
                    "region_reasoning": recommendation.region_reasoning,
                    "port": recommendation.port,
                    "health_check_path": recommendation.health_check_path,
                    "is_public": args.is_public,
                    "is_public_note": if args.is_public {
                        "Service will be PUBLICLY accessible from the internet"
                    } else {
                        "Service will be INTERNAL only (not accessible from internet)"
                    },
                    "confidence": recommendation.confidence,
                },
                "alternatives": {
                    "providers": recommendation.alternatives.providers.iter().map(|p| json!({
                        "provider": p.provider.as_str(),
                        "available": p.available,
                        "reason_if_unavailable": p.reason_if_unavailable,
                    })).collect::<Vec<_>>(),
                    "machine_types": recommendation.alternatives.machine_types.iter().map(|m| json!({
                        "machine_type": m.machine_type,
                        "vcpu": m.vcpu,
                        "memory_gb": m.memory_gb,
                        "description": m.description,
                    })).collect::<Vec<_>>(),
                    "regions": recommendation.alternatives.regions.iter().map(|r| json!({
                        "region": r.region,
                        "display_name": r.display_name,
                    })).collect::<Vec<_>>(),
                },
                "service_name": service_name,
                "next_steps": next_steps,
                "confirmation_prompt": if existing_config.is_some() {
                    format!(
                        "REDEPLOY '{}' to {} environment?{}",
                        service_name,
                        resolved_env_name,
                        if is_production { " ⚠️ (PRODUCTION)" } else { "" }
                    )
                } else {
                    format!(
                        "Deploy NEW service '{}' to {} ({}) with {} in {} on {} environment?{}",
                        service_name,
                        recommendation.provider.display_name(),
                        recommendation.target.display_name(),
                        recommendation.machine_type,
                        recommendation.region,
                        resolved_env_name,
                        if is_production { " ⚠️ (PRODUCTION)" } else { "" }
                    )
                },
            });

            return serde_json::to_string_pretty(&response)
                .map_err(|e| DeployServiceError(format!("Failed to serialize: {}", e)));
        }

        // ---- Branch (b): config already exists — trigger a redeploy. ----
        // NOTE(review): overrides (provider/machine_type/region/port) are
        // ignored on this path; the stored config is reused as-is.
        if let Some(existing) = &existing_config {
            let trigger_request = TriggerDeploymentRequest {
                project_id: project_id.clone(),
                config_id: existing.id.clone(),
                commit_sha: None,
            };

            return match client.trigger_deployment(&trigger_request).await {
                Ok(response) => {
                    let result = json!({
                        "status": "redeployed",
                        "deployment_mode": "REDEPLOY",
                        "config_id": existing.id,
                        "task_id": response.backstage_task_id,
                        "service_name": service_name,
                        "environment": {
                            "id": resolved_env_id,
                            "name": resolved_env_name,
                            "is_production": is_production,
                        },
                        "message": format!(
                            "Redeploy triggered for existing service '{}' on {} environment. Task ID: {}",
                            service_name, resolved_env_name, response.backstage_task_id
                        ),
                        "next_steps": [
                            format!("Monitor progress: use get_deployment_status with task_id '{}'", response.backstage_task_id),
                            "View logs after deployment: use get_service_logs",
                        ],
                    });

                    serde_json::to_string_pretty(&result)
                        .map_err(|e| DeployServiceError(format!("Failed to serialize: {}", e)))
                }
                Err(e) => Ok(format_api_error("deploy_service", e)),
            };
        }

        // ---- Branch (c): new deployment — merge overrides with recommendation. ----
        let final_provider = args.provider
            .as_ref()
            .and_then(|p| CloudProvider::from_str(p).ok())
            .unwrap_or(recommendation.provider.clone());

        let final_machine = args.machine_type
            .clone()
            .unwrap_or(recommendation.machine_type.clone());

        let final_region = args.region
            .clone()
            .unwrap_or(recommendation.region.clone());

        let final_port = args.port
            .unwrap_or(recommendation.port);

        let repositories = match client.list_project_repositories(&project_id).await {
            Ok(repos) => repos,
            Err(e) => {
                return Ok(format_error_for_llm(
                    "deploy_service",
                    ErrorCategory::NetworkError,
                    &format!("Failed to get repositories: {}", e),
                    Some(vec!["Ensure a repository is connected to the project"]),
                ));
            }
        };

        // Match the local checkout to one of the connected repositories.
        let repo = match find_matching_repository(&repositories.repositories, &self.project_path) {
            Some(r) => r,
            None => {
                return Ok(format_error_for_llm(
                    "deploy_service",
                    ErrorCategory::ResourceUnavailable,
                    "No repository connected to project",
                    Some(vec![
                        "Connect a GitHub repository to the project first",
                        "Use the platform UI to connect a repository",
                    ]),
                ));
            }
        };

        tracing::info!(
            "Deploy service: Using repository {} (id: {}), default_branch: {:?}",
            repo.repository_full_name,
            repo.repository_id,
            repo.default_branch
        );

        // A NEW deployment needs a concrete environment; preview does not.
        if resolved_env_id.is_empty() {
            return Ok(format_error_for_llm(
                "deploy_service",
                ErrorCategory::ResourceUnavailable,
                "No environment found for project",
                Some(vec!["Create an environment in the platform first"]),
            ));
        }

        // Compute the Dockerfile path and build context RELATIVE TO THE REPO
        // ROOT, combining the monorepo subpath (args.path) with where the
        // analyzer found the first Dockerfile under `analysis_path`.
        // Falls back to "<subpath>/Dockerfile" when no Dockerfile was detected.
        let (dockerfile_path, build_context) = analysis.docker_analysis
            .as_ref()
            .and_then(|d| d.dockerfiles.first())
            .map(|df| {
                let dockerfile_name = df.path.file_name()
                    .map(|n| n.to_string_lossy().to_string())
                    .unwrap_or_else(|| "Dockerfile".to_string());

                // Directory of the Dockerfile relative to the analyzed dir.
                let analysis_relative_dir = df.path.parent()
                    .and_then(|p| p.strip_prefix(&analysis_path).ok())
                    .map(|p| p.to_string_lossy().to_string())
                    .unwrap_or_default();

                let subpath = args.path.as_deref().unwrap_or("");

                if subpath.is_empty() {
                    if analysis_relative_dir.is_empty() {
                        (dockerfile_name, ".".to_string())
                    } else {
                        (format!("{}/{}", analysis_relative_dir, dockerfile_name), analysis_relative_dir)
                    }
                } else {
                    if analysis_relative_dir.is_empty() {
                        (format!("{}/{}", subpath, dockerfile_name), subpath.to_string())
                    } else {
                        let full_context = format!("{}/{}", subpath, analysis_relative_dir);
                        (format!("{}/{}", full_context, dockerfile_name), full_context)
                    }
                }
            })
            .unwrap_or_else(|| {
                let subpath = args.path.as_deref().unwrap_or("");
                if subpath.is_empty() {
                    ("Dockerfile".to_string(), ".".to_string())
                } else {
                    (format!("{}/Dockerfile", subpath), subpath.to_string())
                }
            });

        tracing::debug!(
            "Deploy service docker config: dockerfile_path={}, build_context={}, subpath={:?}",
            dockerfile_path,
            build_context,
            args.path
        );

        let cloud_runner_config = build_cloud_runner_config(
            &final_provider,
            &final_region,
            &final_machine,
            args.is_public,
            recommendation.health_check_path.as_deref(),
        );

        let config_request = CreateDeploymentConfigRequest {
            project_id: project_id.clone(),
            service_name: service_name.clone(),
            repository_id: repo.repository_id,
            repository_full_name: repo.repository_full_name.clone(),
            // NOTE(review): dockerfile_path/dockerfile and build_context/context
            // are set in pairs — presumably old and new API field names; confirm
            // against the platform API before removing either.
            dockerfile_path: Some(dockerfile_path.clone()),
            dockerfile: Some(dockerfile_path.clone()),
            build_context: Some(build_context.clone()),
            context: Some(build_context.clone()),
            port: final_port as i32,
            branch: repo.default_branch.clone().unwrap_or_else(|| "main".to_string()),
            target_type: recommendation.target.as_str().to_string(),
            cloud_provider: final_provider.as_str().to_string(),
            environment_id: resolved_env_id.clone(),
            cluster_id: None, registry_id: None, auto_deploy_enabled: true,
            is_public: Some(args.is_public),
            cloud_runner_config: Some(cloud_runner_config),
        };

        let config = match client.create_deployment_config(&config_request).await {
            Ok(c) => c,
            Err(e) => {
                return Ok(format_api_error("deploy_service", e));
            }
        };

        // Config created — now kick off the first deployment for it.
        let trigger_request = TriggerDeploymentRequest {
            project_id: project_id.clone(),
            config_id: config.id.clone(),
            commit_sha: None,
        };

        match client.trigger_deployment(&trigger_request).await {
            Ok(response) => {
                let result = json!({
                    "status": "deployed",
                    "deployment_mode": "NEW_DEPLOYMENT",
                    "config_id": config.id,
                    "task_id": response.backstage_task_id,
                    "service_name": service_name,
                    "environment": {
                        "id": resolved_env_id,
                        "name": resolved_env_name,
                        "is_production": is_production,
                    },
                    "provider": final_provider.as_str(),
                    "machine_type": final_machine,
                    "region": final_region,
                    "port": final_port,
                    "docker_config": {
                        "dockerfile_path": dockerfile_path,
                        "build_context": build_context,
                    },
                    "message": format!(
                        "NEW deployment started for '{}' on {} environment. Task ID: {}",
                        service_name, resolved_env_name, response.backstage_task_id
                    ),
                    "next_steps": [
                        format!("Monitor progress: use get_deployment_status with task_id '{}'", response.backstage_task_id),
                        "View logs after deployment: use get_service_logs",
                    ],
                });

                serde_json::to_string_pretty(&result)
                    .map_err(|e| DeployServiceError(format!("Failed to serialize: {}", e)))
            }
            Err(e) => Ok(format_api_error("deploy_service", e)),
        }
    }
}
756
/// Derive a deployable service name from the final path component:
/// lowercased, with underscores and spaces replaced by hyphens.
///
/// Falls back to "service" when the path has no final component (e.g. "/")
/// or the name is not valid UTF-8.
///
/// Takes `&Path` instead of `&PathBuf` (idiomatic; `&PathBuf` arguments
/// coerce automatically, so all existing callers still compile).
fn get_service_name(path: &Path) -> String {
    path.file_name()
        .and_then(|n| n.to_str())
        .map(|n| n.to_lowercase().replace(['_', ' '], "-"))
        .unwrap_or_else(|| "service".to_string())
}
764
/// Read the `origin` remote URL of the git checkout at `project_path` by
/// shelling out to `git remote get-url origin`.
///
/// Returns `None` when git is not installed, the command exits non-zero
/// (e.g. not a git repo, no `origin` remote), or stdout is not valid UTF-8.
///
/// Takes `&Path` instead of `&PathBuf` (idiomatic; the `&PathBuf` caller
/// coerces automatically).
fn detect_git_remote(project_path: &Path) -> Option<String> {
    let output = Command::new("git")
        .args(["remote", "get-url", "origin"])
        .current_dir(project_path)
        .output()
        .ok()?;

    // Non-zero exit means no usable remote — not an error worth surfacing.
    if !output.status.success() {
        return None;
    }

    let url = String::from_utf8(output.stdout).ok()?;
    Some(url.trim().to_string())
}
780
/// Extract an "owner/repo" style path from a git remote URL.
///
/// Handles SSH remotes (`git@host:owner/repo.git`) and HTTP(S) remotes
/// (`https://host/owner/repo[.git]`); any other scheme yields `None`.
fn parse_repo_from_url(url: &str) -> Option<String> {
    let url = url.trim();

    // SSH form: exactly one ':' separating host from path.
    if url.starts_with("git@") {
        let mut pieces = url.split(':');
        if let (Some(_host), Some(tail), None) = (pieces.next(), pieces.next(), pieces.next()) {
            return Some(tail.trim_end_matches(".git").to_string());
        }
    }

    // HTTP(S) form: drop "scheme://host" (the first three '/'-separated
    // segments) and keep the remainder as the repo path.
    if url.starts_with("https://") || url.starts_with("http://") {
        let tail: String = url.split('/').skip(3).collect::<Vec<_>>().join("/");
        if let Some(stripped) = tail.strip_suffix(".git") {
            return Some(stripped.to_string());
        }
        if !tail.is_empty() {
            return Some(tail);
        }
    }

    None
}
809
/// Pick the connected repository that most likely corresponds to the local
/// checkout at `project_path`.
///
/// Selection priority:
/// 1. A repo whose full name matches the local `origin` remote
///    (case-insensitive).
/// 2. The first repo that is not flagged/named as the GitOps or
///    infrastructure repo.
/// 3. The first repo in the list, if any (may be the GitOps repo).
fn find_matching_repository<'a>(
    repositories: &'a [ProjectRepository],
    project_path: &PathBuf,
) -> Option<&'a ProjectRepository> {
    // 1. Try to match the local git remote against connected repos.
    if let Some(detected_name) = detect_git_remote(project_path).and_then(|url| parse_repo_from_url(&url)) {
        tracing::debug!("Detected local git remote: {}", detected_name);

        if let Some(repo) = repositories.iter().find(|r| {
            r.repository_full_name.eq_ignore_ascii_case(&detected_name)
        }) {
            tracing::debug!("Matched detected repo: {}", repo.repository_full_name);
            return Some(repo);
        }
    }

    // 2. Skip GitOps/infrastructure repos — they hold manifests, not the
    //    service source we want to build.
    if let Some(repo) = repositories.iter().find(|r| {
        r.is_primary_git_ops != Some(true) &&
        !r.repository_full_name.to_lowercase().contains("infrastructure") &&
        !r.repository_full_name.to_lowercase().contains("gitops")
    }) {
        tracing::debug!("Using non-gitops repo: {}", repo.repository_full_name);
        return Some(repo);
    }

    // 3. Last resort: any connected repo at all.
    repositories.first()
}
841
/// Map a `PlatformApiError` onto the LLM-facing error format, attaching an
/// error category and actionable suggestions per variant.
///
/// Returned as a plain `String` so callers can hand it back to the model as
/// a successful tool output rather than a hard failure.
fn format_api_error(tool_name: &str, error: PlatformApiError) -> String {
    match error {
        PlatformApiError::Unauthorized => format_error_for_llm(
            tool_name,
            ErrorCategory::PermissionDenied,
            "Not authenticated - please run `sync-ctl auth login` first",
            Some(vec![
                "The user needs to authenticate with the Syncable platform",
                "Run: sync-ctl auth login",
            ]),
        ),
        PlatformApiError::NotFound(msg) => format_error_for_llm(
            tool_name,
            ErrorCategory::ResourceUnavailable,
            &format!("Resource not found: {}", msg),
            Some(vec![
                "The project ID may be incorrect",
                "Use list_projects to find valid project IDs",
            ]),
        ),
        PlatformApiError::PermissionDenied(msg) => format_error_for_llm(
            tool_name,
            ErrorCategory::PermissionDenied,
            &format!("Permission denied: {}", msg),
            Some(vec!["Contact the project admin for access"]),
        ),
        PlatformApiError::RateLimited => format_error_for_llm(
            tool_name,
            ErrorCategory::ResourceUnavailable,
            "Rate limit exceeded - please try again later",
            Some(vec!["Wait a moment before retrying"]),
        ),
        PlatformApiError::HttpError(e) => format_error_for_llm(
            tool_name,
            ErrorCategory::NetworkError,
            &format!("Network error: {}", e),
            Some(vec!["Check network connectivity"]),
        ),
        PlatformApiError::ParseError(msg) => format_error_for_llm(
            tool_name,
            ErrorCategory::InternalError,
            &format!("Failed to parse API response: {}", msg),
            None,
        ),
        // 4xx responses with a structured body.
        PlatformApiError::ApiError { status, message } => format_error_for_llm(
            tool_name,
            ErrorCategory::ExternalCommandFailed,
            &format!("API error ({}): {}", status, message),
            Some(vec!["Check the error message for details"]),
        ),
        // 5xx responses — likely transient.
        PlatformApiError::ServerError { status, message } => format_error_for_llm(
            tool_name,
            ErrorCategory::ExternalCommandFailed,
            &format!("Server error ({}): {}", status, message),
            Some(vec!["Try again later"]),
        ),
        PlatformApiError::ConnectionFailed => format_error_for_llm(
            tool_name,
            ErrorCategory::NetworkError,
            "Could not connect to Syncable API",
            Some(vec!["Check your internet connection"]),
        ),
    }
}
907
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_tool_name() {
        assert_eq!(DeployServiceTool::NAME, "deploy_service");
    }

    #[test]
    fn test_default_preview_only() {
        assert!(default_preview());
    }

    #[test]
    fn test_get_service_name() {
        assert_eq!(
            get_service_name(&PathBuf::from("/path/to/my_service")),
            "my-service"
        );
        assert_eq!(
            get_service_name(&PathBuf::from("/path/to/MyApp")),
            "myapp"
        );
        assert_eq!(
            get_service_name(&PathBuf::from("/path/to/api-service")),
            "api-service"
        );
    }

    #[test]
    fn test_tool_creation() {
        let tool = DeployServiceTool::new(PathBuf::from("/test"));
        assert!(format!("{:?}", tool).contains("DeployServiceTool"));
    }

    #[tokio::test]
    async fn test_nonexistent_path_returns_error() {
        let tool = DeployServiceTool::new(PathBuf::from("/nonexistent/path/that/does/not/exist"));
        // Fix: the struct literal was missing the required `is_public` field
        // (serde defaults apply only to deserialization, not to Rust struct
        // literals), which made this test module fail to compile.
        let args = DeployServiceArgs {
            path: Some("nope".to_string()),
            provider: None,
            machine_type: None,
            region: None,
            port: None,
            is_public: false,
            preview_only: true,
        };

        let result = tool.call(args).await.unwrap();
        assert!(result.contains("error") || result.contains("not found") || result.contains("Path not found"));
    }
}
959}