Skip to main content

roboticus_cli/cli/admin/
setup.rs

1use super::*;
2
3// ── Setup wizard & starter skills ─────────────────────────────
4
5/// Normalise a filesystem path for safe embedding in TOML double-quoted strings.
6/// Converts Windows backslashes to forward slashes (valid on all platforms).
/// Normalise a filesystem path for safe embedding in TOML double-quoted strings.
///
/// Backslashes (Windows separators) would require escaping inside a TOML basic
/// string; forward slashes are a valid separator on every platform, so each
/// `\` is rewritten to `/`.
fn toml_safe_path(p: &str) -> String {
    p.chars()
        .map(|c| if c == '\\' { '/' } else { c })
        .collect()
}
10
/// Starter skill files installed into a fresh skills directory.
///
/// Each entry is `(filename, contents)`; the contents are Markdown with a
/// YAML frontmatter header (`name`, `description`, `triggers`, `priority`).
pub const STARTER_SKILLS: &[(&str, &str)] = &[(
    "draft.md",
    r#"---
name: draft
description: Draft emails, messages, documents, or other written content
triggers:
  keywords: [draft, write, compose, email, letter, message, document]
priority: 6
---

Draft the requested content based on the user's description. Ask clarifying questions
if the audience, tone, or purpose is unclear. Default to a professional but approachable
tone unless told otherwise. Present the draft clearly and offer to revise.
"#,
)];

/// Write any missing starter skills into `skills_dir`, creating the directory
/// (and parents) if it does not exist.
///
/// Existing files are never overwritten, so repeated calls are idempotent.
/// Returns the number of files actually written.
///
/// # Errors
/// Propagates I/O errors from directory creation or file writes.
pub fn write_starter_skills(skills_dir: &std::path::Path) -> std::io::Result<usize> {
    // Robustness: previously this failed with NotFound when the directory was
    // missing; creating it here is backward-compatible for existing callers
    // (which create the directory themselves) and removes the footgun.
    std::fs::create_dir_all(skills_dir)?;
    let mut written = 0;
    for (filename, content) in STARTER_SKILLS {
        let path = skills_dir.join(filename);
        if !path.exists() {
            std::fs::write(&path, content)?;
            written += 1;
        }
    }
    Ok(written)
}
38
// Apertus instruct model tags; prefixed with a provider name (e.g. "sglang/")
// when offered as selectable options in the setup wizard.
const APERTUS_8B_SUFFIX: &str = "apertus-8b-instruct:latest";
const APERTUS_70B_SUFFIX: &str = "apertus-70b-instruct:latest";
41
/// Best-effort detection of total physical RAM, in whole gigabytes.
///
/// Returns `None` when the platform probe fails (file/command missing, parse
/// failure) or on unsupported platforms. The result only gates whether the
/// 70B Apertus model is offered, so failure is non-fatal by design.
fn detect_system_ram_gb() -> Option<u64> {
    #[cfg(target_os = "linux")]
    {
        // Read /proc/meminfo directly instead of shelling out to `sh -c awk`:
        // same value, fewer failure modes, no dependency on external tools.
        let meminfo = std::fs::read_to_string("/proc/meminfo").ok()?;
        let kb = meminfo
            .lines()
            .find(|line| line.starts_with("MemTotal:"))?
            .split_whitespace()
            .nth(1)? // "MemTotal:   16384 kB" -> "16384"
            .parse::<u64>()
            .ok()?;
        return Some(kb / 1024 / 1024);
    }

    #[cfg(target_os = "macos")]
    {
        // `sysctl -n hw.memsize` prints total RAM in bytes.
        let output = std::process::Command::new("sysctl")
            .args(["-n", "hw.memsize"])
            .output()
            .ok()?;
        if !output.status.success() {
            return None;
        }
        let bytes = String::from_utf8_lossy(&output.stdout)
            .trim()
            .parse::<u64>()
            .ok()?;
        return Some(bytes / 1024 / 1024 / 1024);
    }

    #[cfg(target_os = "windows")]
    {
        // PowerShell CIM query prints total RAM in bytes.
        let output = std::process::Command::new("powershell")
            .args([
                "-NoProfile",
                "-Command",
                "(Get-CimInstance Win32_ComputerSystem).TotalPhysicalMemory",
            ])
            .output()
            .ok()?;
        if !output.status.success() {
            return None;
        }
        let bytes = String::from_utf8_lossy(&output.stdout)
            .trim()
            .parse::<u64>()
            .ok()?;
        return Some(bytes / 1024 / 1024 / 1024);
    }

    // Unsupported platform: every cfg block above was compiled out.
    #[allow(unreachable_code)]
    None
}
98
99fn aperture_options_for_provider(provider_prefix: &str, ram_gb: Option<u64>) -> Vec<String> {
100    let mut options = vec![format!("{provider_prefix}/{APERTUS_8B_SUFFIX}")];
101    if ram_gb.map(|v| v >= 64).unwrap_or(false) {
102        options.push(format!("{provider_prefix}/{APERTUS_70B_SUFFIX}"));
103    }
104    options
105}
106
107fn has_hf_model_cache() -> bool {
108    let home = roboticus_core::home_dir();
109    let home_str = home.to_string_lossy().to_string();
110    let hf_home = std::env::var("HF_HOME")
111        .ok()
112        .or_else(|| Some(format!("{home_str}/.cache/huggingface")));
113    let hub_dir = match hf_home {
114        Some(v) => std::path::PathBuf::from(v).join("hub"),
115        None => return false,
116    };
117    if !hub_dir.exists() {
118        return false;
119    }
120    std::fs::read_dir(&hub_dir)
121        .ok()
122        .map(|iter| {
123            iter.filter_map(Result::ok)
124                .any(|entry| entry.file_name().to_string_lossy().starts_with("models--"))
125        })
126        .unwrap_or(false)
127}
128
129fn has_ollama_models() -> bool {
130    if which_binary("ollama").is_none() {
131        return false;
132    }
133    let output = std::process::Command::new("ollama").arg("list").output();
134    let Ok(out) = output else {
135        return false;
136    };
137    if !out.status.success() {
138        return false;
139    }
140    let line_count = String::from_utf8_lossy(&out.stdout).lines().count();
141    line_count > 1
142}
143
// Binary names whose presence on PATH indicates an existing local-model
// serving stack; setup then skips the automatic SGLang + Apertus onboarding.
const LOCAL_MODEL_FRAMEWORKS: &[&str] = &[
    "sglang",
    "vllm",
    "docker",
    "ollama",
    "llama-server",
    "llama_cpp",
];
152
153fn has_framework_in_path(path_var: &std::ffi::OsStr) -> bool {
154    LOCAL_MODEL_FRAMEWORKS
155        .iter()
156        .any(|bin| crate::cli::which_binary_in_path(bin, path_var).is_some())
157}
158
159fn has_existing_local_model_stack() -> bool {
160    let has_framework = std::env::var_os("PATH")
161        .map(|p| has_framework_in_path(&p))
162        .unwrap_or(false);
163    has_framework || has_ollama_models() || has_hf_model_cache()
164}
165
/// Interactive "quick" personality setup: asks five short questions and writes
/// `OS.toml` + `FIRMWARE.toml` into `workspace` (creating it if needed).
///
/// # Errors
/// Returns an error if a prompt is aborted or a file cannot be written.
fn run_quick_personality_setup(
    workspace: &std::path::Path,
) -> Result<(), Box<dyn std::error::Error>> {
    // Destructure the full color/icon palettes even though only some entries
    // are used here; this mirrors the pattern used across the CLI modules.
    let (DIM, BOLD, ACCENT, GREEN, YELLOW, RED, CYAN, RESET, MONO) = colors();
    let (OK, ACTION, WARN, DETAIL, ERR) = icons();
    use dialoguer::{Input, Select};

    let name: String = Input::new()
        .with_prompt("  Agent name")
        .default("Roboticus".into())
        .interact_text()?;

    let formality_options = vec!["formal", "balanced", "casual"];
    let formality_idx = Select::new()
        .with_prompt("  Communication style")
        .items(&formality_options)
        .default(1)
        .interact()?;

    let proactive_options = vec![
        "wait (only act when told)",
        "suggest (flag opportunities, ask first)",
        "initiative (act proactively)",
    ];
    let proactive_idx = Select::new()
        .with_prompt("  Proactiveness level")
        .items(&proactive_options)
        .default(1)
        .interact()?;
    // Map the selected menu index back to the config token; the catch-all arm
    // keeps "suggest" as the safe default.
    let proactive_val = match proactive_idx {
        0 => "wait",
        2 => "initiative",
        _ => "suggest",
    };

    let domain_options = vec!["general", "developer", "business", "creative", "research"];
    let domain_idx = Select::new()
        .with_prompt("  Primary domain")
        .items(&domain_options)
        .default(0)
        .interact()?;

    // Free-text boundaries; empty input means "no hard boundaries".
    let boundaries: String = Input::new()
        .with_prompt(
            "  Any hard boundaries? (topics/actions that are off-limits, or press Enter to skip)",
        )
        .allow_empty(true)
        .interact_text()?;

    // Render the two personality files from the collected answers.
    let os_toml = roboticus_core::personality::generate_os_toml(
        &name,
        formality_options[formality_idx],
        proactive_val,
        domain_options[domain_idx],
    );
    let fw_toml = roboticus_core::personality::generate_firmware_toml(&boundaries);

    std::fs::create_dir_all(workspace)?;
    std::fs::write(workspace.join("OS.toml"), &os_toml)?;
    std::fs::write(workspace.join("FIRMWARE.toml"), &fw_toml)?;

    println!("  {OK} Personality configured for {BOLD}{name}{RESET} (OS.toml + FIRMWARE.toml)");

    Ok(())
}
231
/// Interactive `roboticus` setup wizard.
///
/// Walks the user through: prerequisite checks (Go + gosh), LLM provider and
/// model selection (with optional SGLang + Apertus onboarding when no local
/// model stack is detected), server port, messaging channels, workspace and
/// database paths. It then generates `roboticus.toml`, creates the workspace
/// and skills directories, and runs the personality setup flow.
///
/// # Errors
/// Returns an error if an interactive prompt is aborted or a filesystem write
/// fails.
pub fn cmd_setup() -> Result<(), Box<dyn std::error::Error>> {
    let (DIM, BOLD, ACCENT, GREEN, YELLOW, RED, CYAN, RESET, MONO) = colors();
    let (OK, ACTION, WARN, DETAIL, ERR) = icons();
    use dialoguer::{Confirm, Input, Select};

    println!("\n  {BOLD}Roboticus Setup Wizard{RESET}\n");
    println!("  This wizard will help you create an roboticus.toml configuration.\n");

    // Prerequisites: Go + gosh (plugin scripting engine)
    println!("  {BOLD}Checking prerequisites...{RESET}\n");
    let go_bin = which_binary("go");
    let has_go = go_bin.is_some();
    let has_gosh = which_binary("gosh").is_some();

    if !has_go {
        // No Go at all: offer to continue with plugins disabled, or pause.
        println!("  {WARN} Go is not installed (required for the gosh plugin engine).");
        println!(
            "     Install from {CYAN}https://go.dev/dl/{RESET} or: {MONO}brew install go{RESET}"
        );
        println!();
        let proceed = Confirm::new()
            .with_prompt(
                "  Continue without Go? (plugins won't work until Go + gosh are installed)",
            )
            .default(true)
            .interact()?;
        if !proceed {
            println!("\n  Setup paused. Install Go, then re-run {BOLD}roboticus init{RESET}.\n");
            return Ok(());
        }
    } else if !has_gosh {
        // Go present but gosh missing: offer a one-shot `go install`.
        println!("  {OK} Go found");
        println!("  {WARN} gosh scripting engine not found.");
        let install_now = Confirm::new()
            .with_prompt("  Install gosh now via `go install`?")
            .default(true)
            .interact()?;
        if install_now {
            println!("  Installing gosh...");
            let result = if let Some(go_path) = go_bin.as_deref() {
                std::process::Command::new(go_path)
                    .args(["install", "github.com/drewwalton19216801/gosh@latest"])
                    .status()
            } else {
                // Unreachable in practice (has_go is true here) but kept as a
                // defensive error path.
                Err(std::io::Error::new(
                    std::io::ErrorKind::NotFound,
                    "go binary not found",
                ))
            };
            match result {
                Ok(s) if s.success() => {
                    println!("  {OK} gosh installed successfully");
                }
                _ => {
                    // Installation failure is non-fatal; show the manual command.
                    println!("  {WARN} gosh installation failed. Install manually:");
                    println!(
                        "     {MONO}go install github.com/drewwalton19216801/gosh@latest{RESET}"
                    );
                }
            }
        } else {
            println!(
                "  Skipped. Install later: {MONO}go install github.com/drewwalton19216801/gosh@latest{RESET}"
            );
        }
    } else {
        println!("  {OK} Go found");
        println!("  {OK} gosh scripting engine found");
    }
    println!();

    // 1. Agent name
    let agent_name: String = Input::new()
        .with_prompt("  Agent name")
        .default("Roboticus".into())
        .interact_text()?;

    // Only push the SGLang + Apertus onboarding when no local model stack
    // (framework binary, Ollama models, or HF cache) is already present.
    let offer_apertus_onboarding = !has_existing_local_model_stack();
    if !offer_apertus_onboarding {
        println!(
            "  {DETAIL} Existing local model framework/model cache detected; skipping automatic SGLang + Apertus recommendation."
        );
    }

    // 2. LLM provider
    // NOTE: the two menus below order entries differently (SGLang-first vs
    // Ollama-first); the index-to-provider match further down depends on
    // exactly these orderings.
    let providers = if offer_apertus_onboarding {
        vec![
            "SGLang (local, recommended for Apertus)",
            "vLLM (local)",
            "Docker Model Runner (local)",
            "Ollama (local)",
            "OpenAI",
            "Anthropic",
            "Google AI",
            "Moonshot",
            "OpenRouter",
            "llama-cpp (local)",
        ]
    } else {
        vec![
            "Ollama (local)",
            "SGLang (local)",
            "vLLM (local)",
            "Docker Model Runner (local)",
            "OpenAI",
            "Anthropic",
            "Google AI",
            "Moonshot",
            "OpenRouter",
            "llama-cpp (local)",
        ]
    };
    let provider_idx = Select::new()
        .with_prompt("  Select LLM provider")
        .items(&providers)
        .default(if offer_apertus_onboarding { 0 } else { 4 })
        .interact()?;

    // Map (menu variant, selected index) -> (config provider prefix, whether
    // the provider requires an API key). Must stay in sync with `providers`.
    let (provider_prefix, needs_api_key) = match (offer_apertus_onboarding, provider_idx) {
        (true, 0) => ("sglang", false),
        (true, 1) => ("vllm", false),
        (true, 2) => ("docker-model-runner", false),
        (true, 3) => ("ollama", false),
        (true, 4) => ("openai", true),
        (true, 5) => ("anthropic", true),
        (true, 6) => ("google", true),
        (true, 7) => ("moonshot", true),
        (true, 8) => ("openrouter", true),
        (true, 9) => ("llama-cpp", false),
        (false, 0) => ("ollama", false),
        (false, 1) => ("sglang", false),
        (false, 2) => ("vllm", false),
        (false, 3) => ("docker-model-runner", false),
        (false, 4) => ("openai", true),
        (false, 5) => ("anthropic", true),
        (false, 6) => ("google", true),
        (false, 7) => ("moonshot", true),
        (false, 8) => ("openrouter", true),
        (false, 9) => ("llama-cpp", false),
        _ => ("openai", true),
    };

    // 3. API key
    // NOTE(review): the collected key is only used below to decide whether to
    // emit an env-var hint comment in the generated config — it is never
    // persisted to the config file or the environment. Confirm this is
    // intentional and not a dropped write.
    let api_key = if needs_api_key {
        let key: String = Input::new()
            .with_prompt("  API key (or press Enter to set later)")
            .allow_empty(true)
            .interact_text()?;
        if key.is_empty() { None } else { Some(key) }
    } else {
        None
    };

    // 4. Model selection
    let ram_gb = detect_system_ram_gb();
    let model = match provider_prefix {
        "sglang" | "vllm" | "docker-model-runner" | "ollama" => {
            if offer_apertus_onboarding {
                if let Some(ram) = ram_gb {
                    println!("  {DETAIL} Detected system RAM: {ram} GB");
                } else {
                    println!(
                        "  {WARN} Could not detect system RAM. Only 8B Apertus is recommended by default."
                    );
                }

                // Per-provider tooling checks; install offers are best-effort
                // and never abort the wizard.
                match provider_prefix {
                    "sglang" if which_binary("sglang").is_none() => {
                        println!("  {WARN} sglang binary not found.");
                        let install_now = Confirm::new()
                            .with_prompt("  Install SGLang now via pip? (recommended for Apertus)")
                            .default(true)
                            .interact()?;
                        if install_now {
                            let py_bin = which_binary("python3")
                                .or_else(|| which_binary("python"))
                                .unwrap_or_else(|| "python3".into());
                            let status = std::process::Command::new(py_bin)
                                .args(["-m", "pip", "install", "--user", "sglang[all]"])
                                .status();
                            if status.as_ref().map(|s| s.success()).unwrap_or(false) {
                                println!("  {OK} SGLang install completed.");
                            } else {
                                println!(
                                    "  {WARN} SGLang install failed. You can install it later and keep this model selection."
                                );
                            }
                        }
                    }
                    "vllm" if which_binary("vllm").is_none() => {
                        println!("  {WARN} vllm command not found.");
                        let install_now = Confirm::new()
                            .with_prompt("  Install vLLM now via pip?")
                            .default(false)
                            .interact()?;
                        if install_now {
                            let py_bin = which_binary("python3")
                                .or_else(|| which_binary("python"))
                                .unwrap_or_else(|| "python3".into());
                            let status = std::process::Command::new(py_bin)
                                .args(["-m", "pip", "install", "--user", "vllm"])
                                .status();
                            if status.as_ref().map(|s| s.success()).unwrap_or(false) {
                                println!("  {OK} vLLM install completed.");
                            } else {
                                println!(
                                    "  {WARN} vLLM install failed. You can install it later and keep this model selection."
                                );
                            }
                        }
                    }
                    "docker-model-runner" if which_binary("docker").is_none() => {
                        println!("  {WARN} Docker not found. Docker Model Runner requires Docker.");
                    }
                    "ollama" if which_binary("ollama").is_none() => {
                        println!(
                            "  {WARN} Ollama not found. Install from https://ollama.ai to run local models."
                        );
                    }
                    _ => {}
                }

                // Offer 8B always; 70B only on >= 64 GB RAM.
                let model_options = aperture_options_for_provider(provider_prefix, ram_gb);
                let model_idx = Select::new()
                    .with_prompt("  Select Apertus model")
                    .items(&model_options)
                    .default(0)
                    .interact()?;
                model_options[model_idx].clone()
            } else {
                // Local provider without Apertus onboarding: free-form model
                // entry with a sensible per-provider default.
                let default_model = match provider_prefix {
                    "ollama" => "ollama/qwen3:8b",
                    "sglang" => "sglang/default",
                    "vllm" => "vllm/default",
                    "docker-model-runner" => "docker-model-runner/default",
                    _ => "ollama/qwen3:8b",
                };
                Input::new()
                    .with_prompt("  Model")
                    .default(default_model.into())
                    .interact_text()?
            }
        }
        _ => {
            // Hosted providers: free-form model entry with a per-provider default.
            let default_model = match provider_prefix {
                "openai" => "openai/gpt-4o",
                "anthropic" => "anthropic/claude-sonnet-4-20250514",
                "google" => "google/gemini-3.1-pro-preview",
                "moonshot" => "moonshot/kimi-k2.5",
                "openrouter" => "openrouter/google/gemini-3.1-pro-preview",
                "llama-cpp" => "llama-cpp/default",
                _ => "sglang/apertus-8b-instruct:latest",
            };
            Input::new()
                .with_prompt("  Model")
                .default(default_model.into())
                .interact_text()?
        }
    };

    // 5. Server port (falls back to 18789 on unparsable input)
    let port: String = Input::new()
        .with_prompt("  Server port")
        .default("18789".into())
        .interact_text()?;
    let port_num: u16 = port.parse().unwrap_or(18789);

    // 6. Channels
    let enable_telegram = Confirm::new()
        .with_prompt("  Enable Telegram channel?")
        .default(false)
        .interact()?;

    let (telegram_token, telegram_chat_ids) = if enable_telegram {
        let token: String = Input::new()
            .with_prompt("  Telegram bot token")
            .interact_text()?;
        println!("  Tip: to find your Telegram chat ID, message @userinfobot on Telegram.");
        let chat_ids_raw: String = Input::new()
            .with_prompt("  Allowed Telegram chat IDs (comma-separated, or empty to allow all)")
            .default(String::new())
            .allow_empty(true)
            .interact_text()?;
        // Split, trim, and drop empty fragments (e.g. trailing commas).
        let chat_ids: Vec<String> = chat_ids_raw
            .split(',')
            .map(|s| s.trim().to_string())
            .filter(|s| !s.is_empty())
            .collect();
        (Some(token), chat_ids)
    } else {
        (None, Vec::new())
    };

    let enable_discord = Confirm::new()
        .with_prompt("  Enable Discord channel?")
        .default(false)
        .interact()?;

    let discord_token = if enable_discord {
        let token: String = Input::new()
            .with_prompt("  Discord bot token")
            .interact_text()?;
        Some(token)
    } else {
        None
    };

    // 7. Workspace directory
    let home = roboticus_core::home_dir();
    let home = home.to_string_lossy();
    let default_workspace = format!("{home}/.roboticus/workspace");
    let workspace: String = Input::new()
        .with_prompt("  Workspace directory")
        .default(default_workspace)
        .interact_text()?;

    // 8. Database path
    let default_db = format!("{home}/.roboticus/state.db");
    let db_path: String = Input::new()
        .with_prompt("  Database path")
        .default(default_db)
        .interact_text()?;

    // Generate config. Paths go through toml_safe_path so Windows backslashes
    // don't produce invalid TOML escape sequences.
    let mut config = String::new();
    config.push_str("# Roboticus Configuration (generated by onboard wizard)\n\n");
    config.push_str("[agent]\n");
    config.push_str(&format!("name = \"{agent_name}\"\n"));
    config.push_str(&format!(
        "id = \"{}\"\n",
        agent_name.to_lowercase().replace(' ', "-")
    ));
    config.push_str(&format!("workspace = \"{}\"\n", toml_safe_path(&workspace)));
    config.push_str("log_level = \"info\"\n\n");

    // A fresh random API key protects the local REST/MCP server.
    let server_api_key = roboticus_core::config_utils::generate_server_api_key();
    config.push_str("[server]\n");
    config.push_str(&format!("port = {port_num}\n"));
    config.push_str("bind = \"localhost\"\n");
    config.push_str(&format!("api_key = \"{server_api_key}\"\n\n"));

    config.push_str("[database]\n");
    config.push_str(&format!("path = \"{}\"\n\n", toml_safe_path(&db_path)));

    config.push_str("[models]\n");
    config.push_str(&format!("primary = \"{model}\"\n"));
    config.push_str("fallbacks = []\n\n");

    config.push_str("[models.routing]\n");
    config.push_str("mode = \"metascore\"\n");
    config.push_str("confidence_threshold = 0.9\n");
    config.push_str("local_first = true\n\n");

    config.push_str(
        "# Bundled provider defaults (sglang, vllm, docker-model-runner, ollama, openai, anthropic, google, openrouter)\n",
    );
    config.push_str("# are auto-merged. Override or add new providers here.\n");
    if api_key.is_some() {
        // The key itself is deliberately not written into the file; only a
        // hint naming the expected environment variable.
        config.push_str(&format!(
            "# Set the API key via env: {}_API_KEY\n\n",
            provider_prefix.to_uppercase()
        ));
    } else {
        config.push('\n');
    }

    config.push_str("[memory]\n");
    config.push_str("working_budget_pct = 30.0\n");
    config.push_str("episodic_budget_pct = 25.0\n");
    config.push_str("semantic_budget_pct = 20.0\n");
    config.push_str("procedural_budget_pct = 15.0\n");
    config.push_str("relationship_budget_pct = 10.0\n\n");

    config.push_str("[treasury]\n");
    config.push_str("per_payment_cap = 100.0\n");
    config.push_str("hourly_transfer_limit = 500.0\n");
    config.push_str("daily_transfer_limit = 2000.0\n");
    config.push_str("minimum_reserve = 5.0\n");
    config.push_str("daily_inference_budget = 50.0\n\n");

    if let Some(ref token) = telegram_token {
        config.push_str("[channels.telegram]\n");
        config.push_str(&format!("token = \"{token}\"\n"));
        if telegram_chat_ids.is_empty() {
            config.push_str("# allowed_chat_ids = []  # Tip: message @userinfobot on Telegram to find your chat ID\n\n");
        } else {
            config.push_str(&format!(
                "allowed_chat_ids = [{}]\n\n",
                telegram_chat_ids
                    .iter()
                    .map(|id| id.to_string())
                    .collect::<Vec<_>>()
                    .join(", ")
            ));
        }
    }

    if let Some(ref token) = discord_token {
        config.push_str("[channels.discord]\n");
        config.push_str(&format!("token = \"{token}\"\n\n"));
    }

    config.push_str("[skills]\n");
    config.push_str(&format!(
        "skills_dir = \"{}\"\n\n",
        toml_safe_path(&format!("{home}/.roboticus/skills"))
    ));

    config.push_str("[a2a]\n");
    config.push_str("enabled = true\n");

    // Write config (with an overwrite confirmation on re-runs).
    let config_path = "roboticus.toml";
    let is_first_install = !std::path::Path::new(config_path).exists();
    if !is_first_install {
        let overwrite = Confirm::new()
            .with_prompt("  roboticus.toml already exists. Overwrite?")
            .default(false)
            .interact()?;
        if !overwrite {
            println!("\n  Aborted. Existing config preserved.\n");
            return Ok(());
        }
    }

    std::fs::write(config_path, &config)?;
    println!("\n  {OK} Configuration written to {config_path}");
    println!(
        "  {DETAIL} A random {BOLD}server.api_key{RESET} was generated for REST / MCP clients (see [server] in the file)."
    );

    // Create workspace dir
    let ws_path = std::path::Path::new(&workspace);
    if !ws_path.exists() {
        std::fs::create_dir_all(ws_path)?;
        println!("  {OK} Created workspace: {workspace}");
    }

    // Create skills dir with starter skills
    let skills_path = format!("{home}/.roboticus/skills");
    let sp = std::path::Path::new(&skills_path);
    if !sp.exists() {
        std::fs::create_dir_all(sp)?;
    }
    let skills_written = write_starter_skills(sp)?;
    if skills_written > 0 {
        println!("  {ACTION} Created {skills_written} starter skills");
    } else {
        println!("  {OK} Skills directory ready");
    }

    // Personality setup
    println!("\n  {BOLD}Personality Setup{RESET}\n");
    let personality_options = vec![
        "Keep Roboticus (recommended default)",
        "Quick setup (5 questions)",
        "Full interview (guided conversation with your agent)",
    ];
    let personality_idx = Select::new()
        .with_prompt("  How would you like to configure your agent's personality?")
        .items(&personality_options)
        .default(0)
        .interact()?;

    match personality_idx {
        0 => {
            roboticus_core::personality::write_defaults(ws_path)?;
            println!("  {OK} Roboticus personality loaded (OS.toml + FIRMWARE.toml)");
        }
        1 => {
            run_quick_personality_setup(ws_path)?;
        }
        2 => {
            // Write a minimal starter OS.toml, then direct the user to the
            // in-agent /interview flow for the deep configuration.
            let basic_name: String = Input::new()
                .with_prompt("  Agent name")
                .default(agent_name.clone())
                .interact_text()?;
            let domains = vec!["general", "developer", "business", "creative", "research"];
            let domain_idx = Select::new()
                .with_prompt("  Primary domain")
                .items(&domains)
                .default(0)
                .interact()?;

            let starter_os = roboticus_core::personality::generate_os_toml(
                &basic_name,
                "balanced",
                "suggest",
                domains[domain_idx],
            );
            std::fs::write(ws_path.join("OS.toml"), &starter_os)?;
            // NOTE(review): write_defaults runs after the custom OS.toml write
            // above — if it also writes OS.toml, the starter personality is
            // clobbered. Verify the intended precedence.
            roboticus_core::personality::write_defaults(ws_path)?;

            println!();
            println!("  {OK} Starter personality written.");
            println!("  {DETAIL} Start your agent:  {BOLD}roboticus serve{RESET}");
            println!("  {DETAIL} Then send it:      {BOLD}/interview{RESET}");
            println!("  {DETAIL} The agent will walk you through a deep personality interview.");
        }
        _ => {}
    }

    // On first install, explicitly ask whether to run the interview flow.
    if is_first_install && personality_idx != 2 {
        let do_interview = Confirm::new()
            .with_prompt("  Run the guided personality interview now? (recommended)")
            .default(true)
            .interact()?;
        if do_interview {
            println!();
            println!("  {DETAIL} Start your agent:  {BOLD}roboticus serve{RESET}");
            println!("  {DETAIL} Then send it:      {BOLD}/interview{RESET}");
            println!("  {DETAIL} The agent will walk you through a deep personality interview.");
        }
    }

    println!();
    println!("  {OK} Setup complete! Run {BOLD}roboticus serve{RESET} to start.");
    println!();

    Ok(())
}
754
#[cfg(test)]
mod tests {
    use super::*;
    use crate::test_support::EnvGuard;

    // First call writes every starter skill; a second call over the same
    // directory writes nothing (files are never overwritten).
    #[test]
    fn write_starter_skills_is_idempotent() {
        let dir = tempfile::tempdir().unwrap();
        let first = write_starter_skills(dir.path()).unwrap();
        assert_eq!(first, STARTER_SKILLS.len());

        let second = write_starter_skills(dir.path()).unwrap();
        assert_eq!(second, 0);
    }

    // The 70B option must only appear when detected RAM is >= 64 GB.
    #[test]
    fn aperture_options_include_70b_only_for_high_ram() {
        let low = aperture_options_for_provider("sglang", Some(32));
        assert!(low.iter().any(|m| m.ends_with(APERTUS_8B_SUFFIX)));
        assert!(!low.iter().any(|m| m.ends_with(APERTUS_70B_SUFFIX)));

        let high = aperture_options_for_provider("sglang", Some(128));
        assert!(high.iter().any(|m| m.ends_with(APERTUS_8B_SUFFIX)));
        assert!(high.iter().any(|m| m.ends_with(APERTUS_70B_SUFFIX)));
    }

    // Every bundled starter skill is Markdown with YAML frontmatter.
    #[test]
    fn starter_skills_contain_expected_frontmatter() {
        for (name, content) in STARTER_SKILLS {
            assert!(name.ends_with(".md"));
            assert!(content.starts_with("---"));
            assert!(content.contains("name:"));
            assert!(content.contains("description:"));
            assert!(content.contains("triggers:"));
        }
    }

    // Serialized because it mutates the process-wide HF_HOME env var.
    #[serial_test::serial]
    #[test]
    fn has_hf_model_cache_detects_models_directory() {
        let dir = tempfile::tempdir().unwrap();
        let hf_home = dir.path().join(".cache").join("huggingface");
        let hub = hf_home.join("hub");
        std::fs::create_dir_all(&hub).unwrap();
        std::fs::create_dir_all(hub.join("models--test--foo")).unwrap();
        let _guard = EnvGuard::set("HF_HOME", hf_home.to_str().unwrap());
        assert!(has_hf_model_cache());
    }

    // Serialized because it mutates the process-wide HF_HOME env var.
    #[serial_test::serial]
    #[test]
    fn has_hf_model_cache_false_when_unset_or_empty() {
        let dir = tempfile::tempdir().unwrap();
        let hf_home = dir.path().join("empty_hf");
        std::fs::create_dir_all(&hf_home).unwrap();
        let _guard = EnvGuard::set("HF_HOME", hf_home.to_str().unwrap());
        assert!(!has_hf_model_cache());
    }

    #[test]
    fn has_existing_local_model_stack_false_with_no_tools_or_cache() {
        let empty_path = tempfile::tempdir().unwrap();
        let path_var = std::ffi::OsString::from(empty_path.path().to_str().unwrap());
        assert!(!has_framework_in_path(&path_var));
        // HF cache with no models-– dir returns false.
        let hf_home = tempfile::tempdir().unwrap();
        assert!(!hf_home.path().join("hub").exists());
    }

    // Smoke test only: detection is platform/environment dependent, so we
    // merely assert a positive value when detection succeeds at all.
    #[test]
    fn detect_system_ram_gb_returns_reasonable_value_when_present() {
        let ram = detect_system_ram_gb();
        if let Some(v) = ram {
            assert!(v > 0);
        }
    }
}
831}