1use anyhow::{Context, Result};
2use clap::{CommandFactory, Parser, Subcommand};
3
4use crate::bootstrap;
5use crate::fleet;
6use crate::logging::{self, LogFormat};
7use crate::template_cmd;
8use crate::ui;
9use crate::upgrade;
10
11use mvm_runtime::config;
12use mvm_runtime::shell;
13use mvm_runtime::vm::{firecracker, image, lima, microvm};
14
// Top-level CLI definition, parsed with clap's derive API.
// NOTE: plain `//` comments are used on purpose — `///` doc comments on clap
// derive items become user-visible help text.
#[derive(Parser)]
#[command(name = "mvm", version, about = "Firecracker microVM development tool")]
struct Cli {
    // Log output format: "human" or "json". Anything else falls back to
    // human with a warning (see `run`).
    #[arg(long, global = true)]
    log_format: Option<String>,

    // Firecracker version override; `run` exports it as MVM_FC_VERSION for
    // downstream code to read.
    #[arg(long, global = true)]
    fc_version: Option<String>,

    // The selected subcommand.
    #[command(subcommand)]
    command: Commands,
}
29
// All top-level `mvm` subcommands. The 8/16 defaults mirror the Lima VM
// defaults used by `run_setup_steps` (8 vCPUs / 16 GiB).
#[derive(Subcommand)]
enum Commands {
    // One-shot full environment bootstrap (package manager, Lima, setup).
    Bootstrap {
        // Skip the dev-only package-manager check.
        #[arg(long)]
        production: bool,
    },
    // Create/refresh the Lima VM and Firecracker assets.
    Setup {
        // Rebuild the microVM rootfs from scratch instead of running setup.
        #[arg(long)]
        recreate: bool,
        // Redo steps even when they already look complete.
        #[arg(long)]
        force: bool,
        #[arg(long, default_value = "8")]
        lima_cpus: u32,
        #[arg(long, default_value = "16")]
        lima_mem: u32,
    },
    // Ensure everything is installed, then drop into the dev shell.
    Dev {
        #[arg(long, default_value = "8")]
        lima_cpus: u32,
        #[arg(long, default_value = "16")]
        lima_mem: u32,
        // Directory to cd into inside the VM shell.
        #[arg(long)]
        project: Option<String>,
    },
    // Boot the default microVM, or run a built image when one is given.
    Start {
        // Path to a built image; omitted = default microVM (see `run`).
        image: Option<String>,
        #[arg(long)]
        config: Option<String>,
        #[arg(long, short = 'v')]
        volume: Vec<String>,
        #[arg(long, short = 'c')]
        cpus: Option<u32>,
        #[arg(long, short = 'm')]
        memory: Option<u32>,
    },
    // Stop one microVM by name, all of them, or whatever is running.
    Stop {
        name: Option<String>,
        #[arg(long)]
        all: bool,
    },
    // Alias for entering the VM shell (see `cmd_ssh`).
    Ssh,
    // Print an ssh_config snippet for the Lima VM.
    SshConfig,
    // Open an interactive shell inside the Lima VM.
    Shell {
        #[arg(long)]
        project: Option<String>,
        #[arg(long, default_value = "8")]
        lima_cpus: u32,
        #[arg(long, default_value = "16")]
        lima_mem: u32,
    },
    // Build and install the mvm binary inside the Lima VM.
    Sync {
        #[arg(long)]
        debug: bool,
        #[arg(long)]
        skip_deps: bool,
        #[arg(long)]
        force: bool,
    },
    // Show or follow logs for a named microVM.
    Logs {
        name: String,
        #[arg(long, short = 'f')]
        follow: bool,
        #[arg(long, short = 'n', default_value = "50")]
        lines: u32,
        #[arg(long)]
        hypervisor: bool,
    },
    // Report platform / Lima / Firecracker / microVM state.
    Status,
    // Tear down the environment; `-y` skips confirmation.
    Destroy {
        #[arg(long, short = 'y')]
        yes: bool,
    },
    // Self-upgrade the mvm binary (see `upgrade::upgrade`).
    Upgrade {
        #[arg(long)]
        check: bool,
        #[arg(long)]
        force: bool,
    },
    // Run environment diagnostics (see `crate::doctor`).
    Doctor {
        #[arg(long)]
        json: bool,
    },
    // Deploy-guard checks plus optional `cargo publish --dry-run`.
    Release {
        #[arg(long)]
        dry_run: bool,
        #[arg(long)]
        guard_only: bool,
    },
    // VM template management (see `TemplateCmd`).
    Template {
        #[command(subcommand)]
        action: TemplateCmd,
    },
    // Build an image from a local directory, or from a flake when --flake is set.
    Build {
        #[arg(default_value = ".")]
        path: String,
        #[arg(long, short = 'o')]
        output: Option<String>,
        #[arg(long)]
        flake: Option<String>,
        #[arg(long)]
        profile: Option<String>,
        #[arg(long)]
        watch: bool,
    },
    // Run a microVM straight from a flake reference.
    Run {
        #[arg(long)]
        flake: String,
        #[arg(long)]
        name: Option<String>,
        #[arg(long)]
        profile: Option<String>,
        #[arg(long, default_value = "2")]
        cpus: Option<u32>,
        #[arg(long, default_value = "1024")]
        memory: Option<u32>,
        #[arg(long)]
        config: Option<String>,
        #[arg(long, short = 'v')]
        volume: Vec<String>,
    },
    // Bring up a microVM (optionally from a config file / flake).
    Up {
        name: Option<String>,
        #[arg(long, short = 'f')]
        config: Option<String>,
        #[arg(long)]
        flake: Option<String>,
        #[arg(long)]
        profile: Option<String>,
        #[arg(long)]
        cpus: Option<u32>,
        #[arg(long)]
        memory: Option<u32>,
    },
    // Bring a microVM down.
    Down {
        name: Option<String>,
        #[arg(long, short = 'f')]
        config: Option<String>,
    },
    // Guest-agent queries (see `VmCmd`).
    Vm {
        #[command(subcommand)]
        action: VmCmd,
    },
    // Emit shell completion scripts.
    Completions {
        #[arg(value_enum)]
        shell: clap_complete::Shell,
    },
}
251
// Subcommands of `mvm template` for managing VM templates.
// NOTE: plain `//` comments on purpose — `///` would become clap help text.
#[derive(Subcommand)]
enum TemplateCmd {
    // Create a single template definition.
    Create {
        name: String,
        #[arg(long, default_value = ".")]
        flake: String,
        #[arg(long, default_value = "default")]
        profile: String,
        #[arg(long, default_value = "worker")]
        role: String,
        // NOTE(review): cpus is u8 here but u32 elsewhere — confirm intended.
        #[arg(long, default_value = "2")]
        cpus: u8,
        #[arg(long, default_value = "1024")]
        mem: u32,
        #[arg(long, default_value = "0")]
        data_disk: u32,
    },
    // Create several templates from one base name, one per role in `roles`.
    CreateMulti {
        base: String,
        #[arg(long, default_value = ".")]
        flake: String,
        #[arg(long, default_value = "default")]
        profile: String,
        #[arg(long)]
        roles: String,
        #[arg(long, default_value = "2")]
        cpus: u8,
        #[arg(long, default_value = "1024")]
        mem: u32,
        #[arg(long, default_value = "0")]
        data_disk: u32,
    },
    // Build a template's artifacts.
    Build {
        name: String,
        #[arg(long)]
        force: bool,
        #[arg(long)]
        config: Option<String>,
    },
    // Push a template (optionally a specific revision).
    Push {
        name: String,
        #[arg(long)]
        revision: Option<String>,
    },
    // Pull a template (optionally a specific revision).
    Pull {
        name: String,
        #[arg(long)]
        revision: Option<String>,
    },
    // Verify a template's integrity.
    Verify {
        name: String,
        #[arg(long)]
        revision: Option<String>,
    },
    // List known templates.
    List {
        #[arg(long)]
        json: bool,
    },
    // Show details for one template.
    Info {
        name: String,
        #[arg(long)]
        json: bool,
    },
    // Delete a template; `--force` skips safety checks.
    Delete {
        name: String,
        #[arg(long)]
        force: bool,
    },
    // Scaffold a new template project in `dir`.
    Init {
        name: String,
        #[arg(long)]
        local: bool,
        #[arg(long)]
        vm: bool,
        #[arg(long, default_value = ".")]
        dir: String,
    },
}
349
// Subcommands of `mvm vm` for querying running microVMs.
#[derive(Subcommand)]
enum VmCmd {
    // Ping a microVM (handler `cmd_vm` is defined elsewhere in this crate).
    Ping {
        name: Option<String>,
    },
    // Report a microVM's status, optionally as JSON.
    Status {
        name: Option<String>,
        #[arg(long)]
        json: bool,
    },
}
366
/// CLI entry point: parse arguments, apply the global flags, initialize
/// logging, dispatch to the matching `cmd_*` handler, and decorate any
/// resulting error with an actionable hint via `with_hints`.
pub fn run() -> Result<()> {
    let cli = Cli::parse();

    // Export the requested Firecracker version for downstream code to read.
    // `set_var` is unsafe in edition 2024 because it can race with concurrent
    // environment reads; presumably no other threads exist this early in
    // startup — NOTE(review): confirm.
    if let Some(ref version) = cli.fc_version {
        unsafe { std::env::set_var("MVM_FC_VERSION", version) };
    }

    // Unknown formats degrade to human-readable output with a warning rather
    // than failing the whole invocation.
    let log_format = match cli.log_format.as_deref() {
        Some("json") => LogFormat::Json,
        Some("human") => LogFormat::Human,
        Some(other) => {
            eprintln!(
                "Unknown --log-format '{}', using 'human'. Valid: human, json",
                other
            );
            LogFormat::Human
        }
        None => LogFormat::Human,
    };
    logging::init(log_format);

    // One arm per subcommand; every handler returns Result<()>.
    let result = match cli.command {
        Commands::Bootstrap { production } => cmd_bootstrap(production),
        Commands::Setup {
            recreate,
            force,
            lima_cpus,
            lima_mem,
        } => cmd_setup(recreate, force, lima_cpus, lima_mem),
        Commands::Dev {
            lima_cpus,
            lima_mem,
            project,
        } => cmd_dev(lima_cpus, lima_mem, project.as_deref()),
        // `start` doubles as "boot default microVM" (no image argument) and
        // "run this image".
        Commands::Start {
            image,
            config,
            volume,
            cpus,
            memory,
        } => match image {
            Some(ref elf) => cmd_start_image(elf, config.as_deref(), &volume, cpus, memory),
            None => cmd_start(),
        },
        Commands::Stop { name, all } => cmd_stop(name.as_deref(), all),
        Commands::Ssh => cmd_ssh(),
        Commands::SshConfig => cmd_ssh_config(),
        Commands::Shell {
            project,
            lima_cpus,
            lima_mem,
        } => cmd_shell(project.as_deref(), lima_cpus, lima_mem),
        Commands::Sync {
            debug,
            skip_deps,
            force,
        } => cmd_sync(debug, skip_deps, force),
        Commands::Logs {
            name,
            follow,
            lines,
            hypervisor,
        } => cmd_logs(&name, follow, lines, hypervisor),
        Commands::Status => cmd_status(),
        Commands::Destroy { yes } => cmd_destroy(yes),
        Commands::Upgrade { check, force } => cmd_upgrade(check, force),
        Commands::Doctor { json } => cmd_doctor(json),
        Commands::Release {
            dry_run,
            guard_only,
        } => cmd_release(dry_run, guard_only),
        // `build --flake <ref>` takes the Nix path; otherwise build from a
        // local directory.
        Commands::Build {
            path,
            output,
            flake,
            profile,
            watch,
        } => {
            if let Some(flake_ref) = flake {
                cmd_build_flake(&flake_ref, profile.as_deref(), watch)
            } else {
                cmd_build(&path, output.as_deref())
            }
        }
        Commands::Run {
            flake,
            name,
            profile,
            cpus,
            memory,
            config,
            volume,
        } => cmd_run(
            &flake,
            name.as_deref(),
            profile.as_deref(),
            cpus,
            memory,
            config.as_deref(),
            &volume,
        ),
        Commands::Up {
            name,
            config,
            flake,
            profile,
            cpus,
            memory,
        } => cmd_up(
            name.as_deref(),
            config.as_deref(),
            flake.as_deref(),
            profile.as_deref(),
            cpus,
            memory,
        ),
        Commands::Down { name, config } => cmd_down(name.as_deref(), config.as_deref()),
        Commands::Completions { shell } => cmd_completions(shell),
        Commands::Template { action } => cmd_template(action),
        Commands::Vm { action } => cmd_vm(action),
    };

    with_hints(result)
}
498
499fn cmd_bootstrap(production: bool) -> Result<()> {
504 ui::info("Bootstrapping full environment...\n");
505
506 if !production {
507 bootstrap::check_package_manager()?;
508 }
509
510 ui::info("\nInstalling prerequisites...");
511 bootstrap::ensure_lima()?;
512
513 run_setup_steps(false, 8, 16)?;
515
516 ui::success("\nBootstrap complete! Run 'mvm dev' to enter the development environment.");
517 Ok(())
518}
519
520fn cmd_setup(recreate: bool, force: bool, lima_cpus: u32, lima_mem: u32) -> Result<()> {
521 if recreate {
522 recreate_rootfs()?;
523 ui::success("\nRootfs recreated! Run 'mvm start' or 'mvm dev' to launch.");
524 return Ok(());
525 }
526
527 if !bootstrap::is_lima_required() {
528 run_setup_steps(force, lima_cpus, lima_mem)?;
530 ui::success("\nSetup complete! Run 'mvm start' to launch a microVM.");
531 return Ok(());
532 }
533
534 which::which("limactl").map_err(|_| {
535 anyhow::anyhow!(
536 "'limactl' not found. Install Lima first: brew install lima\n\
537 Or run 'mvm bootstrap' for full automatic setup."
538 )
539 })?;
540
541 run_setup_steps(force, lima_cpus, lima_mem)?;
542
543 ui::success("\nSetup complete! Run 'mvm start' to launch a microVM.");
544 Ok(())
545}
546
/// Delete and rebuild the microVM root filesystem, stopping any running
/// microVM first so the image files are not in use.
fn recreate_rootfs() -> Result<()> {
    if bootstrap::is_lima_required() {
        lima::require_running()?;
    }

    if firecracker::is_running()? {
        ui::info("Stopping running microVM...");
        microvm::stop()?;
    }

    ui::info("Removing existing rootfs...");
    shell::run_in_vm(&format!(
        "rm -f {dir}/ubuntu-*.ext4",
        dir = config::MICROVM_DIR,
    ))?;

    ui::info("Rebuilding rootfs...");
    firecracker::prepare_rootfs()?;
    firecracker::write_state()?;

    Ok(())
}
571
/// `mvm dev`: make sure the whole stack exists (Lima where required,
/// Firecracker and its assets), then drop into the development shell.
fn cmd_dev(lima_cpus: u32, lima_mem: u32, project: Option<&str>) -> Result<()> {
    ui::info("Launching development environment...\n");

    if bootstrap::is_lima_required() {
        if which::which("limactl").is_err() {
            // No Lima at all: full bootstrap (which also runs the setup steps).
            ui::info("Lima not found. Running bootstrap...\n");
            cmd_bootstrap(false)?;
        } else {
            let lima_status = lima::get_status()?;
            match lima_status {
                lima::LimaStatus::NotFound => {
                    ui::info("Lima VM not found. Running setup...\n");
                    run_setup_steps(false, lima_cpus, lima_mem)?;
                }
                lima::LimaStatus::Stopped => {
                    ui::info("Lima VM is stopped. Starting...");
                    lima::start()?;
                }
                lima::LimaStatus::Running => {}
            }
        }
    }

    // First use: install Firecracker plus kernel/rootfs assets.
    if !firecracker::is_installed()? {
        ui::info("Firecracker not installed. Running setup steps...\n");
        firecracker::install()?;
        firecracker::download_assets()?;
        firecracker::prepare_rootfs()?;
        firecracker::write_state()?;
    }

    cmd_shell(project, lima_cpus, lima_mem)
}
608
/// Shared setup pipeline used by bootstrap, setup, and dev:
///   1. Lima VM (skipped on native Linux),
///   2. Firecracker install,
///   3. kernel/rootfs download (with one corruption retry),
///   4. rootfs preparation + state file.
/// `force` redoes steps 1–2 even when they appear complete.
fn run_setup_steps(force: bool, lima_cpus: u32, lima_mem: u32) -> Result<()> {
    if bootstrap::is_lima_required() {
        let lima_status = lima::get_status()?;
        if !force && matches!(lima_status, lima::LimaStatus::Running) {
            ui::step(1, 4, "Lima VM already running — skipping.");
        } else {
            // Render a lima.yaml with the requested resources and (re)start
            // the VM from it.
            let opts = config::LimaRenderOptions {
                cpus: Some(lima_cpus),
                memory_gib: Some(lima_mem),
                ..Default::default()
            };
            let lima_yaml = config::render_lima_yaml_with(&opts)?;
            ui::info(&format!(
                "Lima VM resources: {} vCPUs, {} GiB memory",
                lima_cpus, lima_mem,
            ));
            ui::step(1, 4, "Setting up Lima VM...");
            lima::ensure_running(lima_yaml.path())?;
        }
    } else {
        ui::step(1, 4, "Native Linux detected — skipping Lima VM setup.");
    }

    if !force && firecracker::is_installed()? {
        ui::step(2, 4, "Firecracker already installed — skipping.");
    } else {
        ui::step(2, 4, "Installing Firecracker...");
        firecracker::install()?;
    }

    ui::step(3, 4, "Downloading kernel and rootfs...");
    firecracker::download_assets()?;

    // One retry if the downloaded squashfs fails validation.
    if !firecracker::validate_rootfs_squashfs()? {
        ui::warn("Downloaded rootfs is corrupted. Re-downloading...");
        shell::run_in_vm(&format!(
            "rm -f {dir}/ubuntu-*.squashfs.upstream",
            dir = config::MICROVM_DIR,
        ))?;
        firecracker::download_assets()?;
    }

    ui::step(4, 4, "Preparing root filesystem...");
    firecracker::prepare_rootfs()?;

    firecracker::write_state()?;
    Ok(())
}
661
/// `mvm start` with no image argument: boot the default microVM.
fn cmd_start() -> Result<()> {
    microvm::start()
}
665
666fn cmd_start_image(
667 elf_path: &str,
668 config_path: Option<&str>,
669 volumes: &[String],
670 cpus: Option<u32>,
671 memory: Option<u32>,
672) -> Result<()> {
673 let limactl_present = shell::run_host("which", &["limactl"])
675 .map(|o| o.status.success())
676 .unwrap_or(false);
677 if limactl_present {
678 lima::require_running()?;
679 } else {
680 ui::warn("limactl not found; assuming we're already inside the Lima VM and proceeding.");
681 }
682
683 let rt_config = match config_path {
684 Some(p) => image::parse_runtime_config(p)?,
685 None => image::RuntimeConfig::default(),
686 };
687
688 let mut elf_args = Vec::new();
689
690 let final_cpus = cpus.or(rt_config.cpus);
691 let final_memory = memory.or(rt_config.memory);
692 if let Some(c) = final_cpus {
693 elf_args.push("--cpus".to_string());
694 elf_args.push(c.to_string());
695 }
696 if let Some(m) = final_memory {
697 elf_args.push("--memory".to_string());
698 elf_args.push(m.to_string());
699 }
700
701 if !volumes.is_empty() {
702 for v in volumes {
703 elf_args.push("--volume".to_string());
704 elf_args.push(v.clone());
705 }
706 } else {
707 for v in &rt_config.volumes {
708 elf_args.push("--volume".to_string());
709 elf_args.push(format!("{}:{}:{}", v.host, v.guest, v.size));
710 }
711 }
712
713 let args_str = elf_args
714 .iter()
715 .map(|a| shell_escape(a))
716 .collect::<Vec<_>>()
717 .join(" ");
718
719 let cmd = if args_str.is_empty() {
720 elf_path.to_string()
721 } else {
722 format!("{} {}", elf_path, args_str)
723 };
724
725 ui::info(&format!("Starting image: {}", elf_path));
726 shell::replace_process("limactl", &["shell", config::VM_NAME, "bash", "-c", &cmd])
727}
728
/// Quote `s` for safe interpolation into a `bash -c` command line.
///
/// Fix: the original only quoted strings containing a space or a quote, so
/// `$`, backticks, `;`, `&`, `|`, globs, etc. were re-interpreted by the
/// guest shell, and an empty argument vanished entirely. Now anything that is
/// not made purely of inert characters is single-quoted, with embedded single
/// quotes rendered via the POSIX `'\''` dance. `~` and plain path characters
/// stay unquoted so existing tilde-expanding invocations keep working.
fn shell_escape(s: &str) -> String {
    const EXTRA_SAFE: &[char] = &['/', '.', '-', '_', '=', ':', ',', '+', '@', '%', '~'];
    let inert = !s.is_empty()
        && s.chars()
            .all(|c| c.is_ascii_alphanumeric() || EXTRA_SAFE.contains(&c));
    if inert {
        s.to_string()
    } else {
        // Close the quote, emit a literal ', reopen: 'a'\''b'
        format!("'{}'", s.replace('\'', "'\\''"))
    }
}
736
737fn cmd_stop(name: Option<&str>, all: bool) -> Result<()> {
738 match (name, all) {
739 (Some(n), _) => microvm::stop_vm(n),
740 (None, true) => microvm::stop_all_vms(),
741 (None, false) => {
742 let vms = microvm::list_vms().unwrap_or_default();
744 if !vms.is_empty() {
745 microvm::stop_all_vms()
746 } else {
747 microvm::stop()
748 }
749 }
750 }
751}
752
/// `mvm ssh`: alias for entering the VM shell.
fn cmd_ssh() -> Result<()> {
    // The 8/16 resource arguments are ignored by cmd_shell (they only matter
    // at VM creation time).
    cmd_shell(None, 8, 16)
}
758
/// Print an ssh_config snippet for the Lima VM, suitable for pasting into
/// ~/.ssh/config.
fn cmd_ssh_config() -> Result<()> {
    let home_dir = std::env::var("HOME").unwrap_or_else(|_| "~".to_string());
    let lima_ssh_config = format!("{}/.lima/{}/ssh.config", home_dir, config::VM_NAME);

    // Fall back to placeholder values when Lima has not generated its ssh
    // config yet (i.e. before 'mvm setup').
    let (hostname, port, user, identity) =
        parse_lima_ssh_config(&lima_ssh_config).unwrap_or_else(|| {
            (
                "127.0.0.1".to_string(),
                "# <port> # run 'mvm setup' first".to_string(),
                std::env::var("USER").unwrap_or_else(|_| "lima".to_string()),
                format!("{}/.lima/_config/user", home_dir),
            )
        });

    println!(
        r#"# mvm Lima VM — add to ~/.ssh/config
Host mvm
    HostName {hostname}
    Port {port}
    User {user}
    IdentityFile {identity}
    StrictHostKeyChecking no
    UserKnownHostsFile /dev/null
    LogLevel ERROR"#,
        hostname = hostname,
        port = port,
        user = user,
        identity = identity,
    );
    Ok(())
}
791
/// Extract (hostname, port, user, identity-file) from a Lima-generated ssh
/// config. Returns `None` when the file is unreadable or any field is absent;
/// the last occurrence of a repeated key wins, and the identity path is
/// stripped of surrounding double quotes.
fn parse_lima_ssh_config(path: &str) -> Option<(String, String, String, String)> {
    let content = std::fs::read_to_string(path).ok()?;

    // Last matching "<key> <value>" line, trimmed.
    let grab = |key: &str| -> Option<String> {
        content
            .lines()
            .filter_map(|raw| raw.trim().strip_prefix(key))
            .last()
            .map(|val| val.trim().to_string())
    };

    let hostname = grab("Hostname ")?;
    let port = grab("Port ")?;
    let user = grab("User ")?;
    let identity = grab("IdentityFile ")?.trim_matches('"').to_string();
    Some((hostname, port, user, identity))
}
815
816fn cmd_shell(project: Option<&str>, _lima_cpus: u32, _lima_mem: u32) -> Result<()> {
817 lima::require_running()?;
818
819 let fc_ver =
821 shell::run_in_vm_stdout("firecracker --version 2>/dev/null | head -1").unwrap_or_default();
822 let nix_ver = shell::run_in_vm_stdout("nix --version 2>/dev/null").unwrap_or_default();
823
824 ui::info("mvm development shell");
825 ui::info(&format!(
826 " Firecracker: {}",
827 if fc_ver.trim().is_empty() {
828 "not installed"
829 } else {
830 fc_ver.trim()
831 }
832 ));
833 ui::info(&format!(
834 " Nix: {}",
835 if nix_ver.trim().is_empty() {
836 "not installed"
837 } else {
838 nix_ver.trim()
839 }
840 ));
841 let mvm_in_vm = shell::run_in_vm_stdout("test -f /usr/local/bin/mvm && echo yes || echo no")
842 .unwrap_or_default();
843 if mvm_in_vm.trim() == "yes" {
844 let mvm_ver =
845 shell::run_in_vm_stdout("/usr/local/bin/mvm --version 2>/dev/null").unwrap_or_default();
846 ui::info(&format!(
847 " mvm: {}",
848 if mvm_ver.trim().is_empty() {
849 "installed"
850 } else {
851 mvm_ver.trim()
852 }
853 ));
854 } else {
855 ui::warn(" mvm not installed in VM. Run 'mvm sync' to build and install it.");
856 }
857
858 ui::info(&format!(" Lima VM: {}\n", config::VM_NAME));
859
860 match project {
861 Some(path) => {
862 let cmd = format!("cd {} && exec bash -l", shell_escape(path));
863 shell::replace_process("limactl", &["shell", config::VM_NAME, "bash", "-c", &cmd])
864 }
865 None => shell::replace_process("limactl", &["shell", config::VM_NAME]),
866 }
867}
868
/// Shell snippet that installs the apt build dependencies unless every
/// package is already present.
fn sync_deps_script() -> String {
    const PKGS: &str = "build-essential binutils lld pkg-config libssl-dev";
    format!(
        "dpkg -s {PKGS} >/dev/null 2>&1 || (sudo apt-get update -qq && sudo apt-get install -y -qq {PKGS})"
    )
}
875
/// Shell snippet that ensures a stable Rust toolchain: update rustup when it
/// exists, otherwise install it non-interactively, then print rustc's version
/// as a sanity check.
fn sync_rustup_script() -> String {
    [
        r#"export PATH="$HOME/.cargo/bin:$PATH"; "#,
        "if command -v rustup >/dev/null 2>&1; then ",
        "rustup update stable --no-self-update 2>/dev/null || true; ",
        "else ",
        "curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain stable; ",
        "fi && ",
        r#"if [ -f "$HOME/.cargo/env" ]; then . "$HOME/.cargo/env"; fi && "#,
        "rustc --version",
    ]
    .concat()
}
887
/// Shell snippet that builds the `mvm` binary from `source_dir` inside the
/// VM. `debug` picks the cargo profile; `vm_arch` namespaces the target dir
/// so host and guest artifacts never collide. Single quotes in the path are
/// escaped for the surrounding single-quoted shell words.
fn sync_build_script(source_dir: &str, debug: bool, vm_arch: &str) -> String {
    let escaped_dir = source_dir.replace('\'', "'\\''");
    let profile_flag = if debug { "" } else { " --release" };
    format!(
        "export PATH=\"$HOME/.cargo/bin:$PATH\" && \
         if [ -f \"$HOME/.cargo/env\" ]; then . \"$HOME/.cargo/env\"; fi && \
         cd '{escaped_dir}' && \
         CARGO_TARGET_DIR='target/linux-{vm_arch}' cargo build{profile_flag} --bin mvm"
    )
}
901
/// Shell snippet that installs the freshly built `mvm` binary from the
/// profile's target directory into /usr/local/bin with mode 0755.
fn sync_install_script(source_dir: &str, debug: bool, vm_arch: &str) -> String {
    let escaped_src = source_dir.replace('\'', "'\\''");
    let profile = if debug { "debug" } else { "release" };
    format!(
        "sudo install -m 0755 '{escaped_src}/target/linux-{vm_arch}/{profile}/mvm' /usr/local/bin/"
    )
}
914
/// `mvm sync`: build the mvm binary inside the Lima VM and install it to
/// /usr/local/bin there, so `mvm shell` has a matching guest-side binary.
fn cmd_sync(debug: bool, skip_deps: bool, force: bool) -> Result<()> {
    // On native Linux the host binary already runs in place; nothing to sync
    // unless the user explicitly forces a rebuild.
    if !bootstrap::is_lima_required() && !force {
        ui::info("Native Linux detected. The host mvm binary is already Linux-native.");
        ui::info("No sync needed — mvm is already available. Use --force to rebuild anyway.");
        return Ok(());
    }

    let limactl_available = shell::run_host("which", &["limactl"])
        .map(|o| o.status.success())
        .unwrap_or(false);

    if limactl_available {
        lima::require_running()?;
    } else if shell::inside_lima() {
        ui::info("Running inside Lima guest; skipping limactl check.");
    } else if bootstrap::is_lima_required() {
        anyhow::bail!(
            "Lima is required but 'limactl' is not available. Install Lima or run inside the Lima VM."
        );
    } else {
        ui::warn("limactl not found; proceeding on native host.");
    }

    // Guest architecture namespaces the cargo target dir (see
    // sync_build_script) so host and guest artifacts never collide.
    let vm_arch = shell::run_in_vm_stdout("uname -m")
        .unwrap_or_else(|_| "unknown".to_string())
        .trim()
        .to_string();

    let source_dir = std::env::current_dir()
        .context("Failed to determine current directory")?
        .to_string_lossy()
        .to_string();

    let profile_name = if debug { "debug" } else { "release" };
    let total_steps: u32 = if skip_deps { 2 } else { 4 };
    let mut step = 0u32;

    // Skip the whole build when the guest already reports this exact version.
    if !force {
        let desired_version = env!("CARGO_PKG_VERSION");
        if let Ok(current) =
            shell::run_in_vm_stdout("/usr/local/bin/mvm --version 2>/dev/null || true")
            && current.contains(desired_version)
        {
            ui::success(&format!(
                "mvm {} already installed inside Lima VM. Use --force to rebuild.",
                desired_version
            ));
            return Ok(());
        }
    }

    if !skip_deps {
        step += 1;
        ui::step(step, total_steps, "Ensuring build dependencies (apt)...");
        shell::run_in_vm_visible(&sync_deps_script())?;

        step += 1;
        ui::step(step, total_steps, "Ensuring Rust toolchain...");
        shell::run_in_vm_visible(&sync_rustup_script())?;
    }

    step += 1;
    let build_msg = format!("Building mvm ({profile_name} profile)...");
    ui::step(step, total_steps, &build_msg);
    shell::run_in_vm_visible(&sync_build_script(&source_dir, debug, &vm_arch))?;

    step += 1;
    ui::step(
        step,
        total_steps,
        "Installing binaries to /usr/local/bin/...",
    );
    shell::run_in_vm_visible(&sync_install_script(&source_dir, debug, &vm_arch))?;

    // Confirm what actually got installed.
    let version = shell::run_in_vm_stdout("/usr/local/bin/mvm --version")
        .unwrap_or_else(|_| "unknown".to_string());
    ui::success(&format!("Sync complete! Installed: {}", version.trim()));
    ui::info("The mvm binary is now available inside 'mvm shell'.");

    Ok(())
}
997
/// `mvm logs`: show or follow the last `lines` log lines for the named
/// microVM; `hypervisor` presumably selects the hypervisor-side log — see
/// microvm::logs for the actual semantics.
fn cmd_logs(name: &str, follow: bool, lines: u32, hypervisor: bool) -> Result<()> {
    microvm::logs(name, follow, lines, hypervisor)
}
1001
/// `mvm status`: report platform, Lima VM, Nix, Firecracker, and microVM
/// state, including a table of named microVMs when any are running.
fn cmd_status() -> Result<()> {
    ui::status_header();

    ui::status_line("Platform:", &mvm_core::platform::current().to_string());

    if bootstrap::is_lima_required() {
        let lima_status = lima::get_status()?;
        match lima_status {
            lima::LimaStatus::NotFound => {
                // Without the Lima VM nothing below can be queried.
                ui::status_line("Lima VM:", "Not created (run 'mvm setup')");
                ui::status_line("Firecracker:", "-");
                ui::status_line("MicroVM:", "-");
                return Ok(());
            }
            lima::LimaStatus::Stopped => {
                ui::status_line("Lima VM:", "Stopped");
                ui::status_line("Firecracker:", "-");
                ui::status_line("MicroVM:", "-");
                return Ok(());
            }
            lima::LimaStatus::Running => {
                ui::status_line("Lima VM:", "Running");
            }
        }
    } else {
        ui::status_line("Lima VM:", "Not required (native KVM)");
    }

    let nix_ver = shell::run_in_vm_stdout("nix --version 2>/dev/null").unwrap_or_default();
    let nix_display = nix_ver.trim();
    ui::status_line(
        "Nix:",
        if nix_display.is_empty() {
            "Not installed"
        } else {
            nix_display
        },
    );

    if firecracker::is_running()? {
        ui::status_line("Firecracker:", "Running");
    } else {
        if firecracker::is_installed()? {
            let fc_ver = shell::run_in_vm_stdout("firecracker --version 2>/dev/null | head -1")
                .unwrap_or_default();
            let fc_display = fc_ver.trim();
            let status = if fc_display.is_empty() {
                "Installed, not running".to_string()
            } else {
                format!("{}, not running", fc_display)
            };
            ui::status_line("Firecracker:", &status);
        } else {
            ui::status_line("Firecracker:", "Not installed");
        }
        ui::status_line("MicroVM:", "Not running");
        return Ok(());
    }

    let vms = microvm::list_vms().unwrap_or_default();
    if !vms.is_empty() {
        // Resolve VMS_DIR inside the VM (letting the guest shell expand it),
        // then probe each VM directory for a vsock socket.
        let abs_vms =
            shell::run_in_vm_stdout(&format!("echo {}", config::VMS_DIR)).unwrap_or_default();
        let vsock_check = shell::run_in_vm_stdout(&format!(
            "for d in {dir}/*/; do \
             name=$(basename \"$d\"); \
             [ -S \"$d/v.sock\" ] && echo \"$name:yes\" || echo \"$name:no\"; \
             done",
            dir = abs_vms,
        ))
        .unwrap_or_default();
        // "name:yes|no" lines -> lookup table for the table below.
        let vsock_map: std::collections::HashMap<&str, &str> = vsock_check
            .lines()
            .filter_map(|line| line.split_once(':'))
            .collect();

        ui::status_line("MicroVMs:", &format!("{} running", vms.len()));
        println!();
        println!(
            " {:<16} {:<10} {:<16} {:<14} {:<8} STATUS",
            "NAME", "PROFILE", "GUEST IP", "REVISION", "VSOCK"
        );
        println!(" {}", "-".repeat(78));
        for vm in &vms {
            let name = vm.name.as_deref().unwrap_or("?");
            let profile = vm.profile.as_deref().unwrap_or("default");
            let ip = vm.guest_ip.as_deref().unwrap_or("?");
            // Abbreviate long revisions to 10 characters for the table.
            let rev = vm
                .revision
                .as_deref()
                .map(|r| if r.len() > 10 { &r[..10] } else { r })
                .unwrap_or("?");
            let vsock = vsock_map.get(name).copied().unwrap_or("?");
            println!(
                " {:<16} {:<10} {:<16} {:<14} {:<8} Running",
                name, profile, ip, rev, vsock
            );
        }
    } else if let Some(info) = microvm::read_run_info()
        && info.mode == "flake"
    {
        let rev = info.revision.as_deref().unwrap_or("unknown");
        let ip = info.guest_ip.as_deref().unwrap_or(config::GUEST_IP);
        ui::status_line(
            "MicroVM:",
            &format!("Running — flake (revision {}, guest IP {})", rev, ip),
        );
    } else {
        ui::status_line(
            "MicroVM:",
            &format!("Running (guest IP {})", config::GUEST_IP),
        );
    }

    Ok(())
}
1122
/// `mvm upgrade`: delegate to the upgrade module (`check` = report only,
/// `force` = upgrade regardless).
fn cmd_upgrade(check: bool, force: bool) -> Result<()> {
    upgrade::upgrade(check, force)
}
1126
/// `mvm doctor`: run environment diagnostics, optionally as JSON.
fn cmd_doctor(json: bool) -> Result<()> {
    crate::doctor::run(json)
}
1130
1131fn with_hints(result: Result<()>) -> Result<()> {
1137 if let Err(ref e) = result {
1138 let msg = format!("{:#}", e);
1139 if msg.contains("limactl: command not found") || msg.contains("limactl: not found") {
1140 ui::warn("Hint: Install Lima with 'brew install lima' or run 'mvm bootstrap'.");
1141 } else if msg.contains("firecracker: command not found")
1142 || msg.contains("firecracker: not found")
1143 {
1144 ui::warn("Hint: Run 'mvm setup' to install Firecracker.");
1145 } else if msg.contains("/dev/kvm") {
1146 ui::warn(
1147 "Hint: Enable KVM/virtualization in your BIOS or VM settings.\n \
1148 On macOS, KVM is available inside the Lima VM.",
1149 );
1150 } else if msg.contains("Permission denied") && msg.contains(".mvm") {
1151 ui::warn("Hint: Check directory permissions on ~/.mvm (set MVM_DATA_DIR to override).");
1152 } else if msg.contains("nix: command not found") || msg.contains("nix: not found") {
1153 ui::warn("Hint: Nix is installed inside the Lima VM. Run 'mvm shell' first.");
1154 }
1155 }
1156 result
1157}
1158
/// Workspace crates checked by `mvm release`, in publish order (dependencies
/// before dependents — NOTE(review): keep in sync with the workspace graph).
const PUBLISH_CRATES: &[&str] = &[
    "mvm-core",
    "mvm-guest",
    "mvm-build",
    "mvm-runtime",
    "mvm-cli",
    "mvm",
];
1172
/// `mvm release`: run the deploy-guard checks (version hygiene, git tag),
/// then — with --dry-run — `cargo publish --dry-run` every workspace crate.
/// Live publishing is intentionally CI-only.
fn cmd_release(dry_run: bool, guard_only: bool) -> Result<()> {
    let workspace_root = find_workspace_root()?;

    ui::info("Running deploy guard checks...\n");

    let cargo_toml_path = workspace_root.join("Cargo.toml");
    let cargo_toml =
        std::fs::read_to_string(&cargo_toml_path).context("Failed to read workspace Cargo.toml")?;

    let workspace_version = extract_workspace_version(&cargo_toml)?;
    ui::status_line("Workspace version:", &workspace_version);

    let crates_dir = workspace_root.join("crates");
    check_no_hardcoded_versions(&crates_dir)?;
    ui::success("All crates use version.workspace = true");

    check_inter_crate_versions(&workspace_root, &workspace_version)?;
    ui::success(&format!(
        "All inter-crate dependencies use version {}",
        workspace_version
    ));

    // A missing tag is only a warning: pre-release commits are expected to be
    // untagged.
    let tag_name = format!("v{}", workspace_version);
    match check_git_tag(&tag_name) {
        Ok(()) => ui::success(&format!("HEAD is tagged with {}", tag_name)),
        Err(e) => ui::warn(&format!("Tag check: {} (ok for pre-release)", e)),
    }

    if guard_only {
        ui::success("\nDeploy guard checks passed.");
        return Ok(());
    }

    if !dry_run {
        anyhow::bail!(
            "Live publish not supported from CLI. Use --dry-run for local validation,\n\
             or trigger the publish-crates GitHub Action for real releases."
        );
    }

    ui::info("\nRunning cargo publish --dry-run for all crates...\n");

    let mut failed = Vec::new();
    for (idx, crate_name) in PUBLISH_CRATES.iter().enumerate() {
        ui::step(
            (idx + 1) as u32,
            PUBLISH_CRATES.len() as u32,
            &format!("Checking {}", crate_name),
        );

        // --allow-dirty/--no-verify: we only want the packaging checks here,
        // not a full rebuild of a pristine tree.
        let output = std::process::Command::new("cargo")
            .args([
                "publish",
                "-p",
                crate_name,
                "--dry-run",
                "--allow-dirty",
                "--no-verify",
            ])
            .current_dir(&workspace_root)
            .output()
            .with_context(|| format!("Failed to run cargo publish for {}", crate_name))?;

        if output.status.success() {
            ui::success(&format!(" {} passed", crate_name));
        } else {
            let stderr = String::from_utf8_lossy(&output.stderr);
            ui::warn(&format!(" {} failed: {}", crate_name, stderr.trim()));
            failed.push(*crate_name);
        }
    }

    println!();
    if failed.is_empty() {
        ui::success("All crates passed dry-run! Ready to publish.");
    } else {
        // Failures are reported but non-fatal: crates whose dependencies are
        // not on crates.io yet legitimately fail dry-run.
        ui::warn(&format!(
            "{} crate(s) failed dry-run (expected if deps not yet on crates.io):",
            failed.len()
        ));
        for name in &failed {
            ui::warn(&format!(" - {}", name));
        }
    }

    Ok(())
}
1265
1266fn find_workspace_root() -> Result<std::path::PathBuf> {
1268 let mut dir = std::env::current_dir()?;
1269 loop {
1270 let candidate = dir.join("Cargo.toml");
1271 if candidate.is_file() {
1272 let content = std::fs::read_to_string(&candidate)?;
1273 if content.contains("[workspace]") {
1274 return Ok(dir);
1275 }
1276 }
1277 if !dir.pop() {
1278 anyhow::bail!("Could not find workspace root (no Cargo.toml with [workspace])");
1279 }
1280 }
1281}
1282
1283fn extract_workspace_version(cargo_toml: &str) -> Result<String> {
1285 let mut in_workspace_package = false;
1286 for line in cargo_toml.lines() {
1287 let trimmed = line.trim();
1288 if trimmed == "[workspace.package]" {
1289 in_workspace_package = true;
1290 continue;
1291 }
1292 if trimmed.starts_with('[') {
1293 in_workspace_package = false;
1294 continue;
1295 }
1296 if in_workspace_package
1297 && trimmed.starts_with("version")
1298 && let Some(version) = trimmed.split('"').nth(1)
1299 {
1300 return Ok(version.to_string());
1301 }
1302 }
1303 anyhow::bail!("Could not find version in [workspace.package]")
1304}
1305
1306fn check_no_hardcoded_versions(crates_dir: &std::path::Path) -> Result<()> {
1308 for entry in std::fs::read_dir(crates_dir)? {
1309 let entry = entry?;
1310 let cargo_toml = entry.path().join("Cargo.toml");
1311 if !cargo_toml.is_file() {
1312 continue;
1313 }
1314 let content = std::fs::read_to_string(&cargo_toml)?;
1315 for line in content.lines() {
1316 let trimmed = line.trim();
1317 if trimmed.starts_with("version = \"") {
1319 let crate_name = entry.file_name().to_string_lossy().to_string();
1320 anyhow::bail!(
1321 "Hardcoded version in {}: {}\nUse 'version.workspace = true' instead.",
1322 crate_name,
1323 trimmed
1324 );
1325 }
1326 }
1327 }
1328 Ok(())
1329}
1330
1331fn check_inter_crate_versions(workspace_root: &std::path::Path, expected: &str) -> Result<()> {
1333 let mut files_to_check = vec![workspace_root.join("Cargo.toml")];
1334 let crates_dir = workspace_root.join("crates");
1335 if crates_dir.is_dir() {
1336 for entry in std::fs::read_dir(&crates_dir)? {
1337 let entry = entry?;
1338 let cargo_toml = entry.path().join("Cargo.toml");
1339 if cargo_toml.is_file() {
1340 files_to_check.push(cargo_toml);
1341 }
1342 }
1343 }
1344
1345 for path in &files_to_check {
1346 let content = std::fs::read_to_string(path)?;
1347 for line in content.lines() {
1348 let trimmed = line.trim();
1349 if trimmed.starts_with("mvm-")
1351 && trimmed.contains("version = \"")
1352 && let Some(version) = trimmed
1353 .split("version = \"")
1354 .nth(1)
1355 .and_then(|s| s.split('"').next())
1356 && version != expected
1357 {
1358 let file_name = path.file_name().unwrap_or_default().to_string_lossy();
1359 anyhow::bail!(
1360 "Version mismatch in {}: found '{}', expected '{}'\n Line: {}",
1361 file_name,
1362 version,
1363 expected,
1364 trimmed
1365 );
1366 }
1367 }
1368 }
1369 Ok(())
1370}
1371
1372fn check_git_tag(expected_tag: &str) -> Result<()> {
1374 let output = std::process::Command::new("git")
1375 .args(["tag", "--points-at", "HEAD"])
1376 .output()
1377 .context("Failed to run git tag")?;
1378
1379 let tags = String::from_utf8_lossy(&output.stdout);
1380 let tag_list: Vec<&str> = tags.lines().collect();
1381
1382 if tag_list.contains(&expected_tag) {
1383 Ok(())
1384 } else {
1385 let current = if tag_list.is_empty() {
1386 "<none>".to_string()
1387 } else {
1388 tag_list.join(", ")
1389 };
1390 anyhow::bail!(
1391 "HEAD is not tagged with {}. Current tags: {}",
1392 expected_tag,
1393 current
1394 )
1395 }
1396}
1397
1398fn cmd_build(path: &str, output: Option<&str>) -> Result<()> {
1399 let elf_path = image::build(path, output)?;
1400 ui::success(&format!("\nImage ready: {}", elf_path));
1401 ui::info(&format!("Run with: mvm start {}", elf_path));
1402 Ok(())
1403}
1404
/// `mvm build --flake`: build a flake (optionally a named profile) and print
/// the resulting kernel/rootfs artifact paths.
///
/// With `watch` and a *local* flake path, keeps rebuilding every time
/// `flake.lock`'s mtime changes; remote refs (containing ':') get a single
/// build with a warning instead.
fn cmd_build_flake(flake_ref: &str, profile: Option<&str>, watch: bool) -> Result<()> {
    if bootstrap::is_lima_required() {
        lima::require_running()?;
    }

    let resolved = resolve_flake_ref(flake_ref)?;

    let env = mvm_runtime::build_env::RuntimeBuildEnv;
    // Watch only works for local flakes (remote refs contain ':').
    let watch_enabled = watch && !resolved.contains(':');

    if watch && resolved.contains(':') {
        ui::warn("Watch mode requires a local flake; running a single build instead.");
    }

    // Baseline mtime of flake.lock; None when the file is missing/unreadable.
    let mut last_mtime = std::fs::metadata(format!("{}/flake.lock", resolved))
        .and_then(|m| m.modified())
        .ok();

    loop {
        let profile_display = profile.unwrap_or("default");
        ui::step(
            1,
            2,
            &format!("Building flake {} (profile={})", resolved, profile_display),
        );

        let result = mvm_build::dev_build::dev_build(&env, &resolved, profile)?;
        mvm_build::dev_build::ensure_guest_agent_if_needed(&env, &result)?;

        ui::step(2, 2, "Build complete");

        if result.cached {
            ui::success(&format!("\nCache hit — revision {}", result.revision_hash));
        } else {
            ui::success(&format!(
                "\nBuild complete — revision {}",
                result.revision_hash
            ));
        }

        ui::info(&format!(" Kernel: {}", result.vmlinux_path));
        ui::info(&format!(" Rootfs: {}", result.rootfs_path));
        ui::info(&format!("\nRun with: mvm run --flake {}", flake_ref));

        if !watch_enabled {
            return Ok(());
        }

        ui::info("Watching flake.lock for changes (Ctrl+C to exit)...");
        // Poll flake.lock every 2s; rebuild once its mtime differs from the
        // last observed value.
        loop {
            std::thread::sleep(std::time::Duration::from_secs(2));
            let new_mtime = std::fs::metadata(format!("{}/flake.lock", resolved))
                .and_then(|m| m.modified())
                .ok();
            if new_mtime.is_some() && new_mtime != last_mtime {
                last_mtime = new_mtime;
                break;
            }
        }
    }
}
1467
1468fn resolve_flake_ref(flake_ref: &str) -> Result<String> {
1471 if flake_ref.contains(':') {
1472 return Ok(flake_ref.to_string());
1474 }
1475
1476 let path = std::path::Path::new(flake_ref);
1478 let canonical = path
1479 .canonicalize()
1480 .with_context(|| format!("Flake path '{}' does not exist", flake_ref))?;
1481
1482 Ok(canonical.to_string_lossy().to_string())
1483}
1484
/// `mvm run`: build a flake and boot a single Firecracker microVM from it.
///
/// Resource precedence: CLI flags > runtime config file > built-in defaults
/// (2 CPUs / 1024 MiB memory). Volumes given on the CLI replace the config
/// file's volumes entirely. A VM name is generated when none is supplied.
fn cmd_run(
    flake_ref: &str,
    name: Option<&str>,
    profile: Option<&str>,
    cpus: Option<u32>,
    memory: Option<u32>,
    config_path: Option<&str>,
    volumes: &[String],
) -> Result<()> {
    if bootstrap::is_lima_required() {
        lima::require_running()?;
    }

    let resolved = resolve_flake_ref(flake_ref)?;
    let profile_display = profile.unwrap_or("default");

    // Use the given name, or generate a random one (fallback "vm-0").
    let vm_name = match name {
        Some(n) => n.to_string(),
        None => {
            let mut generator = names::Generator::default();
            generator.next().unwrap_or_else(|| "vm-0".to_string())
        }
    };

    ui::step(
        1,
        2,
        &format!(
            "Building flake {} (profile={}, name={})",
            resolved, profile_display, vm_name
        ),
    );

    let env = mvm_runtime::build_env::RuntimeBuildEnv;
    let result = mvm_build::dev_build::dev_build(&env, &resolved, profile)?;
    mvm_build::dev_build::ensure_guest_agent_if_needed(&env, &result)?;

    if result.cached {
        ui::info(&format!("Cache hit — revision {}", result.revision_hash));
    } else {
        ui::info(&format!(
            "Build complete — revision {}",
            result.revision_hash
        ));
    }

    ui::step(2, 2, &format!("Booting Firecracker VM '{}'", vm_name));

    // Optional runtime config file supplies fallback cpus/memory/volumes.
    let rt_config = match config_path {
        Some(p) => image::parse_runtime_config(p)?,
        None => image::RuntimeConfig::default(),
    };

    // CLI volumes win outright; otherwise take the config file's volumes.
    let volume_cfg: Vec<image::RuntimeVolume> = if !volumes.is_empty() {
        volumes
            .iter()
            .map(|v| parse_runtime_volume(v))
            .collect::<Result<_>>()?
    } else {
        rt_config.volumes.clone()
    };

    const DEFAULT_CPUS: u32 = 2;
    const DEFAULT_MEM: u32 = 1024;

    let final_cpus = cpus.or(rt_config.cpus).unwrap_or(DEFAULT_CPUS);
    let final_memory = memory.or(rt_config.memory).unwrap_or(DEFAULT_MEM);

    let slot = microvm::allocate_slot(&vm_name)?;

    let run_config = microvm::FlakeRunConfig {
        name: vm_name,
        slot,
        vmlinux_path: result.vmlinux_path,
        initrd_path: result.initrd_path,
        rootfs_path: result.rootfs_path,
        revision_hash: result.revision_hash,
        flake_ref: flake_ref.to_string(),
        profile: profile.map(|s| s.to_string()),
        cpus: final_cpus,
        memory: final_memory,
        volumes: volume_cfg,
    };

    microvm::run_from_build(&run_config)
}
1573
1574fn parse_runtime_volume(spec: &str) -> Result<image::RuntimeVolume> {
1575 let parts: Vec<&str> = spec.splitn(3, ':').collect();
1576 if parts.len() != 3 {
1577 anyhow::bail!(
1578 "Invalid volume '{}'. Expected format host_path:guest_mount:size",
1579 spec
1580 );
1581 }
1582 Ok(image::RuntimeVolume {
1583 host: parts[0].to_string(),
1584 guest: parts[1].to_string(),
1585 size: parts[2].to_string(),
1586 })
1587}
1588
/// `mvm up`: launch a fleet from an mvm.toml, or a single VM from `--flake`.
///
/// Three cases, in order:
/// * a fleet config is found — build each distinct profile once, then launch
///   every configured VM (or just `name`), with CLI flags overriding per-VM
///   and fleet-default settings;
/// * no config but `--flake` was given — build and launch one VM with
///   defaults (2 CPUs / 1024 MiB, no volumes);
/// * neither — error telling the user what to do.
fn cmd_up(
    name: Option<&str>,
    config_path: Option<&str>,
    flake: Option<&str>,
    profile: Option<&str>,
    cpus: Option<u32>,
    memory: Option<u32>,
) -> Result<()> {
    if bootstrap::is_lima_required() {
        lima::require_running()?;
    }

    let fleet_found = load_fleet_config(config_path)?;

    match (fleet_found, flake) {
        (Some((fleet_config, base_dir)), _) => {
            // --flake overrides the config's flake; config paths are relative
            // to the config file's directory.
            let flake_ref = match flake {
                Some(f) => resolve_flake_ref(f)?,
                None => {
                    let flake_path = base_dir.join(&fleet_config.flake);
                    resolve_flake_ref(&flake_path.to_string_lossy())?
                }
            };

            // A named VM must exist in the config; no name means all VMs.
            let vm_names: Vec<String> = match name {
                Some(n) => {
                    if !fleet_config.vms.contains_key(n) {
                        let available: Vec<&str> =
                            fleet_config.vms.keys().map(|s| s.as_str()).collect();
                        anyhow::bail!(
                            "VM '{}' not defined in config. Available: {:?}",
                            n,
                            available
                        );
                    }
                    vec![n.to_string()]
                }
                None => fleet_config.vms.keys().cloned().collect(),
            };

            if vm_names.is_empty() {
                anyhow::bail!("No VMs defined in config. Add [vms.<name>] sections.");
            }

            // Collect the distinct profiles in play (CLI flag > per-VM
            // profile > fleet default) so each is built exactly once.
            let profiles: std::collections::BTreeSet<Option<String>> = vm_names
                .iter()
                .filter_map(|n| fleet_config.vms.get(n))
                .map(|vm| {
                    profile.map(|p| p.to_string()).or_else(|| {
                        vm.profile
                            .clone()
                            .or_else(|| fleet_config.defaults.profile.clone())
                    })
                })
                .collect();

            let builds = build_profiles(&profiles, &flake_ref)?;

            let total = vm_names.len();
            for (idx, vm_name) in vm_names.iter().enumerate() {
                let mut resolved = fleet::resolve_vm(&fleet_config, vm_name)?;

                // CLI flags override whatever the config resolved to.
                if let Some(p) = profile {
                    resolved.profile = Some(p.to_string());
                }
                if let Some(c) = cpus {
                    resolved.cpus = c;
                }
                if let Some(m) = memory {
                    resolved.memory = m;
                }

                let build_result = builds.get(&resolved.profile).ok_or_else(|| {
                    anyhow::anyhow!("No build for profile {:?}", resolved.profile)
                })?;

                let volumes: Vec<image::RuntimeVolume> = resolved
                    .volumes
                    .iter()
                    .map(|v| parse_runtime_volume(v))
                    .collect::<Result<_>>()?;

                ui::step(
                    (idx + 1) as u32,
                    total as u32,
                    &format!("Launching VM '{}'", vm_name),
                );

                let slot = microvm::allocate_slot(vm_name)?;

                let run_config = microvm::FlakeRunConfig {
                    name: vm_name.clone(),
                    slot,
                    vmlinux_path: build_result.vmlinux_path.clone(),
                    initrd_path: build_result.initrd_path.clone(),
                    rootfs_path: build_result.rootfs_path.clone(),
                    revision_hash: build_result.revision_hash.clone(),
                    flake_ref: flake_ref.clone(),
                    profile: resolved.profile,
                    cpus: resolved.cpus,
                    memory: resolved.memory,
                    volumes,
                };

                microvm::run_from_build(&run_config)?;
            }

            ui::success(&format!("{} VMs running", vm_names.len()));
            Ok(())
        }

        (None, Some(flake_ref)) => {
            // Single-VM fallback: no fleet config, flake given explicitly.
            let resolved_flake = resolve_flake_ref(flake_ref)?;

            let vm_name = match name {
                Some(n) => n.to_string(),
                None => {
                    let mut generator = names::Generator::default();
                    generator.next().unwrap_or_else(|| "vm-0".to_string())
                }
            };

            const DEFAULT_CPUS: u32 = 2;
            const DEFAULT_MEM: u32 = 1024;

            let env = mvm_runtime::build_env::RuntimeBuildEnv;
            let result = mvm_build::dev_build::dev_build(&env, &resolved_flake, profile)?;
            mvm_build::dev_build::ensure_guest_agent_if_needed(&env, &result)?;

            let slot = microvm::allocate_slot(&vm_name)?;

            let run_config = microvm::FlakeRunConfig {
                name: vm_name,
                slot,
                vmlinux_path: result.vmlinux_path,
                initrd_path: result.initrd_path,
                rootfs_path: result.rootfs_path,
                revision_hash: result.revision_hash,
                flake_ref: flake_ref.to_string(),
                profile: profile.map(|s| s.to_string()),
                cpus: cpus.unwrap_or(DEFAULT_CPUS),
                memory: memory.unwrap_or(DEFAULT_MEM),
                volumes: vec![],
            };

            microvm::run_from_build(&run_config)
        }

        (None, None) => {
            anyhow::bail!(
                "No mvm.toml found and no --flake specified.\n\
                 Use 'mvm up --flake <path>' or create an mvm.toml."
            );
        }
    }
}
1759
1760fn load_fleet_config(
1762 config_path: Option<&str>,
1763) -> Result<Option<(fleet::FleetConfig, std::path::PathBuf)>> {
1764 match config_path {
1765 Some(path) => {
1766 let content = std::fs::read_to_string(path)
1767 .with_context(|| format!("Failed to read {}", path))?;
1768 let config: fleet::FleetConfig =
1769 toml::from_str(&content).with_context(|| format!("Failed to parse {}", path))?;
1770 let dir = std::path::Path::new(path)
1771 .parent()
1772 .unwrap_or(std::path::Path::new("."))
1773 .to_path_buf();
1774 Ok(Some((config, dir)))
1775 }
1776 None => fleet::find_fleet_config(),
1777 }
1778}
1779
1780fn build_profiles(
1782 profiles: &std::collections::BTreeSet<Option<String>>,
1783 resolved_flake: &str,
1784) -> Result<std::collections::HashMap<Option<String>, mvm_build::dev_build::DevBuildResult>> {
1785 let mut builds = std::collections::HashMap::new();
1786 let env = mvm_runtime::build_env::RuntimeBuildEnv;
1787
1788 for (idx, profile) in profiles.iter().enumerate() {
1789 let label = profile.as_deref().unwrap_or("default");
1790 ui::step(
1791 (idx + 1) as u32,
1792 profiles.len() as u32,
1793 &format!("Building profile '{}'", label),
1794 );
1795
1796 let result = mvm_build::dev_build::dev_build(&env, resolved_flake, profile.as_deref())?;
1797 mvm_build::dev_build::ensure_guest_agent_if_needed(&env, &result)?;
1798
1799 if result.cached {
1800 ui::info(&format!("Cache hit — revision {}", result.revision_hash));
1801 } else {
1802 ui::info(&format!(
1803 "Build complete — revision {}",
1804 result.revision_hash
1805 ));
1806 }
1807
1808 builds.insert(profile.clone(), result);
1809 }
1810 Ok(builds)
1811}
1812
1813fn cmd_down(name: Option<&str>, config_path: Option<&str>) -> Result<()> {
1814 match name {
1815 Some(n) => microvm::stop_vm(n),
1816 None => {
1817 let found = load_fleet_config(config_path)?;
1818 if let Some((fleet_config, _base_dir)) = found {
1819 let mut stopped = 0;
1820 for vm_name in fleet_config.vms.keys() {
1821 if microvm::stop_vm(vm_name).is_ok() {
1822 stopped += 1;
1823 }
1824 }
1825
1826 let remaining = microvm::list_vms().unwrap_or_default();
1828 if remaining.is_empty() {
1829 let _ = mvm_runtime::vm::network::bridge_teardown();
1830 }
1831
1832 ui::success(&format!("Stopped {} VMs", stopped));
1833 Ok(())
1834 } else {
1835 microvm::stop_all_vms()
1836 }
1837 }
1838 }
1839}
1840
1841fn cmd_completions(shell: clap_complete::Shell) -> Result<()> {
1842 let mut cmd = Cli::command();
1843 clap_complete::generate(shell, &mut cmd, "mvm", &mut std::io::stdout());
1844 Ok(())
1845}
1846
1847fn cmd_destroy(yes: bool) -> Result<()> {
1848 let status = lima::get_status()?;
1849
1850 if matches!(status, lima::LimaStatus::NotFound) {
1851 ui::info("Nothing to destroy. Lima VM does not exist.");
1852 return Ok(());
1853 }
1854
1855 if matches!(status, lima::LimaStatus::Running) && firecracker::is_running()? {
1856 microvm::stop()?;
1857 }
1858
1859 if !yes && !ui::confirm("This will delete the Lima VM and all microVM data. Continue?") {
1860 ui::info("Cancelled.");
1861 return Ok(());
1862 }
1863
1864 ui::info("Destroying Lima VM...");
1865 lima::destroy()?;
1866 ui::success("Destroyed.");
1867 Ok(())
1868}
1869
/// Dispatch `mvm template <subcommand>` to the matching `template_cmd`
/// handler. Pure argument plumbing; all work happens in `template_cmd`.
fn cmd_template(action: TemplateCmd) -> Result<()> {
    match action {
        TemplateCmd::Create {
            name,
            flake,
            profile,
            role,
            cpus,
            mem,
            data_disk,
        } => template_cmd::create_single(&name, &flake, &profile, &role, cpus, mem, data_disk),
        TemplateCmd::CreateMulti {
            base,
            flake,
            profile,
            roles,
            cpus,
            mem,
            data_disk,
        } => {
            // Roles arrive as a comma-separated string; trim each entry.
            let role_list: Vec<String> = roles.split(',').map(|s| s.trim().to_string()).collect();
            template_cmd::create_multi(&base, &flake, &profile, &role_list, cpus, mem, data_disk)
        }
        TemplateCmd::Build {
            name,
            force,
            config,
        } => template_cmd::build(&name, force, config.as_deref()),
        TemplateCmd::Push { name, revision } => template_cmd::push(&name, revision.as_deref()),
        TemplateCmd::Pull { name, revision } => template_cmd::pull(&name, revision.as_deref()),
        TemplateCmd::Verify { name, revision } => template_cmd::verify(&name, revision.as_deref()),
        TemplateCmd::List { json } => template_cmd::list(json),
        TemplateCmd::Info { name, json } => template_cmd::info(&name, json),
        TemplateCmd::Delete { name, force } => template_cmd::delete(&name, force),
        TemplateCmd::Init {
            name,
            local,
            vm,
            dir,
        } => {
            // --local wins only when --vm was not also requested.
            let use_local = local && !vm;
            template_cmd::init(&name, use_local, &dir)
        }
    }
}
1919
1920fn cmd_vm(action: VmCmd) -> Result<()> {
1925 match action {
1926 VmCmd::Ping { name: Some(name) } => cmd_vm_ping(&name),
1927 VmCmd::Ping { name: None } => cmd_vm_ping_all(),
1928 VmCmd::Status {
1929 name: Some(name),
1930 json,
1931 } => cmd_vm_status(&name, json),
1932 VmCmd::Status { name: None, json } => cmd_vm_status_all(json),
1933 }
1934}
1935
1936fn resolve_running_vm(name: &str) -> Result<String> {
1939 if bootstrap::is_lima_required() {
1940 lima::require_running()?;
1941 }
1942
1943 let abs_vms = shell::run_in_vm_stdout(&format!("echo {}", config::VMS_DIR))?;
1944 let abs_dir = format!("{}/{}", abs_vms, name);
1945 let pid_file = format!("{}/fc.pid", abs_dir);
1946
1947 if !firecracker::is_vm_running(&pid_file)? {
1948 anyhow::bail!(
1949 "VM '{}' is not running. Use 'mvm status' to list running VMs.",
1950 name
1951 );
1952 }
1953
1954 Ok(abs_dir)
1955}
1956
/// `mvm vm ping <name>`: check that the VM's guest agent answers a ping.
///
/// When Lima is required, delegates to the mvm binary inside the Lima VM;
/// otherwise talks to the guest agent directly over the VM's vsock socket.
fn cmd_vm_ping(name: &str) -> Result<()> {
    let abs_dir = resolve_running_vm(name)?;

    if bootstrap::is_lima_required() {
        // Delegate to the copy of mvm installed inside the Lima VM.
        let mvm_installed =
            shell::run_in_vm_stdout("test -f /usr/local/bin/mvm && echo yes || echo no")?;
        if mvm_installed.trim() != "yes" {
            anyhow::bail!("mvm is not installed inside the Lima VM. Run 'mvm sync' first.");
        }
        shell::run_in_vm_visible(&format!("/usr/local/bin/mvm vm ping {}", name))?;
        return Ok(());
    }

    // Direct path: ping the guest agent over the per-VM vsock socket.
    let vsock_path = format!("{}/v.sock", abs_dir);
    match mvm_guest::vsock::ping_at(&vsock_path) {
        Ok(true) => {
            ui::success(&format!("VM '{}' is alive (pong received)", name));
            Ok(())
        }
        Ok(false) => {
            ui::error(&format!("VM '{}' did not respond to ping", name));
            anyhow::bail!("Ping failed")
        }
        Err(e) => {
            ui::error(&format!("Failed to connect to VM '{}': {}", name, e));
            Err(e)
        }
    }
}
1988
/// `mvm vm status <name>`: show the VM's worker status plus integration
/// health, either pretty-printed or as JSON.
///
/// When Lima is required, delegates to the mvm binary inside the Lima VM;
/// otherwise queries the guest agent over the VM's vsock socket directly.
fn cmd_vm_status(name: &str, json: bool) -> Result<()> {
    let abs_dir = resolve_running_vm(name)?;

    if bootstrap::is_lima_required() {
        // Delegate to the copy of mvm installed inside the Lima VM.
        let mvm_installed =
            shell::run_in_vm_stdout("test -f /usr/local/bin/mvm && echo yes || echo no")?;
        if mvm_installed.trim() != "yes" {
            anyhow::bail!("mvm is not installed inside the Lima VM. Run 'mvm sync' first.");
        }
        let json_flag = if json { " --json" } else { "" };
        shell::run_in_vm_visible(&format!(
            "/usr/local/bin/mvm vm status {}{}",
            name, json_flag
        ))?;
        return Ok(());
    }

    let vsock_path = format!("{}/v.sock", abs_dir);
    let resp = mvm_guest::vsock::query_worker_status_at(&vsock_path)
        .with_context(|| format!("Failed to query status for VM '{}'", name))?;

    match resp {
        mvm_guest::vsock::GuestResponse::WorkerStatus {
            status,
            last_busy_at,
        } => {
            // Integration status is best-effort: failures yield an empty list.
            let integrations =
                mvm_guest::vsock::query_integration_status_at(&vsock_path).unwrap_or_default();

            if json {
                let integration_json: Vec<serde_json::Value> = integrations
                    .iter()
                    .map(|ig| {
                        serde_json::json!({
                            "name": ig.name,
                            "status": ig.status,
                            "healthy": ig.health.as_ref().map(|h| h.healthy),
                            "detail": ig.health.as_ref().map(|h| &h.detail),
                            "checked_at": ig.health.as_ref().map(|h| &h.checked_at),
                        })
                    })
                    .collect();
                let obj = serde_json::json!({
                    "name": name,
                    "worker_status": status,
                    "last_busy_at": last_busy_at,
                    "integrations": integration_json,
                });
                println!("{}", serde_json::to_string_pretty(&obj)?);
            } else {
                // Human-readable status lines, then one row per integration.
                ui::status_line("VM:", name);
                ui::status_line("Worker status:", &status);
                let busy = last_busy_at.as_deref().unwrap_or("never");
                ui::status_line("Last busy:", busy);
                if !integrations.is_empty() {
                    println!();
                    ui::status_line(
                        "Integrations:",
                        &format!("{} registered", integrations.len()),
                    );
                    for ig in &integrations {
                        let health_str = match &ig.health {
                            Some(h) if h.healthy => "healthy".to_string(),
                            Some(h) => format!("unhealthy: {}", h.detail),
                            None => "pending".to_string(),
                        };
                        println!(" {:<24} {}", ig.name, health_str);
                    }
                }
            }
            Ok(())
        }
        mvm_guest::vsock::GuestResponse::Error { message } => {
            anyhow::bail!("Guest agent error: {}", message)
        }
        _ => anyhow::bail!("Unexpected response from guest agent"),
    }
}
2070
2071fn list_running_vm_names() -> Result<Vec<String>> {
2073 if bootstrap::is_lima_required() {
2074 lima::require_running()?;
2075 }
2076 let vms = microvm::list_vms().unwrap_or_default();
2077 Ok(vms.into_iter().filter_map(|vm| vm.name).collect())
2078}
2079
2080fn cmd_vm_ping_all() -> Result<()> {
2081 if bootstrap::is_lima_required() {
2083 lima::require_running()?;
2084 shell::run_in_vm_visible("/usr/local/bin/mvm vm ping")?;
2085 return Ok(());
2086 }
2087
2088 let names = list_running_vm_names()?;
2089 if names.is_empty() {
2090 ui::info("No running VMs found.");
2091 return Ok(());
2092 }
2093
2094 let mut any_failed = false;
2095 for name in &names {
2096 let abs_dir = format!(
2097 "{}/{}",
2098 shell::run_in_vm_stdout(&format!("echo {}", config::VMS_DIR))?,
2099 name
2100 );
2101 let vsock_path = format!("{}/v.sock", abs_dir);
2102 match mvm_guest::vsock::ping_at(&vsock_path) {
2103 Ok(true) => ui::success(&format!("VM '{}' is alive (pong received)", name)),
2104 Ok(false) => {
2105 ui::error(&format!("VM '{}' did not respond to ping", name));
2106 any_failed = true;
2107 }
2108 Err(e) => {
2109 ui::error(&format!("VM '{}': {}", name, e));
2110 any_failed = true;
2111 }
2112 }
2113 }
2114 if any_failed {
2115 anyhow::bail!("Some VMs did not respond to ping");
2116 }
2117 Ok(())
2118}
2119
/// `mvm vm status` (no name): status for every running VM, as a JSON array
/// or an aligned text table. Per-VM failures become an error entry/row
/// instead of aborting the whole listing.
fn cmd_vm_status_all(json: bool) -> Result<()> {
    if bootstrap::is_lima_required() {
        // Delegate to the copy of mvm installed inside the Lima VM.
        lima::require_running()?;
        let json_flag = if json { " --json" } else { "" };
        shell::run_in_vm_visible(&format!("/usr/local/bin/mvm vm status{}", json_flag))?;
        return Ok(());
    }

    let names = list_running_vm_names()?;
    if names.is_empty() {
        if json {
            // Keep machine-readable output valid even when nothing runs.
            println!("[]");
        } else {
            ui::info("No running VMs found.");
        }
        return Ok(());
    }

    if json {
        let mut results = Vec::new();
        for name in &names {
            match cmd_vm_status_json(name) {
                Ok(obj) => results.push(obj),
                Err(e) => results.push(serde_json::json!({
                    "name": name,
                    "error": e.to_string(),
                })),
            }
        }
        println!("{}", serde_json::to_string_pretty(&results)?);
    } else {
        // Fixed-width table: NAME, STATUS, LAST BUSY, INTEGRATIONS summary.
        let integ_header = "INTEGRATIONS";
        println!(
            " {:<16} {:<10} {:<24} {}",
            "NAME", "STATUS", "LAST BUSY", integ_header
        );
        println!(" {}", "-".repeat(66));
        for name in &names {
            match cmd_vm_status_row(name) {
                Ok((status, last_busy, integrations)) => {
                    let busy = last_busy.as_deref().unwrap_or("never");
                    println!(
                        " {:<16} {:<10} {:<24} {}",
                        name, status, busy, integrations
                    );
                }
                Err(e) => {
                    println!(" {:<16} {:<10} {}", name, "error", e);
                }
            }
        }
    }
    Ok(())
}
2175
/// Build the JSON status object for one VM, mirroring the JSON branch of
/// `cmd_vm_status` (same fields: name, worker_status, last_busy_at,
/// integrations). Used by `cmd_vm_status_all`.
fn cmd_vm_status_json(name: &str) -> Result<serde_json::Value> {
    let abs_dir = resolve_running_vm(name)?;
    let vsock_path = format!("{}/v.sock", abs_dir);
    let resp = mvm_guest::vsock::query_worker_status_at(&vsock_path)?;
    match resp {
        mvm_guest::vsock::GuestResponse::WorkerStatus {
            status,
            last_busy_at,
        } => {
            // Integration status is best-effort: failures yield an empty list.
            let integrations =
                mvm_guest::vsock::query_integration_status_at(&vsock_path).unwrap_or_default();
            let integration_json: Vec<serde_json::Value> = integrations
                .iter()
                .map(|ig| {
                    serde_json::json!({
                        "name": ig.name,
                        "status": ig.status,
                        "healthy": ig.health.as_ref().map(|h| h.healthy),
                        "detail": ig.health.as_ref().map(|h| &h.detail),
                        "checked_at": ig.health.as_ref().map(|h| &h.checked_at),
                    })
                })
                .collect();
            Ok(serde_json::json!({
                "name": name,
                "worker_status": status,
                "last_busy_at": last_busy_at,
                "integrations": integration_json,
            }))
        }
        mvm_guest::vsock::GuestResponse::Error { message } => {
            anyhow::bail!("Guest agent error: {}", message)
        }
        _ => anyhow::bail!("Unexpected response"),
    }
}
2213
/// Build one table row for `cmd_vm_status_all`: returns (worker status,
/// last-busy timestamp, integration summary such as "2/3 healthy" or "-"
/// when no integrations are registered).
fn cmd_vm_status_row(name: &str) -> Result<(String, Option<String>, String)> {
    let abs_dir = resolve_running_vm(name)?;
    let vsock_path = format!("{}/v.sock", abs_dir);
    let resp = mvm_guest::vsock::query_worker_status_at(&vsock_path)?;
    match resp {
        mvm_guest::vsock::GuestResponse::WorkerStatus {
            status,
            last_busy_at,
        } => {
            // Integration status is best-effort: failures yield an empty list.
            let integrations =
                mvm_guest::vsock::query_integration_status_at(&vsock_path).unwrap_or_default();
            let summary = if integrations.is_empty() {
                "-".to_string()
            } else {
                let healthy = integrations
                    .iter()
                    .filter(|ig| ig.health.as_ref().is_some_and(|h| h.healthy))
                    .count();
                format!("{}/{} healthy", healthy, integrations.len())
            };
            Ok((status, last_busy_at, summary))
        }
        mvm_guest::vsock::GuestResponse::Error { message } => {
            anyhow::bail!("Guest agent error: {}", message)
        }
        _ => anyhow::bail!("Unexpected response"),
    }
}
2243
2244#[cfg(test)]
2249mod tests {
2250 use super::*;
2251 use clap::Parser;
2252
    // `mvm sync` with no flags: every boolean option defaults to false.
    #[test]
    fn test_sync_command_parses() {
        let cli = Cli::try_parse_from(["mvm", "sync"]).unwrap();
        assert!(matches!(
            cli.command,
            Commands::Sync {
                debug: false,
                skip_deps: false,
                force: false,
            }
        ));
    }

    // `--debug` flips only the debug flag.
    #[test]
    fn test_sync_debug_flag() {
        let cli = Cli::try_parse_from(["mvm", "sync", "--debug"]).unwrap();
        assert!(matches!(
            cli.command,
            Commands::Sync {
                debug: true,
                skip_deps: false,
                force: false,
            }
        ));
    }

    // `--skip-deps` flips only the skip_deps flag.
    #[test]
    fn test_sync_skip_deps_flag() {
        let cli = Cli::try_parse_from(["mvm", "sync", "--skip-deps"]).unwrap();
        assert!(matches!(
            cli.command,
            Commands::Sync {
                debug: false,
                skip_deps: true,
                force: false
            }
        ));
    }

    // Both flags together are independent of each other.
    #[test]
    fn test_sync_both_flags() {
        let cli = Cli::try_parse_from(["mvm", "sync", "--debug", "--skip-deps"]).unwrap();
        assert!(matches!(
            cli.command,
            Commands::Sync {
                debug: true,
                skip_deps: true,
                force: false,
            }
        ));
    }
2304
    // Release build script: release flag, per-arch target dir, correct bin/cwd.
    #[test]
    fn test_sync_build_script_release() {
        let script = sync_build_script("/home/user/mvm", false, "aarch64");
        assert!(script.contains("--release"));
        assert!(script.contains("CARGO_TARGET_DIR='target/linux-aarch64'"));
        assert!(script.contains("--bin mvm"));
        assert!(script.contains("cd '/home/user/mvm'"));
    }

    // Debug build script omits --release but keeps the rest.
    #[test]
    fn test_sync_build_script_debug() {
        let script = sync_build_script("/home/user/mvm", true, "aarch64");
        assert!(!script.contains("--release"));
        assert!(script.contains("CARGO_TARGET_DIR='target/linux-aarch64'"));
        assert!(script.contains("--bin mvm"));
    }

    // Target dir tracks the requested architecture.
    #[test]
    fn test_sync_build_script_x86_64() {
        let script = sync_build_script("/home/user/mvm", false, "x86_64");
        assert!(script.contains("CARGO_TARGET_DIR='target/linux-x86_64'"));
    }

    // Release install script copies from the release target dir via install.
    #[test]
    fn test_sync_install_script_release() {
        let script = sync_install_script("/home/user/mvm", false, "aarch64");
        assert!(script.contains("/target/linux-aarch64/release/mvm"));
        assert!(script.contains("/usr/local/bin/"));
        assert!(script.contains("install -m 0755"));
    }

    // Debug install script copies from the debug target dir.
    #[test]
    fn test_sync_install_script_debug() {
        let script = sync_install_script("/home/user/mvm", true, "aarch64");
        assert!(script.contains("/target/linux-aarch64/debug/mvm"));
    }

    // Dependency script queries dpkg before installing anything.
    #[test]
    fn test_sync_deps_script_checks_before_installing() {
        let script = sync_deps_script();
        assert!(script.contains("dpkg -s"));
        assert!(script.contains("apt-get install"));
    }

    // Rustup script is idempotent: checks for rustup, updates, verifies rustc.
    #[test]
    fn test_sync_rustup_script_idempotent() {
        let script = sync_rustup_script();
        assert!(script.contains("command -v rustup"));
        assert!(script.contains("rustup update stable"));
        assert!(script.contains("rustup.rs"));
        assert!(script.contains("rustc --version"));
    }
2357
    // `mvm build --flake . --profile gateway` carries both values through.
    #[test]
    fn test_build_flake_with_profile() {
        let cli =
            Cli::try_parse_from(["mvm", "build", "--flake", ".", "--profile", "gateway"]).unwrap();
        match cli.command {
            Commands::Build { flake, profile, .. } => {
                assert_eq!(flake.as_deref(), Some("."));
                assert_eq!(profile.as_deref(), Some("gateway"));
            }
            _ => panic!("Expected Build command"),
        }
    }

    // Omitting --profile leaves it None.
    #[test]
    fn test_build_flake_defaults_to_no_profile() {
        let cli = Cli::try_parse_from(["mvm", "build", "--flake", "."]).unwrap();
        match cli.command {
            Commands::Build { flake, profile, .. } => {
                assert_eq!(flake.as_deref(), Some("."));
                assert!(profile.is_none(), "profile should be None when omitted");
            }
            _ => panic!("Expected Build command"),
        }
    }

    // Positional-path (Mvmfile) mode is unaffected by the flake option.
    #[test]
    fn test_build_mvmfile_mode_still_works() {
        let cli = Cli::try_parse_from(["mvm", "build", "myimage"]).unwrap();
        match cli.command {
            Commands::Build { path, flake, .. } => {
                assert_eq!(path, "myimage");
                assert!(flake.is_none(), "Mvmfile mode should have no --flake");
            }
            _ => panic!("Expected Build command"),
        }
    }

    // Remote refs (containing ':') pass through untouched.
    #[test]
    fn test_resolve_flake_ref_remote_passthrough() {
        let resolved = resolve_flake_ref("github:user/repo").unwrap();
        assert_eq!(resolved, "github:user/repo");
    }

    // Attribute fragments on remote refs are preserved too.
    #[test]
    fn test_resolve_flake_ref_remote_with_path() {
        let resolved = resolve_flake_ref("github:user/repo#attr").unwrap();
        assert_eq!(resolved, "github:user/repo#attr");
    }

    // Local paths are canonicalized (macOS symlinks /tmp -> /private/tmp).
    #[test]
    fn test_resolve_flake_ref_absolute_path() {
        let resolved = resolve_flake_ref("/tmp").unwrap();
        assert!(
            resolved == "/tmp" || resolved == "/private/tmp",
            "unexpected resolved path: {}",
            resolved
        );
    }

    // Nonexistent local paths are an error, not a passthrough.
    #[test]
    fn test_resolve_flake_ref_nonexistent_fails() {
        let result = resolve_flake_ref("/nonexistent/path/that/does/not/exist");
        assert!(result.is_err());
    }
2425
    // All `mvm run` flags parse into the expected Run fields.
    #[test]
    fn test_run_parses_all_flags() {
        let cli = Cli::try_parse_from([
            "mvm",
            "run",
            "--flake",
            ".",
            "--profile",
            "full",
            "--cpus",
            "4",
            "--memory",
            "2048",
        ])
        .unwrap();
        match cli.command {
            Commands::Run {
                flake,
                profile,
                cpus,
                memory,
                name: _,
                volume: _,
                config: _,
            } => {
                assert_eq!(flake, ".");
                assert_eq!(profile.as_deref(), Some("full"));
                assert_eq!(cpus, Some(4));
                assert_eq!(memory, Some(2048));
            }
            _ => panic!("Expected Run command"),
        }
    }

    // Defaults: clap supplies cpus=2 / memory=1024; name/profile stay None.
    #[test]
    fn test_run_defaults() {
        let cli = Cli::try_parse_from(["mvm", "run", "--flake", "."]).unwrap();
        match cli.command {
            Commands::Run {
                flake,
                name,
                profile,
                cpus,
                memory,
                config: _,
                volume,
            } => {
                assert_eq!(flake, ".");
                assert!(name.is_none(), "name should be None when omitted");
                assert!(profile.is_none(), "profile should be None when omitted");
                assert_eq!(cpus, Some(2));
                assert_eq!(memory, Some(1024));
                assert_eq!(volume.len(), 0);
            }
            _ => panic!("Expected Run command"),
        }
    }

    // --flake is mandatory for `mvm run`.
    #[test]
    fn test_run_requires_flake() {
        let result = Cli::try_parse_from(["mvm", "run"]);
        assert!(result.is_err(), "run should require --flake");
    }
2491
    // `mvm vm ping <name>` carries the VM name through.
    #[test]
    fn test_vm_ping_parses() {
        let cli = Cli::try_parse_from(["mvm", "vm", "ping", "happy-panda"]).unwrap();
        match cli.command {
            Commands::Vm {
                action: VmCmd::Ping { name },
            } => {
                assert_eq!(name.as_deref(), Some("happy-panda"));
            }
            _ => panic!("Expected Vm Ping command"),
        }
    }

    // `mvm vm ping` without a name means "ping all VMs".
    #[test]
    fn test_vm_ping_no_name_targets_all() {
        let cli = Cli::try_parse_from(["mvm", "vm", "ping"]).unwrap();
        match cli.command {
            Commands::Vm {
                action: VmCmd::Ping { name },
            } => {
                assert!(name.is_none(), "no name means ping all");
            }
            _ => panic!("Expected Vm Ping command"),
        }
    }

    // `mvm vm status <name>` parses with json defaulting to false.
    #[test]
    fn test_vm_status_parses() {
        let cli = Cli::try_parse_from(["mvm", "vm", "status", "my-vm"]).unwrap();
        match cli.command {
            Commands::Vm {
                action: VmCmd::Status { name, json },
            } => {
                assert_eq!(name.as_deref(), Some("my-vm"));
                assert!(!json);
            }
            _ => panic!("Expected Vm Status command"),
        }
    }

    // `mvm vm status` without a name means "status of all VMs".
    #[test]
    fn test_vm_status_no_name_targets_all() {
        let cli = Cli::try_parse_from(["mvm", "vm", "status"]).unwrap();
        match cli.command {
            Commands::Vm {
                action: VmCmd::Status { name, json },
            } => {
                assert!(name.is_none(), "no name means status all");
                assert!(!json);
            }
            _ => panic!("Expected Vm Status command"),
        }
    }

    // `--json` flag is honored alongside a VM name.
    #[test]
    fn test_vm_status_json_flag() {
        let cli = Cli::try_parse_from(["mvm", "vm", "status", "my-vm", "--json"]).unwrap();
        match cli.command {
            Commands::Vm {
                action: VmCmd::Status { name, json },
            } => {
                assert_eq!(name.as_deref(), Some("my-vm"));
                assert!(json);
            }
            _ => panic!("Expected Vm Status command"),
        }
    }

    // Bare `mvm vm` is rejected: a subcommand is required.
    #[test]
    fn test_vm_requires_subcommand() {
        let result = Cli::try_parse_from(["mvm", "vm"]);
        assert!(result.is_err(), "vm should require a subcommand");
    }
2567
2568 #[test]
2571 fn test_up_parses_no_args() {
2572 let cli = Cli::try_parse_from(["mvm", "up"]).unwrap();
2573 match cli.command {
2574 Commands::Up {
2575 name,
2576 config,
2577 flake,
2578 profile,
2579 cpus,
2580 memory,
2581 } => {
2582 assert!(name.is_none());
2583 assert!(config.is_none());
2584 assert!(flake.is_none());
2585 assert!(profile.is_none());
2586 assert!(cpus.is_none());
2587 assert!(memory.is_none());
2588 }
2589 _ => panic!("Expected Up command"),
2590 }
2591 }
2592
2593 #[test]
2594 fn test_up_parses_with_flake() {
2595 let cli = Cli::try_parse_from(["mvm", "up", "--flake", "./nix/openclaw/"]).unwrap();
2596 match cli.command {
2597 Commands::Up { flake, name, .. } => {
2598 assert_eq!(flake.as_deref(), Some("./nix/openclaw/"));
2599 assert!(name.is_none());
2600 }
2601 _ => panic!("Expected Up command"),
2602 }
2603 }
2604
2605 #[test]
2606 fn test_up_parses_with_all_flags() {
2607 let cli = Cli::try_parse_from([
2608 "mvm",
2609 "up",
2610 "gw",
2611 "-f",
2612 "fleet.toml",
2613 "--flake",
2614 ".",
2615 "--profile",
2616 "gateway",
2617 "--cpus",
2618 "4",
2619 "--memory",
2620 "2048",
2621 ])
2622 .unwrap();
2623 match cli.command {
2624 Commands::Up {
2625 name,
2626 config,
2627 flake,
2628 profile,
2629 cpus,
2630 memory,
2631 } => {
2632 assert_eq!(name.as_deref(), Some("gw"));
2633 assert_eq!(config.as_deref(), Some("fleet.toml"));
2634 assert_eq!(flake.as_deref(), Some("."));
2635 assert_eq!(profile.as_deref(), Some("gateway"));
2636 assert_eq!(cpus, Some(4));
2637 assert_eq!(memory, Some(2048));
2638 }
2639 _ => panic!("Expected Up command"),
2640 }
2641 }
2642
2643 #[test]
2644 fn test_down_parses_no_args() {
2645 let cli = Cli::try_parse_from(["mvm", "down"]).unwrap();
2646 match cli.command {
2647 Commands::Down { name, config } => {
2648 assert!(name.is_none());
2649 assert!(config.is_none());
2650 }
2651 _ => panic!("Expected Down command"),
2652 }
2653 }
2654
2655 #[test]
2656 fn test_down_parses_with_name() {
2657 let cli = Cli::try_parse_from(["mvm", "down", "gw"]).unwrap();
2658 match cli.command {
2659 Commands::Down { name, config } => {
2660 assert_eq!(name.as_deref(), Some("gw"));
2661 assert!(config.is_none());
2662 }
2663 _ => panic!("Expected Down command"),
2664 }
2665 }
2666
2667 #[test]
2668 fn test_down_parses_with_config() {
2669 let cli = Cli::try_parse_from(["mvm", "down", "-f", "my-fleet.toml"]).unwrap();
2670 match cli.command {
2671 Commands::Down { name, config } => {
2672 assert!(name.is_none());
2673 assert_eq!(config.as_deref(), Some("my-fleet.toml"));
2674 }
2675 _ => panic!("Expected Down command"),
2676 }
2677 }
2678
2679 #[test]
2682 fn test_release_dry_run_parses() {
2683 let cli = Cli::try_parse_from(["mvm", "release", "--dry-run"]).unwrap();
2684 match cli.command {
2685 Commands::Release {
2686 dry_run,
2687 guard_only,
2688 } => {
2689 assert!(dry_run);
2690 assert!(!guard_only);
2691 }
2692 _ => panic!("Expected Release command"),
2693 }
2694 }
2695
2696 #[test]
2697 fn test_release_guard_only_parses() {
2698 let cli = Cli::try_parse_from(["mvm", "release", "--guard-only"]).unwrap();
2699 match cli.command {
2700 Commands::Release {
2701 dry_run,
2702 guard_only,
2703 } => {
2704 assert!(!dry_run);
2705 assert!(guard_only);
2706 }
2707 _ => panic!("Expected Release command"),
2708 }
2709 }
2710
2711 #[test]
2712 fn test_release_no_flags_parses() {
2713 let cli = Cli::try_parse_from(["mvm", "release"]).unwrap();
2714 match cli.command {
2715 Commands::Release {
2716 dry_run,
2717 guard_only,
2718 } => {
2719 assert!(!dry_run);
2720 assert!(!guard_only);
2721 }
2722 _ => panic!("Expected Release command"),
2723 }
2724 }
2725
2726 #[test]
2727 fn test_extract_workspace_version() {
2728 let toml = r#"
2729[workspace]
2730members = ["crates/mvm-core"]
2731
2732[workspace.package]
2733version = "1.2.3"
2734edition = "2024"
2735"#;
2736 let version = extract_workspace_version(toml).unwrap();
2737 assert_eq!(version, "1.2.3");
2738 }
2739
2740 #[test]
2741 fn test_extract_workspace_version_missing() {
2742 let toml = "[workspace]\nmembers = []";
2743 let result = extract_workspace_version(toml);
2744 assert!(result.is_err());
2745 }
2746
2747 #[test]
2748 fn test_publish_crates_order() {
2749 assert_eq!(PUBLISH_CRATES[0], "mvm-core");
2751 assert_eq!(*PUBLISH_CRATES.last().unwrap(), "mvm");
2752 }
2753}