1use std::path::Path;
7
/// Captured result of one external command invocation.
#[derive(Debug, Clone)]
pub struct CommandOutput {
    pub stdout: String,
    pub stderr: String,
    pub exit_code: i32,
    pub success: bool,
}

impl CommandOutput {
    /// Build an output for a run that succeeded with exit code 0 and no
    /// stderr.
    #[must_use]
    pub fn success(stdout: impl Into<String>) -> Self {
        // Exit code 0 makes `with_output` derive `success: true`.
        Self::with_output(stdout, String::new(), 0)
    }

    /// Build an output for a failed run with the given exit code and stderr.
    ///
    /// The result is always marked unsuccessful, even if `exit_code` is 0
    /// (e.g. a spawn failure reported with a synthetic code).
    #[must_use]
    pub fn failure(exit_code: i32, stderr: impl Into<String>) -> Self {
        Self {
            stdout: String::new(),
            stderr: stderr.into(),
            exit_code,
            success: false,
        }
    }

    /// Build an output from both captured streams and an exit code; the
    /// `success` flag is derived from `exit_code == 0`.
    #[must_use]
    pub fn with_output(
        stdout: impl Into<String>,
        stderr: impl Into<String>,
        exit_code: i32,
    ) -> Self {
        let success = exit_code == 0;
        Self {
            stdout: stdout.into(),
            stderr: stderr.into(),
            exit_code,
            success,
        }
    }
}
59
/// Abstraction over every external process this crate shells out to
/// (the `apr` binary, `ollama`, and `curl` in the real implementation),
/// so tests can substitute a mock.
pub trait CommandRunner: Send + Sync {
    /// Run text inference on a model (`apr run` in the real runner).
    fn run_inference(
        &self,
        model_path: &Path,
        prompt: &str,
        max_tokens: u32,
        no_gpu: bool,
        extra_args: &[&str],
    ) -> CommandOutput;

    /// Convert a model between formats (`apr rosetta convert`).
    fn convert_model(&self, source: &Path, target: &Path) -> CommandOutput;

    /// Inspect model metadata in human-readable form (`apr rosetta inspect`).
    fn inspect_model(&self, model_path: &Path) -> CommandOutput;

    /// Validate a model file (`apr validate`).
    fn validate_model(&self, model_path: &Path) -> CommandOutput;

    /// Benchmark a model (`apr bench`).
    fn bench_model(&self, model_path: &Path) -> CommandOutput;

    /// Run safety/sanity checks on a model (`apr check`).
    fn check_model(&self, model_path: &Path) -> CommandOutput;

    /// Profile a model with explicit warmup/measure iteration counts
    /// (`apr profile`).
    fn profile_model(&self, model_path: &Path, warmup: u32, measure: u32) -> CommandOutput;

    /// Profile in CI mode with optional throughput/p99 assertions
    /// (`apr profile --ci --json`).
    fn profile_ci(
        &self,
        model_path: &Path,
        min_throughput: Option<f64>,
        max_p99: Option<f64>,
        warmup: u32,
        measure: u32,
    ) -> CommandOutput;

    /// Compare tensors of two models (`apr rosetta diff-tensors`).
    fn diff_tensors(&self, model_a: &Path, model_b: &Path, json: bool) -> CommandOutput;

    /// Compare inference outputs of two models within a tolerance
    /// (`apr rosetta compare-inference --json`).
    fn compare_inference(
        &self,
        model_a: &Path,
        model_b: &Path,
        prompt: &str,
        max_tokens: u32,
        tolerance: f64,
    ) -> CommandOutput;

    /// Run a short inference with profiling enabled, writing a flamegraph
    /// to `output_path`.
    fn profile_with_flamegraph(
        &self,
        model_path: &Path,
        output_path: &Path,
        no_gpu: bool,
    ) -> CommandOutput;

    /// Run a short inference with profiling focused on one subsystem.
    fn profile_with_focus(&self, model_path: &Path, focus: &str, no_gpu: bool) -> CommandOutput;

    /// Strict JSON validation (`apr validate --strict --json`).
    fn validate_model_strict(&self, model_path: &Path) -> CommandOutput;

    /// Capture tensor statistics fingerprint (`apr rosetta fingerprint`).
    fn fingerprint_model(&self, model_path: &Path, json: bool) -> CommandOutput;

    /// Compare two fingerprint files (`apr rosetta validate-stats`).
    fn validate_stats(&self, fp_a: &Path, fp_b: &Path) -> CommandOutput;

    /// Download a model from a HuggingFace repo (`apr pull --json`).
    fn pull_model(&self, hf_repo: &str) -> CommandOutput;

    /// Inspect model metadata as JSON (`apr rosetta inspect --json`).
    fn inspect_model_json(&self, model_path: &Path) -> CommandOutput;

    /// Run inference through the `ollama` CLI.
    fn run_ollama_inference(
        &self,
        model_tag: &str,
        prompt: &str,
        temperature: f64,
    ) -> CommandOutput;

    /// Pull a model via `ollama pull`.
    fn pull_ollama_model(&self, model_tag: &str) -> CommandOutput;

    /// Create a model from a Modelfile via `ollama create`.
    fn create_ollama_model(&self, model_tag: &str, modelfile_path: &Path) -> CommandOutput;

    /// Serve a model over HTTP, blocking (`apr serve`).
    fn serve_model(&self, model_path: &Path, port: u16) -> CommandOutput;

    /// Issue an HTTP GET (via `curl` in the real runner).
    fn http_get(&self, url: &str) -> CommandOutput;

    /// Profile memory usage (`apr profile --memory --json`).
    fn profile_memory(&self, model_path: &Path) -> CommandOutput;

    /// Run interactive chat, feeding `prompt` on stdin (`apr chat`).
    fn run_chat(
        &self,
        model_path: &Path,
        prompt: &str,
        no_gpu: bool,
        extra_args: &[&str],
    ) -> CommandOutput;

    /// Issue an HTTP POST with a JSON body (via `curl` in the real runner).
    fn http_post(&self, url: &str, body: &str) -> CommandOutput;

    /// Spawn `apr serve` detached in the background; on success stdout
    /// carries the child PID.
    fn spawn_serve(&self, model_path: &Path, port: u16, no_gpu: bool) -> CommandOutput;
}
190
/// `CommandRunner` that shells out to the real `apr` binary (and to
/// external tools such as `ollama` and `curl`).
#[derive(Debug, Clone)]
pub struct RealCommandRunner {
    /// Name or path of the `apr` executable to invoke.
    pub apr_binary: String,
}
197
impl Default for RealCommandRunner {
    /// Equivalent to [`RealCommandRunner::new`]: uses the `apr` binary
    /// found on `PATH`.
    fn default() -> Self {
        Self::new()
    }
}
203
204impl RealCommandRunner {
205 #[must_use]
207 pub fn new() -> Self {
208 Self {
209 apr_binary: "apr".to_string(),
210 }
211 }
212
213 #[must_use]
215 pub fn with_binary(apr_binary: impl Into<String>) -> Self {
216 Self {
217 apr_binary: apr_binary.into(),
218 }
219 }
220
221 fn execute(&self, args: &[&str]) -> CommandOutput {
222 use std::process::Command;
223
224 match Command::new(&self.apr_binary).args(args).output() {
225 Ok(output) => CommandOutput {
226 stdout: String::from_utf8_lossy(&output.stdout).to_string(),
227 stderr: String::from_utf8_lossy(&output.stderr).to_string(),
228 exit_code: output.status.code().unwrap_or(-1),
229 success: output.status.success(),
230 },
231 Err(e) => CommandOutput::failure(-1, format!("Failed to execute command: {e}")),
232 }
233 }
234}
235
// Real implementation: every method builds a CLI argument vector and shells
// out via `execute` (for `apr`) or directly via `std::process::Command`
// (for `ollama` / `curl`).
impl CommandRunner for RealCommandRunner {
    // `apr run <model> -p <prompt> --max-tokens <n> [--no-gpu] <extra...>`
    fn run_inference(
        &self,
        model_path: &Path,
        prompt: &str,
        max_tokens: u32,
        no_gpu: bool,
        extra_args: &[&str],
    ) -> CommandOutput {
        // Owned strings kept alive in locals so `args` can borrow them.
        let model_str = model_path.display().to_string();
        let max_tokens_str = max_tokens.to_string();

        let mut args = vec![
            "run",
            &model_str,
            "-p",
            prompt,
            "--max-tokens",
            &max_tokens_str,
        ];

        if no_gpu {
            args.push("--no-gpu");
        }

        args.extend(extra_args.iter());
        self.execute(&args)
    }

    fn convert_model(&self, source: &Path, target: &Path) -> CommandOutput {
        let source_str = source.display().to_string();
        let target_str = target.display().to_string();
        self.execute(&["rosetta", "convert", &source_str, &target_str])
    }

    fn inspect_model(&self, model_path: &Path) -> CommandOutput {
        let path_str = model_path.display().to_string();
        self.execute(&["rosetta", "inspect", &path_str])
    }

    fn validate_model(&self, model_path: &Path) -> CommandOutput {
        let path_str = model_path.display().to_string();
        self.execute(&["validate", &path_str])
    }

    fn validate_model_strict(&self, model_path: &Path) -> CommandOutput {
        let path_str = model_path.display().to_string();
        self.execute(&["validate", "--strict", "--json", &path_str])
    }

    fn bench_model(&self, model_path: &Path) -> CommandOutput {
        let path_str = model_path.display().to_string();
        self.execute(&["bench", &path_str])
    }

    fn check_model(&self, model_path: &Path) -> CommandOutput {
        let path_str = model_path.display().to_string();
        self.execute(&["check", &path_str])
    }

    fn profile_model(&self, model_path: &Path, warmup: u32, measure: u32) -> CommandOutput {
        let path_str = model_path.display().to_string();
        let warmup_str = warmup.to_string();
        let measure_str = measure.to_string();
        self.execute(&[
            "profile",
            &path_str,
            "--warmup",
            &warmup_str,
            "--measure",
            &measure_str,
        ])
    }

    fn profile_ci(
        &self,
        model_path: &Path,
        min_throughput: Option<f64>,
        max_p99: Option<f64>,
        warmup: u32,
        measure: u32,
    ) -> CommandOutput {
        let path_str = model_path.display().to_string();
        let warmup_str = warmup.to_string();
        let measure_str = measure.to_string();

        let mut args = vec![
            "profile",
            &path_str,
            "--ci",
            "--warmup",
            &warmup_str,
            "--measure",
            &measure_str,
            "--json",
        ];

        // Declared in the outer scope so the &str pushed into `args`
        // outlives the `if let` block.
        let throughput_str;
        if let Some(t) = min_throughput {
            throughput_str = t.to_string();
            args.push("--assert-throughput");
            args.push(&throughput_str);
        }

        // Same outer-scope trick as `throughput_str` above.
        let p99_str;
        if let Some(p) = max_p99 {
            p99_str = p.to_string();
            args.push("--assert-p99");
            args.push(&p99_str);
        }

        self.execute(&args)
    }

    fn diff_tensors(&self, model_a: &Path, model_b: &Path, json: bool) -> CommandOutput {
        let a_str = model_a.display().to_string();
        let b_str = model_b.display().to_string();

        let mut args = vec!["rosetta", "diff-tensors", &a_str, &b_str];
        if json {
            args.push("--json");
        }
        self.execute(&args)
    }

    fn compare_inference(
        &self,
        model_a: &Path,
        model_b: &Path,
        prompt: &str,
        max_tokens: u32,
        tolerance: f64,
    ) -> CommandOutput {
        let a_str = model_a.display().to_string();
        let b_str = model_b.display().to_string();
        let max_tokens_str = max_tokens.to_string();
        let tolerance_str = tolerance.to_string();

        self.execute(&[
            "rosetta",
            "compare-inference",
            &a_str,
            &b_str,
            "--prompt",
            prompt,
            "--max-tokens",
            &max_tokens_str,
            "--tolerance",
            &tolerance_str,
            "--json",
        ])
    }

    // Uses a tiny fixed prompt ("Hello", 4 tokens) — the point is the
    // profile artifact, not the generated text.
    fn profile_with_flamegraph(
        &self,
        model_path: &Path,
        output_path: &Path,
        no_gpu: bool,
    ) -> CommandOutput {
        let model_str = model_path.display().to_string();
        let output_str = output_path.display().to_string();

        let mut args = vec![
            "run",
            &model_str,
            "-p",
            "Hello",
            "--max-tokens",
            "4",
            "--profile",
            "--profile-output",
            &output_str,
        ];

        if no_gpu {
            args.push("--no-gpu");
        }

        self.execute(&args)
    }

    // Same fixed prompt as `profile_with_flamegraph`, but with `--focus`.
    fn profile_with_focus(&self, model_path: &Path, focus: &str, no_gpu: bool) -> CommandOutput {
        let model_str = model_path.display().to_string();

        let mut args = vec![
            "run",
            &model_str,
            "-p",
            "Hello",
            "--max-tokens",
            "4",
            "--profile",
            "--focus",
            focus,
        ];

        if no_gpu {
            args.push("--no-gpu");
        }

        self.execute(&args)
    }

    fn fingerprint_model(&self, model_path: &Path, json: bool) -> CommandOutput {
        let path_str = model_path.display().to_string();
        let mut args = vec!["rosetta", "fingerprint", &path_str];
        if json {
            args.push("--json");
        }
        self.execute(&args)
    }

    fn validate_stats(&self, fp_a: &Path, fp_b: &Path) -> CommandOutput {
        let a_str = fp_a.display().to_string();
        let b_str = fp_b.display().to_string();
        self.execute(&["rosetta", "validate-stats", &a_str, "--reference", &b_str])
    }

    fn pull_model(&self, hf_repo: &str) -> CommandOutput {
        self.execute(&["pull", "--json", hf_repo])
    }

    fn inspect_model_json(&self, model_path: &Path) -> CommandOutput {
        let path_str = model_path.display().to_string();
        self.execute(&["rosetta", "inspect", "--json", &path_str])
    }

    // Runs the external `ollama` CLI directly, not `self.apr_binary`.
    fn run_ollama_inference(
        &self,
        model_tag: &str,
        prompt: &str,
        temperature: f64,
    ) -> CommandOutput {
        use std::process::Command;

        let temp_str = temperature.to_string();
        match Command::new("ollama")
            .args(["run", model_tag, "--temp", &temp_str])
            .arg(prompt)
            .output()
        {
            Ok(output) => CommandOutput {
                stdout: String::from_utf8_lossy(&output.stdout).to_string(),
                stderr: String::from_utf8_lossy(&output.stderr).to_string(),
                exit_code: output.status.code().unwrap_or(-1),
                success: output.status.success(),
            },
            Err(e) => CommandOutput::failure(-1, format!("Failed to execute ollama: {e}")),
        }
    }

    fn pull_ollama_model(&self, model_tag: &str) -> CommandOutput {
        use std::process::Command;

        match Command::new("ollama").args(["pull", model_tag]).output() {
            Ok(output) => CommandOutput {
                stdout: String::from_utf8_lossy(&output.stdout).to_string(),
                stderr: String::from_utf8_lossy(&output.stderr).to_string(),
                exit_code: output.status.code().unwrap_or(-1),
                success: output.status.success(),
            },
            Err(e) => CommandOutput::failure(-1, format!("Failed to execute ollama: {e}")),
        }
    }

    fn create_ollama_model(&self, model_tag: &str, modelfile_path: &Path) -> CommandOutput {
        use std::process::Command;

        let path_str = modelfile_path.display().to_string();
        match Command::new("ollama")
            .args(["create", model_tag, "-f", &path_str])
            .output()
        {
            Ok(output) => CommandOutput {
                stdout: String::from_utf8_lossy(&output.stdout).to_string(),
                stderr: String::from_utf8_lossy(&output.stderr).to_string(),
                exit_code: output.status.code().unwrap_or(-1),
                success: output.status.success(),
            },
            Err(e) => CommandOutput::failure(-1, format!("Failed to execute ollama create: {e}")),
        }
    }

    // Blocking serve; see `spawn_serve` for the detached variant.
    fn serve_model(&self, model_path: &Path, port: u16) -> CommandOutput {
        let model_str = model_path.display().to_string();
        let port_str = port.to_string();
        self.execute(&["serve", &model_str, "--port", &port_str])
    }

    // `-s` silent, `-m 10` = 10-second timeout.
    fn http_get(&self, url: &str) -> CommandOutput {
        use std::process::Command;

        match Command::new("curl").args(["-s", "-m", "10", url]).output() {
            Ok(output) => CommandOutput {
                stdout: String::from_utf8_lossy(&output.stdout).to_string(),
                stderr: String::from_utf8_lossy(&output.stderr).to_string(),
                exit_code: output.status.code().unwrap_or(-1),
                success: output.status.success(),
            },
            Err(e) => CommandOutput::failure(-1, format!("Failed to execute curl: {e}")),
        }
    }

    fn profile_memory(&self, model_path: &Path) -> CommandOutput {
        let path_str = model_path.display().to_string();
        self.execute(&["profile", &path_str, "--memory", "--json"])
    }

    // Pipes `prompt` (plus a newline) into `apr chat`'s stdin, then waits
    // for the process to exit and collects its output.
    fn run_chat(
        &self,
        model_path: &Path,
        prompt: &str,
        no_gpu: bool,
        extra_args: &[&str],
    ) -> CommandOutput {
        use std::io::Write;
        use std::process::{Command, Stdio};

        let model_str = model_path.display().to_string();
        let mut args = vec!["chat", &model_str];
        if no_gpu {
            args.push("--no-gpu");
        }
        args.extend(extra_args.iter());

        match Command::new(&self.apr_binary)
            .args(&args)
            .stdin(Stdio::piped())
            .stdout(Stdio::piped())
            .stderr(Stdio::piped())
            .spawn()
        {
            Ok(mut child) => {
                if let Some(mut stdin) = child.stdin.take() {
                    // Write errors are deliberately ignored: the child may
                    // have exited already, and wait_with_output reports that.
                    let _ = stdin.write_all(prompt.as_bytes());
                    let _ = stdin.write_all(b"\n");
                }
                match child.wait_with_output() {
                    Ok(output) => CommandOutput {
                        stdout: String::from_utf8_lossy(&output.stdout).to_string(),
                        stderr: String::from_utf8_lossy(&output.stderr).to_string(),
                        exit_code: output.status.code().unwrap_or(-1),
                        success: output.status.success(),
                    },
                    Err(e) => CommandOutput::failure(-1, format!("Failed to wait for chat: {e}")),
                }
            }
            Err(e) => CommandOutput::failure(-1, format!("Failed to execute chat: {e}")),
        }
    }

    // `-m 120` allows long-running inference behind the POST.
    fn http_post(&self, url: &str, body: &str) -> CommandOutput {
        use std::process::Command;

        match Command::new("curl")
            .args([
                "-s",
                "-m",
                "120",
                "-X",
                "POST",
                "-H",
                "Content-Type: application/json",
                "-d",
                body,
                url,
            ])
            .output()
        {
            Ok(output) => CommandOutput {
                stdout: String::from_utf8_lossy(&output.stdout).to_string(),
                stderr: String::from_utf8_lossy(&output.stderr).to_string(),
                exit_code: output.status.code().unwrap_or(-1),
                success: output.status.success(),
            },
            Err(e) => CommandOutput::failure(-1, format!("Failed to execute curl POST: {e}")),
        }
    }

    // Detached background serve: the child is intentionally not waited on;
    // its PID is returned in stdout so the caller can manage/kill it.
    fn spawn_serve(&self, model_path: &Path, port: u16, no_gpu: bool) -> CommandOutput {
        use std::process::{Command, Stdio};

        let model_str = model_path.display().to_string();
        let port_str = port.to_string();
        let mut args = vec!["serve", &model_str, "--port", &port_str];
        if no_gpu {
            args.push("--no-gpu");
        }

        match Command::new(&self.apr_binary)
            .args(&args)
            .stdout(Stdio::null())
            .stderr(Stdio::null())
            .spawn()
        {
            Ok(child) => CommandOutput::success(format!("{}", child.id())),
            Err(e) => CommandOutput::failure(-1, format!("Failed to spawn serve: {e}")),
        }
    }
}
636
/// Scriptable `CommandRunner` for tests: every operation's outcome is
/// controlled by a flag or canned response on this struct. Built via
/// `Default`/`new` plus the `with_*` builder methods.
#[derive(Debug, Clone)]
#[allow(clippy::struct_excessive_bools)]
pub struct MockCommandRunner {
    /// Canned response text used by `run_inference` for generic prompts.
    pub inference_response: String,
    /// Whether `run_inference` succeeds.
    pub inference_success: bool,
    /// Whether `convert_model` succeeds.
    pub convert_success: bool,
    /// Throughput (tokens/sec) reported in inference/bench/profile output.
    pub tps: f64,
    /// If set, `run_inference` simulates a SIGSEGV crash (exit code -11).
    pub crash: bool,
    /// Optional stderr emitted alongside an otherwise successful inference.
    pub inference_stderr: Option<String>,
    /// If set, `profile_ci` reports that `--ci` mode is unsupported.
    pub profile_ci_unavailable: bool,
    /// Custom stderr for the `profile_ci_unavailable` case.
    pub profile_ci_stderr: Option<String>,
    /// Whether `inspect_model` succeeds.
    pub inspect_success: bool,
    /// Whether `validate_model` succeeds.
    pub validate_success: bool,
    /// Whether `bench_model` succeeds.
    pub bench_success: bool,
    /// Whether `check_model` succeeds.
    pub check_success: bool,
    /// Whether `profile_model` succeeds.
    pub profile_success: bool,
    /// Whether `diff_tensors` succeeds.
    pub diff_tensors_success: bool,
    /// Whether `compare_inference` succeeds.
    pub compare_inference_success: bool,
    /// If set, `run_inference` returns exactly this exit code.
    pub custom_exit_code: Option<i32>,
    /// Whether `profile_with_flamegraph` succeeds.
    pub profile_flamegraph_success: bool,
    /// Whether `profile_with_focus` succeeds.
    pub profile_focus_success: bool,
    /// Whether `fingerprint_model` succeeds.
    pub fingerprint_success: bool,
    /// Whether `validate_stats` succeeds.
    pub validate_stats_success: bool,
    /// Whether `validate_model_strict` succeeds.
    pub validate_strict_success: bool,
    /// Whether `pull_model` succeeds.
    pub pull_success: bool,
    /// Path echoed in `pull_model`'s stdout on success.
    pub pull_model_path: String,
    /// Whether `inspect_model_json` succeeds.
    pub inspect_json_success: bool,
    /// Tensor names reported by `inspect_model_json`.
    pub inspect_tensor_names: Vec<String>,
    /// Whether `run_ollama_inference` succeeds.
    pub ollama_success: bool,
    /// Canned ollama inference response.
    pub ollama_response: String,
    /// Whether `pull_ollama_model` succeeds.
    pub ollama_pull_success: bool,
    /// Whether `create_ollama_model` succeeds.
    pub ollama_create_success: bool,
    /// Whether `serve_model` succeeds.
    pub serve_success: bool,
    /// Whether `http_get` succeeds.
    pub http_get_success: bool,
    /// Canned body returned by `http_get`.
    pub http_get_response: String,
    /// Whether `profile_memory` succeeds.
    pub profile_memory_success: bool,
    /// Whether `run_chat` succeeds.
    pub chat_success: bool,
    /// Canned response used by `run_chat` for generic prompts.
    pub chat_response: String,
    /// Whether `http_post` succeeds.
    pub http_post_success: bool,
    /// Canned body returned by `http_post`.
    pub http_post_response: String,
    /// Whether `spawn_serve` succeeds.
    pub spawn_serve_success: bool,
}
721
impl Default for MockCommandRunner {
    /// "Happy path" configuration: every operation succeeds and canned
    /// responses mimic a small Llama-style model.
    fn default() -> Self {
        Self {
            inference_response: "The answer is 4.".to_string(),
            inference_success: true,
            convert_success: true,
            tps: 25.0,
            crash: false,
            inference_stderr: None,
            profile_ci_unavailable: false,
            profile_ci_stderr: None,
            inspect_success: true,
            validate_success: true,
            bench_success: true,
            check_success: true,
            profile_success: true,
            diff_tensors_success: true,
            compare_inference_success: true,
            custom_exit_code: None,
            profile_flamegraph_success: true,
            profile_focus_success: true,
            fingerprint_success: true,
            validate_stats_success: true,
            validate_strict_success: true,
            pull_success: true,
            pull_model_path: "/mock/model.safetensors".to_string(),
            inspect_json_success: true,
            // One transformer layer's worth of tensor names, plus embeddings,
            // final norm, and lm_head.
            inspect_tensor_names: vec![
                "model.embed_tokens.weight".to_string(),
                "model.layers.0.self_attn.q_proj.weight".to_string(),
                "model.layers.0.self_attn.k_proj.weight".to_string(),
                "model.layers.0.self_attn.v_proj.weight".to_string(),
                "model.layers.0.self_attn.o_proj.weight".to_string(),
                "model.layers.0.mlp.gate_proj.weight".to_string(),
                "model.layers.0.mlp.up_proj.weight".to_string(),
                "model.layers.0.mlp.down_proj.weight".to_string(),
                "model.norm.weight".to_string(),
                "lm_head.weight".to_string(),
            ],
            ollama_success: true,
            ollama_response: "The answer is 4.".to_string(),
            ollama_pull_success: true,
            ollama_create_success: true,
            serve_success: true,
            http_get_success: true,
            http_get_response: r#"{"models":[]}"#.to_string(),
            profile_memory_success: true,
            chat_success: true,
            chat_response: "The answer is 4.".to_string(),
            http_post_success: true,
            http_post_response: r#"{"choices":[{"text":"The answer is 4."}]}"#.to_string(),
            spawn_serve_success: true,
        }
    }
}
777
// Builder-style configuration: each `with_*` method consumes `self`, flips
// one knob, and returns the runner, so tests can chain e.g.
// `MockCommandRunner::new().with_crash().with_tps(5.0)`.
impl MockCommandRunner {
    /// Same as `Default`: everything succeeds.
    #[must_use]
    pub fn new() -> Self {
        Self::default()
    }

    #[must_use]
    pub fn with_inference_response(mut self, response: impl Into<String>) -> Self {
        self.inference_response = response.into();
        self
    }

    #[must_use]
    pub fn with_inference_failure(mut self) -> Self {
        self.inference_success = false;
        self
    }

    #[must_use]
    pub fn with_convert_failure(mut self) -> Self {
        self.convert_success = false;
        self
    }

    /// Set the tokens/sec figure reported by inference/bench/profile mocks.
    #[must_use]
    pub fn with_tps(mut self, tps: f64) -> Self {
        self.tps = tps;
        self
    }

    /// Make `run_inference` simulate a segfault (exit code -11).
    #[must_use]
    pub fn with_crash(mut self) -> Self {
        self.crash = true;
        self
    }

    /// Successful inference that also writes to stderr (e.g. warnings).
    #[must_use]
    pub fn with_inference_response_and_stderr(
        mut self,
        response: impl Into<String>,
        stderr: impl Into<String>,
    ) -> Self {
        self.inference_response = response.into();
        self.inference_stderr = Some(stderr.into());
        self
    }

    /// Make `profile_ci` behave as if `--ci` is not supported.
    #[must_use]
    pub fn with_profile_ci_unavailable(mut self) -> Self {
        self.profile_ci_unavailable = true;
        self
    }

    #[must_use]
    pub fn with_profile_ci_stderr(mut self, stderr: impl Into<String>) -> Self {
        self.profile_ci_stderr = Some(stderr.into());
        self
    }

    #[must_use]
    pub fn with_inspect_failure(mut self) -> Self {
        self.inspect_success = false;
        self
    }

    #[must_use]
    pub fn with_validate_failure(mut self) -> Self {
        self.validate_success = false;
        self
    }

    #[must_use]
    pub fn with_bench_failure(mut self) -> Self {
        self.bench_success = false;
        self
    }

    #[must_use]
    pub fn with_check_failure(mut self) -> Self {
        self.check_success = false;
        self
    }

    #[must_use]
    pub fn with_profile_failure(mut self) -> Self {
        self.profile_success = false;
        self
    }

    #[must_use]
    pub fn with_diff_tensors_failure(mut self) -> Self {
        self.diff_tensors_success = false;
        self
    }

    #[must_use]
    pub fn with_compare_inference_failure(mut self) -> Self {
        self.compare_inference_success = false;
        self
    }

    /// Force `run_inference` to exit with a specific code.
    #[must_use]
    pub fn with_exit_code(mut self, code: i32) -> Self {
        self.custom_exit_code = Some(code);
        self
    }

    #[must_use]
    pub fn with_profile_flamegraph_failure(mut self) -> Self {
        self.profile_flamegraph_success = false;
        self
    }

    #[must_use]
    pub fn with_profile_focus_failure(mut self) -> Self {
        self.profile_focus_success = false;
        self
    }

    #[must_use]
    pub fn with_fingerprint_failure(mut self) -> Self {
        self.fingerprint_success = false;
        self
    }

    #[must_use]
    pub fn with_validate_stats_failure(mut self) -> Self {
        self.validate_stats_success = false;
        self
    }

    #[must_use]
    pub fn with_validate_strict_failure(mut self) -> Self {
        self.validate_strict_success = false;
        self
    }

    #[must_use]
    pub fn with_pull_failure(mut self) -> Self {
        self.pull_success = false;
        self
    }

    /// Path reported by a successful `pull_model`.
    #[must_use]
    pub fn with_pull_model_path(mut self, path: impl Into<String>) -> Self {
        self.pull_model_path = path.into();
        self
    }

    #[must_use]
    pub fn with_inspect_json_failure(mut self) -> Self {
        self.inspect_json_success = false;
        self
    }

    /// Override the tensor names listed by `inspect_model_json`.
    #[must_use]
    pub fn with_tensor_names(mut self, names: Vec<String>) -> Self {
        self.inspect_tensor_names = names;
        self
    }

    #[must_use]
    pub fn with_ollama_response(mut self, response: impl Into<String>) -> Self {
        self.ollama_response = response.into();
        self
    }

    #[must_use]
    pub fn with_ollama_failure(mut self) -> Self {
        self.ollama_success = false;
        self
    }

    #[must_use]
    pub fn with_ollama_pull_failure(mut self) -> Self {
        self.ollama_pull_success = false;
        self
    }

    #[must_use]
    pub fn with_ollama_create_failure(mut self) -> Self {
        self.ollama_create_success = false;
        self
    }

    #[must_use]
    pub fn with_serve_failure(mut self) -> Self {
        self.serve_success = false;
        self
    }

    #[must_use]
    pub fn with_http_get_failure(mut self) -> Self {
        self.http_get_success = false;
        self
    }

    #[must_use]
    pub fn with_http_get_response(mut self, response: impl Into<String>) -> Self {
        self.http_get_response = response.into();
        self
    }

    #[must_use]
    pub fn with_profile_memory_failure(mut self) -> Self {
        self.profile_memory_success = false;
        self
    }

    #[must_use]
    pub fn with_chat_failure(mut self) -> Self {
        self.chat_success = false;
        self
    }

    #[must_use]
    pub fn with_chat_response(mut self, response: impl Into<String>) -> Self {
        self.chat_response = response.into();
        self
    }

    #[must_use]
    pub fn with_http_post_failure(mut self) -> Self {
        self.http_post_success = false;
        self
    }

    #[must_use]
    pub fn with_http_post_response(mut self, response: impl Into<String>) -> Self {
        self.http_post_response = response.into();
        self
    }

    #[must_use]
    pub fn with_spawn_serve_failure(mut self) -> Self {
        self.spawn_serve_success = false;
        self
    }
}
1056
1057impl CommandRunner for MockCommandRunner {
    // Mock inference. Precedence: custom exit code > crash > failure flag >
    // prompt-pattern canned responses > configured response.
    fn run_inference(
        &self,
        _model_path: &Path,
        prompt: &str,
        _max_tokens: u32,
        _no_gpu: bool,
        _extra_args: &[&str],
    ) -> CommandOutput {
        if let Some(exit_code) = self.custom_exit_code {
            return CommandOutput {
                stdout: String::new(),
                stderr: "Custom exit code error".to_string(),
                exit_code,
                success: exit_code == 0,
            };
        }

        if self.crash {
            // -11 mirrors termination by SIGSEGV.
            return CommandOutput {
                stdout: String::new(),
                stderr: "SIGSEGV: Segmentation fault".to_string(),
                exit_code: -11,
                success: false,
            };
        }

        if !self.inference_success {
            return CommandOutput::failure(1, "Inference failed");
        }

        // Canned answers for common test prompts (math, code completion,
        // empty prompt) before falling back to the configured response.
        let response = if prompt.contains("2+2") || prompt.contains("2 + 2") {
            "The answer is 4.".to_string()
        } else if prompt.starts_with("def ") || prompt.starts_with("fn ") {
            " return result".to_string()
        } else if prompt.is_empty() {
            String::new()
        } else {
            self.inference_response.clone()
        };

        // Mimics the real CLI's output framing, including a tok/s line.
        let stdout = format!(
            "Output:\n{}\nCompleted in 1.5s\ntok/s: {:.1}",
            response, self.tps
        );

        if let Some(ref stderr) = self.inference_stderr {
            CommandOutput::with_output(stdout, stderr.clone(), 0)
        } else {
            CommandOutput::success(stdout)
        }
    }
1113
    // Outcome driven solely by the `convert_success` flag.
    fn convert_model(&self, _source: &Path, _target: &Path) -> CommandOutput {
        if self.convert_success {
            CommandOutput::success("Conversion successful")
        } else {
            CommandOutput::failure(1, "Conversion failed")
        }
    }
1121
    // Returns a fixed JSON metadata blob when `inspect_success` is set.
    fn inspect_model(&self, _model_path: &Path) -> CommandOutput {
        if self.inspect_success {
            CommandOutput::success(r#"{"format":"GGUF","tensors":100,"parameters":"1.5B"}"#)
        } else {
            CommandOutput::failure(1, "Inspect failed: invalid model format")
        }
    }
1129
    // Outcome driven solely by the `validate_success` flag.
    fn validate_model(&self, _model_path: &Path) -> CommandOutput {
        if self.validate_success {
            CommandOutput::success("Model validation passed")
        } else {
            CommandOutput::failure(1, "Validation failed: corrupted tensors")
        }
    }
1137
    // Strict validation: on failure the JSON report still goes to stdout
    // (as the real CLI does), with a summary on stderr and exit code 1.
    fn validate_model_strict(&self, _model_path: &Path) -> CommandOutput {
        if self.validate_strict_success {
            CommandOutput::success(r#"{"valid":true,"tensors_checked":100,"issues":[]}"#)
        } else {
            CommandOutput::with_output(
                r#"{"valid":false,"tensors_checked":100,"issues":["all-zeros tensor: lm_head.weight (6.7GB F32)","expected BF16 but found F32"]}"#,
                "Validation failed: corrupt model detected",
                1,
            )
        }
    }
1149
    // Benchmark JSON embeds the configured `tps`; latency figures are fixed.
    fn bench_model(&self, _model_path: &Path) -> CommandOutput {
        if self.bench_success {
            let output = format!(
                r#"{{"throughput_tps":{:.1},"latency_p50_ms":78.2,"latency_p99_ms":156.5}}"#,
                self.tps
            );
            CommandOutput::success(output)
        } else {
            CommandOutput::failure(1, "Benchmark failed: model load error")
        }
    }
1161
    // Outcome driven solely by the `check_success` flag.
    fn check_model(&self, _model_path: &Path) -> CommandOutput {
        if self.check_success {
            CommandOutput::success("All checks passed")
        } else {
            CommandOutput::failure(1, "Check failed: safety issues detected")
        }
    }
1169
    // Same JSON shape as `bench_model`; warmup/measure counts are ignored.
    fn profile_model(&self, _model_path: &Path, _warmup: u32, _measure: u32) -> CommandOutput {
        if self.profile_success {
            let output = format!(
                r#"{{"throughput_tps":{:.1},"latency_p50_ms":78.2,"latency_p99_ms":156.5}}"#,
                self.tps
            );
            CommandOutput::success(output)
        } else {
            CommandOutput::failure(1, "Profile failed: insufficient memory")
        }
    }
1181
    // CI-mode profile: evaluates the optional throughput/p99 assertions
    // against the configured `tps` and a fixed p99 of 156.5 ms.
    fn profile_ci(
        &self,
        _model_path: &Path,
        min_throughput: Option<f64>,
        max_p99: Option<f64>,
        _warmup: u32,
        _measure: u32,
    ) -> CommandOutput {
        // Simulate an older binary that rejects the `--ci` flag entirely.
        if self.profile_ci_unavailable {
            let stderr = self.profile_ci_stderr.clone().unwrap_or_else(|| {
                "unexpected argument '--ci': apr profile does not support --ci mode".to_string()
            });
            return CommandOutput::with_output("", stderr, 1);
        }

        // `None` assertions trivially pass.
        let throughput_pass = min_throughput.is_none_or(|t| self.tps >= t);
        let p99_pass = max_p99.is_none_or(|p| 156.5 <= p);
        let passed = throughput_pass && p99_pass;

        let output = format!(
            r#"{{"throughput_tps":{:.1},"latency_p50_ms":78.2,"latency_p99_ms":156.5,"passed":{}}}"#,
            self.tps, passed
        );

        if passed {
            CommandOutput::success(output)
        } else {
            // Assertion failure: JSON still on stdout, exit code 1.
            CommandOutput::with_output(output, "", 1)
        }
    }
1213
    // Always reports a perfect match on success; `json` selects the format.
    fn diff_tensors(&self, _model_a: &Path, _model_b: &Path, json: bool) -> CommandOutput {
        if !self.diff_tensors_success {
            return CommandOutput::failure(1, "Diff tensors failed: incompatible models");
        }
        if json {
            CommandOutput::success(
                r#"{"total_tensors":100,"mismatched_tensors":0,"transposed_tensors":0,"mismatches":[],"passed":true}"#,
            )
        } else {
            CommandOutput::success("All tensors match")
        }
    }
1226
    // Reports a fixed all-tokens-match comparison on success.
    fn compare_inference(
        &self,
        _model_a: &Path,
        _model_b: &Path,
        _prompt: &str,
        _max_tokens: u32,
        _tolerance: f64,
    ) -> CommandOutput {
        if self.compare_inference_success {
            CommandOutput::success(
                r#"{"total_tokens":10,"matching_tokens":10,"max_logit_diff":0.0001,"passed":true,"token_comparisons":[]}"#,
            )
        } else {
            CommandOutput::failure(1, "Compare inference failed: output mismatch")
        }
    }
1243
    // No file is written; only the success/failure outcome is simulated.
    fn profile_with_flamegraph(
        &self,
        _model_path: &Path,
        _output_path: &Path,
        _no_gpu: bool,
    ) -> CommandOutput {
        if self.profile_flamegraph_success {
            CommandOutput::success("Profile complete, flamegraph written")
        } else {
            CommandOutput::failure(1, "Profile flamegraph failed: profiler error")
        }
    }
1256
    // Same JSON shape as `profile_model`; the focus target is ignored.
    fn profile_with_focus(&self, _model_path: &Path, _focus: &str, _no_gpu: bool) -> CommandOutput {
        if self.profile_focus_success {
            let output = format!(
                r#"{{"throughput_tps":{:.1},"latency_p50_ms":78.2,"latency_p99_ms":156.5}}"#,
                self.tps
            );
            CommandOutput::success(output)
        } else {
            CommandOutput::failure(1, "Profile focus failed: invalid focus target")
        }
    }
1268
    // Fixed single-tensor stats fingerprint; `json` selects the format.
    fn fingerprint_model(&self, _model_path: &Path, json: bool) -> CommandOutput {
        if self.fingerprint_success {
            if json {
                CommandOutput::success(
                    r#"{"tensors":{"0.q_proj.weight":{"mean":0.001,"std":0.05,"min":-0.2,"max":0.2}}}"#,
                )
            } else {
                CommandOutput::success("Fingerprint: 100 tensors captured")
            }
        } else {
            CommandOutput::failure(1, "Fingerprint failed: model load error")
        }
    }
1282
    // Fixed all-tensors-pass report on success.
    fn validate_stats(&self, _fp_a: &Path, _fp_b: &Path) -> CommandOutput {
        if self.validate_stats_success {
            CommandOutput::success(
                r#"{"passed":true,"total_tensors":100,"failed_tensors":0,"details":[]}"#,
            )
        } else {
            CommandOutput::failure(1, "Stats validation failed: 3 tensors exceed tolerance")
        }
    }
1292
    // On success stdout carries "Path: <pull_model_path>" for callers that
    // parse out the downloaded file location.
    fn pull_model(&self, _hf_repo: &str) -> CommandOutput {
        if self.pull_success {
            CommandOutput::success(format!("Path: {}", self.pull_model_path))
        } else {
            CommandOutput::failure(1, "Pull failed: model not found in registry")
        }
    }
1300
    // Builds the JSON tensor-name list from `inspect_tensor_names` so tests
    // can script which tensors appear.
    fn inspect_model_json(&self, _model_path: &Path) -> CommandOutput {
        if self.inspect_json_success {
            let tensor_names_json: String = self
                .inspect_tensor_names
                .iter()
                .map(|s| format!("\"{s}\""))
                .collect::<Vec<_>>()
                .join(", ");
            CommandOutput::success(format!(
                r#"{{"format":"SafeTensors","tensor_count":{},"tensor_names":[{}],"parameters":"1.5B"}}"#,
                self.inspect_tensor_names.len(),
                tensor_names_json
            ))
        } else {
            CommandOutput::failure(1, "Inspect failed: invalid model format")
        }
    }
1318
    // Mock ollama run: wraps `ollama_response` in the CLI-style framing.
    fn run_ollama_inference(
        &self,
        _model_tag: &str,
        _prompt: &str,
        _temperature: f64,
    ) -> CommandOutput {
        if self.ollama_success {
            CommandOutput::success(format!(
                "Output:\n{}\nCompleted in 1.0s",
                self.ollama_response
            ))
        } else {
            CommandOutput::failure(1, "Ollama inference failed: model not found")
        }
    }
1334
1335 fn pull_ollama_model(&self, _model_tag: &str) -> CommandOutput {
1336 if self.ollama_pull_success {
1337 CommandOutput::success("pulling manifest... done")
1338 } else {
1339 CommandOutput::failure(1, "Ollama pull failed: model not found in registry")
1340 }
1341 }
1342
1343 fn create_ollama_model(&self, _model_tag: &str, _modelfile_path: &Path) -> CommandOutput {
1344 if self.ollama_create_success {
1345 CommandOutput::success("creating model... done")
1346 } else {
1347 CommandOutput::failure(1, "Ollama create failed: invalid modelfile")
1348 }
1349 }
1350
1351 fn serve_model(&self, _model_path: &Path, _port: u16) -> CommandOutput {
1352 if self.serve_success {
1353 CommandOutput::success(r#"{"status":"listening","port":8080}"#)
1354 } else {
1355 CommandOutput::failure(1, "Serve failed: port in use")
1356 }
1357 }
1358
1359 fn http_get(&self, _url: &str) -> CommandOutput {
1360 if self.http_get_success {
1361 CommandOutput::success(&self.http_get_response)
1362 } else {
1363 CommandOutput::failure(1, "HTTP request failed: connection refused")
1364 }
1365 }
1366
1367 fn profile_memory(&self, _model_path: &Path) -> CommandOutput {
1368 if self.profile_memory_success {
1369 CommandOutput::success(r#"{"peak_rss_mb":1024,"model_size_mb":512,"kv_cache_mb":256}"#)
1370 } else {
1371 CommandOutput::failure(1, "Profile memory failed: insufficient memory")
1372 }
1373 }
1374
1375 fn run_chat(
1376 &self,
1377 _model_path: &Path,
1378 prompt: &str,
1379 _no_gpu: bool,
1380 _extra_args: &[&str],
1381 ) -> CommandOutput {
1382 if !self.chat_success {
1383 return CommandOutput::failure(1, "Chat failed");
1384 }
1385
1386 let response = if prompt.contains("2+2") || prompt.contains("2 + 2") {
1387 "The answer is 4.".to_string()
1388 } else {
1389 self.chat_response.clone()
1390 };
1391
1392 let stdout = format!(
1393 "Output:\n{}\nCompleted in 1.5s\ntok/s: {:.1}",
1394 response, self.tps
1395 );
1396 CommandOutput::success(stdout)
1397 }
1398
1399 fn http_post(&self, _url: &str, _body: &str) -> CommandOutput {
1400 if self.http_post_success {
1401 CommandOutput::success(&self.http_post_response)
1402 } else {
1403 CommandOutput::failure(1, "HTTP POST failed: connection refused")
1404 }
1405 }
1406
1407 fn spawn_serve(&self, _model_path: &Path, _port: u16, _no_gpu: bool) -> CommandOutput {
1408 if self.spawn_serve_success {
1409 CommandOutput::success("12345") } else {
1411 CommandOutput::failure(1, "Spawn serve failed: port in use")
1412 }
1413 }
1414}
1415
1416#[cfg(test)]
1417mod tests {
1418 use super::*;
1419 use std::path::PathBuf;
1420
    // --- CommandOutput constructor tests ---

    // `success` yields exit code 0, the given stdout, and empty stderr.
    #[test]
    fn test_command_output_success() {
        let output = CommandOutput::success("hello");
        assert!(output.success);
        assert_eq!(output.exit_code, 0);
        assert_eq!(output.stdout, "hello");
        assert!(output.stderr.is_empty());
    }

    // `failure` yields the given exit code and stderr with empty stdout.
    #[test]
    fn test_command_output_failure() {
        let output = CommandOutput::failure(1, "error message");
        assert!(!output.success);
        assert_eq!(output.exit_code, 1);
        assert!(output.stdout.is_empty());
        assert_eq!(output.stderr, "error message");
    }

    // `with_output` derives `success` from the exit code (0 => true).
    #[test]
    fn test_command_output_with_output() {
        let output = CommandOutput::with_output("out", "err", 0);
        assert!(output.success);
        assert_eq!(output.stdout, "out");
        assert_eq!(output.stderr, "err");

        let output2 = CommandOutput::with_output("out", "err", 1);
        assert!(!output2.success);
    }
1449
    // --- MockCommandRunner inference / convert tests ---

    // Fresh mock defaults: success flags on, tps = 25.0.
    #[test]
    fn test_mock_runner_default() {
        let runner = MockCommandRunner::new();
        assert!(runner.inference_success);
        assert!(runner.convert_success);
        assert!((runner.tps - 25.0).abs() < f64::EPSILON);
    }

    // Arithmetic prompt: stdout must contain the answer "4".
    #[test]
    fn test_mock_runner_inference_2plus2() {
        let runner = MockCommandRunner::new();
        let path = PathBuf::from("model.gguf");
        let output = runner.run_inference(&path, "What is 2+2?", 32, false, &[]);
        assert!(output.success);
        assert!(output.stdout.contains("4"));
    }

    // Code-style prompt (`def ...`): stdout must contain "return".
    #[test]
    fn test_mock_runner_inference_code() {
        let runner = MockCommandRunner::new();
        let path = PathBuf::from("model.gguf");
        let output = runner.run_inference(&path, "def fibonacci(n):", 32, false, &[]);
        assert!(output.success);
        assert!(output.stdout.contains("return"));
    }

    // An empty prompt still succeeds.
    #[test]
    fn test_mock_runner_inference_empty() {
        let runner = MockCommandRunner::new();
        let path = PathBuf::from("model.gguf");
        let output = runner.run_inference(&path, "", 32, false, &[]);
        assert!(output.success);
    }

    // `with_inference_response` overrides the generic reply.
    #[test]
    fn test_mock_runner_inference_generic() {
        let runner = MockCommandRunner::new().with_inference_response("Custom response");
        let path = PathBuf::from("model.gguf");
        let output = runner.run_inference(&path, "Hello world", 32, false, &[]);
        assert!(output.success);
        assert!(output.stdout.contains("Custom response"));
    }

    // `with_inference_failure` forces exit code 1.
    #[test]
    fn test_mock_runner_inference_failure() {
        let runner = MockCommandRunner::new().with_inference_failure();
        let path = PathBuf::from("model.gguf");
        let output = runner.run_inference(&path, "test", 32, false, &[]);
        assert!(!output.success);
        assert_eq!(output.exit_code, 1);
    }

    // Conversion succeeds by default.
    #[test]
    fn test_mock_runner_convert_success() {
        let runner = MockCommandRunner::new();
        let source = PathBuf::from("source.gguf");
        let target = PathBuf::from("target.apr");
        let output = runner.convert_model(&source, &target);
        assert!(output.success);
    }

    // `with_convert_failure` flips the conversion outcome.
    #[test]
    fn test_mock_runner_convert_failure() {
        let runner = MockCommandRunner::new().with_convert_failure();
        let source = PathBuf::from("source.gguf");
        let target = PathBuf::from("target.apr");
        let output = runner.convert_model(&source, &target);
        assert!(!output.success);
    }
1520
    // --- MockCommandRunner inspect / validate / bench / profile tests ---

    // Default inspect output mentions the GGUF format.
    #[test]
    fn test_mock_runner_inspect() {
        let runner = MockCommandRunner::new();
        let path = PathBuf::from("model.gguf");
        let output = runner.inspect_model(&path);
        assert!(output.success);
        assert!(output.stdout.contains("GGUF"));
    }

    // Validation succeeds by default.
    #[test]
    fn test_mock_runner_validate() {
        let runner = MockCommandRunner::new();
        let path = PathBuf::from("model.gguf");
        let output = runner.validate_model(&path);
        assert!(output.success);
    }

    // Bench output embeds the configured tokens/sec value.
    #[test]
    fn test_mock_runner_bench() {
        let runner = MockCommandRunner::new().with_tps(30.0);
        let path = PathBuf::from("model.gguf");
        let output = runner.bench_model(&path);
        assert!(output.success);
        assert!(output.stdout.contains("30.0"));
    }

    // Check succeeds by default.
    #[test]
    fn test_mock_runner_check() {
        let runner = MockCommandRunner::new();
        let path = PathBuf::from("model.gguf");
        let output = runner.check_model(&path);
        assert!(output.success);
    }

    // Profile output includes a throughput field.
    #[test]
    fn test_mock_runner_profile() {
        let runner = MockCommandRunner::new();
        let path = PathBuf::from("model.gguf");
        let output = runner.profile_model(&path, 1, 2);
        assert!(output.success);
        assert!(output.stdout.contains("throughput_tps"));
    }

    // CI profile passes when tps clears both thresholds.
    #[test]
    fn test_mock_runner_profile_ci_pass() {
        let runner = MockCommandRunner::new().with_tps(20.0);
        let path = PathBuf::from("model.gguf");
        let output = runner.profile_ci(&path, Some(10.0), Some(200.0), 1, 2);
        assert!(output.success);
        assert!(output.stdout.contains("\"passed\":true"));
    }

    // CI profile fails when tps is below the minimum throughput.
    #[test]
    fn test_mock_runner_profile_ci_fail_throughput() {
        let runner = MockCommandRunner::new().with_tps(5.0);
        let path = PathBuf::from("model.gguf");
        let output = runner.profile_ci(&path, Some(10.0), None, 1, 2);
        assert!(!output.success);
        assert!(output.stdout.contains("\"passed\":false"));
    }

    // CI profile fails on a too-strict p99 latency bound.
    #[test]
    fn test_mock_runner_profile_ci_fail_p99() {
        let runner = MockCommandRunner::new();
        let path = PathBuf::from("model.gguf");
        let output = runner.profile_ci(&path, None, Some(100.0), 1, 2);
        assert!(!output.success);
    }
1590
    // --- diff / compare tests, and RealCommandRunner construction ---

    // Tensor diff in JSON mode reports a passing comparison.
    #[test]
    fn test_mock_runner_diff_tensors_json() {
        let runner = MockCommandRunner::new();
        let a = PathBuf::from("a.gguf");
        let b = PathBuf::from("b.apr");
        let output = runner.diff_tensors(&a, &b, true);
        assert!(output.success);
        assert!(output.stdout.contains("\"passed\":true"));
    }

    // Tensor diff in text mode mentions a match.
    #[test]
    fn test_mock_runner_diff_tensors_text() {
        let runner = MockCommandRunner::new();
        let a = PathBuf::from("a.gguf");
        let b = PathBuf::from("b.apr");
        let output = runner.diff_tensors(&a, &b, false);
        assert!(output.success);
        assert!(output.stdout.contains("match"));
    }

    // Cross-model inference comparison passes by default.
    #[test]
    fn test_mock_runner_compare_inference() {
        let runner = MockCommandRunner::new();
        let a = PathBuf::from("a.gguf");
        let b = PathBuf::from("b.apr");
        let output = runner.compare_inference(&a, &b, "test prompt", 10, 1e-5);
        assert!(output.success);
        assert!(output.stdout.contains("\"passed\":true"));
    }

    // Real runner defaults to the `apr` binary name.
    #[test]
    fn test_real_runner_new() {
        let runner = RealCommandRunner::new();
        assert_eq!(runner.apr_binary, "apr");
    }

    // `with_binary` overrides the binary path.
    #[test]
    fn test_real_runner_with_binary() {
        let runner = RealCommandRunner::with_binary("/custom/apr");
        assert_eq!(runner.apr_binary, "/custom/apr");
    }

    // `with_tps` stores the configured throughput.
    #[test]
    fn test_mock_runner_with_tps() {
        let runner = MockCommandRunner::new().with_tps(100.0);
        assert!((runner.tps - 100.0).abs() < f64::EPSILON);
    }

    // Builder calls chain and each setting sticks.
    #[test]
    fn test_mock_runner_chained_config() {
        let runner = MockCommandRunner::new()
            .with_tps(50.0)
            .with_inference_response("Custom")
            .with_convert_failure();

        assert!((runner.tps - 50.0).abs() < f64::EPSILON);
        assert_eq!(runner.inference_response, "Custom");
        assert!(!runner.convert_success);
    }
1650
    // --- derived-trait (Clone / Debug / Default) smoke tests ---

    // CommandOutput::clone copies stdout and success.
    #[test]
    fn test_command_output_clone() {
        let output = CommandOutput::success("test");
        let cloned = output.clone();
        assert_eq!(cloned.stdout, output.stdout);
        assert_eq!(cloned.success, output.success);
    }

    // Debug formatting names the type.
    #[test]
    fn test_command_output_debug() {
        let output = CommandOutput::success("test");
        let debug_str = format!("{output:?}");
        assert!(debug_str.contains("CommandOutput"));
    }

    // Mock runner clone preserves configured tps.
    #[test]
    fn test_mock_runner_clone() {
        let runner = MockCommandRunner::new().with_tps(42.0);
        let cloned = runner.clone();
        assert!((cloned.tps - 42.0).abs() < f64::EPSILON);
    }

    // Debug formatting names the mock runner type.
    #[test]
    fn test_mock_runner_debug() {
        let runner = MockCommandRunner::new();
        let debug_str = format!("{runner:?}");
        assert!(debug_str.contains("MockCommandRunner"));
    }

    // Real runner clone preserves the binary path.
    #[test]
    fn test_real_runner_clone() {
        let runner = RealCommandRunner::with_binary("custom");
        let cloned = runner.clone();
        assert_eq!(cloned.apr_binary, "custom");
    }

    // Debug formatting names the real runner type.
    #[test]
    fn test_real_runner_debug() {
        let runner = RealCommandRunner::new();
        let debug_str = format!("{runner:?}");
        assert!(debug_str.contains("RealCommandRunner"));
    }

    // Default matches `new()`: binary is "apr".
    #[test]
    fn test_real_runner_default() {
        let runner = RealCommandRunner::default();
        assert_eq!(runner.apr_binary, "apr");
    }
1699
1700 #[test]
1701 fn test_mock_runner_with_crash() {
1702 let runner = MockCommandRunner::new().with_crash();
1703 assert!(runner.crash);
1704 let path = PathBuf::from("model.gguf");
1705 let output = runner.run_inference(&path, "test", 32, false, &[]);
1706 assert!(!output.success);
1707 assert_eq!(output.exit_code, -11); assert!(output.stderr.contains("SIGSEGV"));
1709 }
1710
    // --- prompt variants, stderr passthrough, crash priority ---

    // Configured stderr is attached to a successful inference.
    #[test]
    fn test_mock_runner_with_inference_response_and_stderr() {
        let runner =
            MockCommandRunner::new().with_inference_response_and_stderr("Response", "Warning");
        assert_eq!(runner.inference_response, "Response");
        assert_eq!(runner.inference_stderr.as_deref(), Some("Warning"));

        let path = PathBuf::from("model.gguf");
        let output = runner.run_inference(&path, "Hello", 32, false, &[]);
        assert!(output.success);
        assert!(output.stdout.contains("Response"));
        assert_eq!(output.stderr, "Warning");
    }

    // Rust-style code prompt (`fn ...`) also yields a "return" snippet.
    #[test]
    fn test_mock_runner_inference_fn_code() {
        let runner = MockCommandRunner::new();
        let path = PathBuf::from("model.gguf");
        let output = runner.run_inference(&path, "fn main() {}", 32, false, &[]);
        assert!(output.success);
        assert!(output.stdout.contains("return"));
    }

    // Spaced arithmetic ("2 + 2") is recognized too.
    #[test]
    fn test_mock_runner_inference_2_plus_2_spaced() {
        let runner = MockCommandRunner::new();
        let path = PathBuf::from("model.gguf");
        let output = runner.run_inference(&path, "What is 2 + 2?", 32, false, &[]);
        assert!(output.success);
        assert!(output.stdout.contains("4"));
    }

    // Crash simulation wins over plain inference failure (exit -11, not 1).
    #[test]
    fn test_mock_runner_crash_takes_priority() {
        let runner = MockCommandRunner::new()
            .with_crash()
            .with_inference_failure();
        let path = PathBuf::from("model.gguf");
        let output = runner.run_inference(&path, "test", 32, false, &[]);
        assert_eq!(output.exit_code, -11);
    }

    // Exit code 0 maps to success even with non-empty stderr.
    #[test]
    fn test_command_output_with_output_success_on_zero() {
        let output = CommandOutput::with_output("stdout", "stderr", 0);
        assert!(output.success);
        assert_eq!(output.exit_code, 0);
    }

    // Any non-zero exit code maps to failure.
    #[test]
    fn test_command_output_with_output_failure_on_nonzero() {
        let output = CommandOutput::with_output("", "error", 42);
        assert!(!output.success);
        assert_eq!(output.exit_code, 42);
    }

    // With no thresholds, CI profiling always passes.
    #[test]
    fn test_mock_runner_profile_ci_no_assertions() {
        let runner = MockCommandRunner::new().with_tps(15.0);
        let path = PathBuf::from("model.gguf");
        let output = runner.profile_ci(&path, None, None, 1, 2);
        assert!(output.success);
        assert!(output.stdout.contains("\"passed\":true"));
    }

    // Crash/stderr fields default to off/None.
    #[test]
    fn test_mock_runner_fields_after_default() {
        let runner = MockCommandRunner::default();
        assert!(!runner.crash);
        assert!(runner.inference_stderr.is_none());
    }
1785
    // --- negative exit codes, profile-ci variants, inspect failure ---

    // Negative exit codes (signals) are preserved by `failure`.
    #[test]
    fn test_command_output_failure_negative_exit_code() {
        let output = CommandOutput::failure(-9, "killed");
        assert!(!output.success);
        assert_eq!(output.exit_code, -9);
        assert_eq!(output.stderr, "killed");
    }

    // tps, response, and crash options all chain together.
    #[test]
    fn test_mock_runner_with_all_options() {
        let runner = MockCommandRunner::new()
            .with_tps(100.0)
            .with_inference_response("Custom response")
            .with_crash();

        assert!((runner.tps - 100.0).abs() < f64::EPSILON);
        assert_eq!(runner.inference_response, "Custom response");
        assert!(runner.crash);
    }

    // High tps satisfies both throughput and p99 assertions.
    #[test]
    fn test_mock_runner_profile_ci_both_assertions_pass() {
        let runner = MockCommandRunner::new().with_tps(200.0);
        let path = PathBuf::from("model.gguf");
        let output = runner.profile_ci(&path, Some(100.0), Some(500.0), 1, 2);
        assert!(output.success);
        assert!(output.stdout.contains("\"passed\":true"));
    }

    // Low tps fails when both assertions are set.
    #[test]
    fn test_mock_runner_profile_ci_both_assertions_fail() {
        let runner = MockCommandRunner::new().with_tps(5.0);
        let path = PathBuf::from("model.gguf");
        let output = runner.profile_ci(&path, Some(100.0), Some(100.0), 1, 2);
        assert!(!output.success);
        assert!(output.stdout.contains("\"passed\":false"));
    }

    // Simulates an apr build without `--ci`: CLI arg-parse error on stderr.
    #[test]
    fn test_mock_runner_profile_ci_unavailable() {
        let runner = MockCommandRunner::new().with_profile_ci_unavailable();
        let path = PathBuf::from("model.gguf");
        let output = runner.profile_ci(&path, Some(10.0), None, 1, 2);
        assert!(!output.success);
        assert!(output.stderr.contains("unexpected argument"));
    }

    // A custom stderr message overrides the default unavailable text.
    #[test]
    fn test_mock_runner_profile_ci_custom_stderr() {
        let runner = MockCommandRunner::new()
            .with_profile_ci_unavailable()
            .with_profile_ci_stderr("Custom error: --ci not supported");
        let path = PathBuf::from("model.gguf");
        let output = runner.profile_ci(&path, None, None, 1, 2);
        assert!(!output.success);
        assert!(output.stderr.contains("Custom error"));
    }

    // Inspect failure surfaces the format error on stderr.
    #[test]
    fn test_mock_runner_inspect_failure() {
        let runner = MockCommandRunner::new().with_inspect_failure();
        let path = PathBuf::from("model.gguf");
        let output = runner.inspect_model(&path);
        assert!(!output.success);
        assert!(output.stderr.contains("invalid model format"));
    }
1854
    // --- per-command failure toggles and their stderr messages ---

    // Validate failure mentions corrupted tensors.
    #[test]
    fn test_mock_runner_validate_failure() {
        let runner = MockCommandRunner::new().with_validate_failure();
        let path = PathBuf::from("model.gguf");
        let output = runner.validate_model(&path);
        assert!(!output.success);
        assert!(output.stderr.contains("corrupted tensors"));
    }

    // Bench failure mentions a model load error.
    #[test]
    fn test_mock_runner_bench_failure() {
        let runner = MockCommandRunner::new().with_bench_failure();
        let path = PathBuf::from("model.gguf");
        let output = runner.bench_model(&path);
        assert!(!output.success);
        assert!(output.stderr.contains("model load error"));
    }

    // Check failure mentions safety issues.
    #[test]
    fn test_mock_runner_check_failure() {
        let runner = MockCommandRunner::new().with_check_failure();
        let path = PathBuf::from("model.gguf");
        let output = runner.check_model(&path);
        assert!(!output.success);
        assert!(output.stderr.contains("safety issues"));
    }

    // Profile failure mentions insufficient memory.
    #[test]
    fn test_mock_runner_profile_failure() {
        let runner = MockCommandRunner::new().with_profile_failure();
        let path = PathBuf::from("model.gguf");
        let output = runner.profile_model(&path, 1, 2);
        assert!(!output.success);
        assert!(output.stderr.contains("insufficient memory"));
    }

    // Diff failure mentions incompatible models.
    #[test]
    fn test_mock_runner_diff_tensors_failure() {
        let runner = MockCommandRunner::new().with_diff_tensors_failure();
        let a = PathBuf::from("a.gguf");
        let b = PathBuf::from("b.apr");
        let output = runner.diff_tensors(&a, &b, true);
        assert!(!output.success);
        assert!(output.stderr.contains("incompatible models"));
    }

    // Compare failure mentions an output mismatch.
    #[test]
    fn test_mock_runner_compare_inference_failure() {
        let runner = MockCommandRunner::new().with_compare_inference_failure();
        let a = PathBuf::from("a.gguf");
        let b = PathBuf::from("b.apr");
        let output = runner.compare_inference(&a, &b, "test", 10, 1e-5);
        assert!(!output.success);
        assert!(output.stderr.contains("output mismatch"));
    }

    // Every per-command flag defaults to success.
    #[test]
    fn test_mock_runner_default_new_fields() {
        let runner = MockCommandRunner::default();
        assert!(!runner.profile_ci_unavailable);
        assert!(runner.profile_ci_stderr.is_none());
        assert!(runner.inspect_success);
        assert!(runner.validate_success);
        assert!(runner.bench_success);
        assert!(runner.check_success);
        assert!(runner.profile_success);
        assert!(runner.diff_tensors_success);
        assert!(runner.compare_inference_success);
    }

    // All failure toggles can be chained in one builder expression.
    #[test]
    fn test_mock_runner_chained_failures() {
        let runner = MockCommandRunner::new()
            .with_inspect_failure()
            .with_validate_failure()
            .with_bench_failure()
            .with_check_failure()
            .with_profile_failure()
            .with_diff_tensors_failure()
            .with_compare_inference_failure();

        assert!(!runner.inspect_success);
        assert!(!runner.validate_success);
        assert!(!runner.bench_success);
        assert!(!runner.check_success);
        assert!(!runner.profile_success);
        assert!(!runner.diff_tensors_success);
        assert!(!runner.compare_inference_success);
    }
1944
1945 #[test]
1947 fn test_real_runner_execute_nonexistent_binary() {
1948 let runner = RealCommandRunner::with_binary("/nonexistent/binary/path");
1949 let path = PathBuf::from("model.gguf");
1950 let output = runner.run_inference(&path, "test", 32, false, &[]);
1951 assert!(!output.success);
1952 assert_eq!(output.exit_code, -1);
1953 assert!(output.stderr.contains("Failed to execute"));
1954 }
1955
1956 #[test]
1957 fn test_real_runner_run_inference_with_no_gpu() {
1958 let runner = RealCommandRunner::with_binary("/nonexistent/binary");
1959 let path = PathBuf::from("model.gguf");
1960 let output = runner.run_inference(&path, "test", 32, true, &[]);
1961 assert!(!output.success);
1962 }
1963
1964 #[test]
1965 fn test_real_runner_run_inference_with_extra_args() {
1966 let runner = RealCommandRunner::with_binary("/nonexistent/binary");
1967 let path = PathBuf::from("model.gguf");
1968 let output = runner.run_inference(&path, "test", 32, false, &["--temp", "0.8"]);
1969 assert!(!output.success);
1970 }
1971
1972 #[test]
1973 fn test_real_runner_convert_model() {
1974 let runner = RealCommandRunner::with_binary("/nonexistent/binary");
1975 let source = PathBuf::from("source.gguf");
1976 let target = PathBuf::from("target.apr");
1977 let output = runner.convert_model(&source, &target);
1978 assert!(!output.success);
1979 }
1980
1981 #[test]
1982 fn test_real_runner_inspect_model() {
1983 let runner = RealCommandRunner::with_binary("/nonexistent/binary");
1984 let path = PathBuf::from("model.gguf");
1985 let output = runner.inspect_model(&path);
1986 assert!(!output.success);
1987 }
1988
1989 #[test]
1990 fn test_real_runner_validate_model() {
1991 let runner = RealCommandRunner::with_binary("/nonexistent/binary");
1992 let path = PathBuf::from("model.gguf");
1993 let output = runner.validate_model(&path);
1994 assert!(!output.success);
1995 }
1996
1997 #[test]
1998 fn test_real_runner_bench_model() {
1999 let runner = RealCommandRunner::with_binary("/nonexistent/binary");
2000 let path = PathBuf::from("model.gguf");
2001 let output = runner.bench_model(&path);
2002 assert!(!output.success);
2003 }
2004
2005 #[test]
2006 fn test_real_runner_check_model() {
2007 let runner = RealCommandRunner::with_binary("/nonexistent/binary");
2008 let path = PathBuf::from("model.gguf");
2009 let output = runner.check_model(&path);
2010 assert!(!output.success);
2011 }
2012
2013 #[test]
2014 fn test_real_runner_profile_model() {
2015 let runner = RealCommandRunner::with_binary("/nonexistent/binary");
2016 let path = PathBuf::from("model.gguf");
2017 let output = runner.profile_model(&path, 5, 10);
2018 assert!(!output.success);
2019 }
2020
2021 #[test]
2022 fn test_real_runner_profile_ci_all_options() {
2023 let runner = RealCommandRunner::with_binary("/nonexistent/binary");
2024 let path = PathBuf::from("model.gguf");
2025 let output = runner.profile_ci(&path, Some(10.0), Some(100.0), 5, 10);
2026 assert!(!output.success);
2027 }
2028
2029 #[test]
2030 fn test_real_runner_profile_ci_throughput_only() {
2031 let runner = RealCommandRunner::with_binary("/nonexistent/binary");
2032 let path = PathBuf::from("model.gguf");
2033 let output = runner.profile_ci(&path, Some(50.0), None, 1, 1);
2034 assert!(!output.success);
2035 }
2036
2037 #[test]
2038 fn test_real_runner_profile_ci_p99_only() {
2039 let runner = RealCommandRunner::with_binary("/nonexistent/binary");
2040 let path = PathBuf::from("model.gguf");
2041 let output = runner.profile_ci(&path, None, Some(200.0), 1, 1);
2042 assert!(!output.success);
2043 }
2044
2045 #[test]
2046 fn test_real_runner_profile_ci_no_options() {
2047 let runner = RealCommandRunner::with_binary("/nonexistent/binary");
2048 let path = PathBuf::from("model.gguf");
2049 let output = runner.profile_ci(&path, None, None, 1, 1);
2050 assert!(!output.success);
2051 }
2052
2053 #[test]
2054 fn test_real_runner_diff_tensors_json() {
2055 let runner = RealCommandRunner::with_binary("/nonexistent/binary");
2056 let a = PathBuf::from("a.gguf");
2057 let b = PathBuf::from("b.apr");
2058 let output = runner.diff_tensors(&a, &b, true);
2059 assert!(!output.success);
2060 }
2061
2062 #[test]
2063 fn test_real_runner_diff_tensors_text() {
2064 let runner = RealCommandRunner::with_binary("/nonexistent/binary");
2065 let a = PathBuf::from("a.gguf");
2066 let b = PathBuf::from("b.apr");
2067 let output = runner.diff_tensors(&a, &b, false);
2068 assert!(!output.success);
2069 }
2070
2071 #[test]
2072 fn test_real_runner_compare_inference() {
2073 let runner = RealCommandRunner::with_binary("/nonexistent/binary");
2074 let a = PathBuf::from("a.gguf");
2075 let b = PathBuf::from("b.apr");
2076 let output = runner.compare_inference(&a, &b, "prompt", 10, 1e-5);
2077 assert!(!output.success);
2078 }
2079
    // --- flamegraph / focus profiling and strict validation ---

    // Flamegraph profiling succeeds and mentions the artifact.
    #[test]
    fn test_mock_runner_profile_flamegraph_success() {
        let runner = MockCommandRunner::new();
        let model = PathBuf::from("model.gguf");
        let output_path = PathBuf::from("/tmp/profile.svg");
        let output = runner.profile_with_flamegraph(&model, &output_path, false);
        assert!(output.success);
        assert!(output.stdout.contains("flamegraph"));
    }

    // Flamegraph failure surfaces a profiler error.
    #[test]
    fn test_mock_runner_profile_flamegraph_failure() {
        let runner = MockCommandRunner::new().with_profile_flamegraph_failure();
        let model = PathBuf::from("model.gguf");
        let output_path = PathBuf::from("/tmp/profile.svg");
        let output = runner.profile_with_flamegraph(&model, &output_path, false);
        assert!(!output.success);
        assert!(output.stderr.contains("profiler error"));
    }

    // Focused profiling reports the configured tps.
    #[test]
    fn test_mock_runner_profile_focus_success() {
        let runner = MockCommandRunner::new().with_tps(42.0);
        let model = PathBuf::from("model.gguf");
        let output = runner.profile_with_focus(&model, "attention", false);
        assert!(output.success);
        assert!(output.stdout.contains("42.0"));
    }

    // Focus failure mentions the invalid target.
    #[test]
    fn test_mock_runner_profile_focus_failure() {
        let runner = MockCommandRunner::new().with_profile_focus_failure();
        let model = PathBuf::from("model.gguf");
        let output = runner.profile_with_focus(&model, "attention", false);
        assert!(!output.success);
        assert!(output.stderr.contains("invalid focus target"));
    }

    // Real flamegraph profiling fails cleanly without the binary.
    #[test]
    fn test_real_runner_profile_flamegraph() {
        let runner = RealCommandRunner::with_binary("/nonexistent/binary");
        let model = PathBuf::from("model.gguf");
        let output_path = PathBuf::from("/tmp/profile.svg");
        let output = runner.profile_with_flamegraph(&model, &output_path, false);
        assert!(!output.success);
    }

    // Same, with the no-gpu flag set.
    #[test]
    fn test_real_runner_profile_flamegraph_no_gpu() {
        let runner = RealCommandRunner::with_binary("/nonexistent/binary");
        let model = PathBuf::from("model.gguf");
        let output_path = PathBuf::from("/tmp/profile.svg");
        let output = runner.profile_with_flamegraph(&model, &output_path, true);
        assert!(!output.success);
    }

    // Real focused profiling fails cleanly without the binary.
    #[test]
    fn test_real_runner_profile_focus() {
        let runner = RealCommandRunner::with_binary("/nonexistent/binary");
        let model = PathBuf::from("model.gguf");
        let output = runner.profile_with_focus(&model, "attention", false);
        assert!(!output.success);
    }

    // Same, different focus target and no-gpu flag.
    #[test]
    fn test_real_runner_profile_focus_no_gpu() {
        let runner = RealCommandRunner::with_binary("/nonexistent/binary");
        let model = PathBuf::from("model.gguf");
        let output = runner.profile_with_focus(&model, "matmul", true);
        assert!(!output.success);
    }

    // Flamegraph/focus flags default to success.
    #[test]
    fn test_mock_runner_default_new_profile_fields() {
        let runner = MockCommandRunner::default();
        assert!(runner.profile_flamegraph_success);
        assert!(runner.profile_focus_success);
    }

    // Both profile failure toggles chain.
    #[test]
    fn test_mock_runner_chained_profile_failures() {
        let runner = MockCommandRunner::new()
            .with_profile_flamegraph_failure()
            .with_profile_focus_failure();
        assert!(!runner.profile_flamegraph_success);
        assert!(!runner.profile_focus_success);
    }

    // Strict validation passes with a `"valid":true` payload.
    #[test]
    fn test_mock_runner_validate_strict_success() {
        let runner = MockCommandRunner::new();
        let path = PathBuf::from("model.gguf");
        let output = runner.validate_model_strict(&path);
        assert!(output.success);
        assert!(output.stdout.contains("\"valid\":true"));
    }

    // Strict validation failure reports all-zeros tensors in stdout JSON.
    #[test]
    fn test_mock_runner_validate_strict_failure() {
        let runner = MockCommandRunner::new().with_validate_strict_failure();
        let path = PathBuf::from("model.gguf");
        let output = runner.validate_model_strict(&path);
        assert!(!output.success);
        assert!(output.stdout.contains("\"valid\":false"));
        assert!(output.stdout.contains("all-zeros"));
    }

    // Strict validation defaults to success.
    #[test]
    fn test_mock_runner_validate_strict_default() {
        let runner = MockCommandRunner::default();
        assert!(runner.validate_strict_success);
    }

    // Real strict validation fails cleanly without the binary.
    #[test]
    fn test_real_runner_validate_strict() {
        let runner = RealCommandRunner::with_binary("/nonexistent/binary");
        let path = PathBuf::from("model.gguf");
        let output = runner.validate_model_strict(&path);
        assert!(!output.success);
    }
2200
    // --- model pull and Ollama integration tests ---

    // Pull succeeds and reports the default mock path.
    #[test]
    fn test_mock_runner_pull_success() {
        let runner = MockCommandRunner::new();
        let output = runner.pull_model("test/model");
        assert!(output.success);
        assert!(output.stdout.contains("Path: /mock/model.safetensors"));
    }

    // Pull failure surfaces on stderr.
    #[test]
    fn test_mock_runner_pull_failure() {
        let runner = MockCommandRunner::new().with_pull_failure();
        let output = runner.pull_model("test/model");
        assert!(!output.success);
        assert!(output.stderr.contains("Pull failed"));
    }

    // A custom pull path is echoed in the success output.
    #[test]
    fn test_mock_runner_pull_custom_path() {
        let runner =
            MockCommandRunner::new().with_pull_model_path("/custom/path/model.safetensors");
        let output = runner.pull_model("test/model");
        assert!(output.success);
        assert!(
            output
                .stdout
                .contains("Path: /custom/path/model.safetensors")
        );
    }

    // Pull defaults: success, canonical mock path.
    #[test]
    fn test_mock_runner_pull_default() {
        let runner = MockCommandRunner::default();
        assert!(runner.pull_success);
        assert_eq!(runner.pull_model_path, "/mock/model.safetensors");
    }

    // Real pull fails cleanly without the binary.
    #[test]
    fn test_real_runner_pull_model() {
        let runner = RealCommandRunner::with_binary("/nonexistent/binary");
        let output = runner.pull_model("test/model");
        assert!(!output.success);
    }

    // Default Ollama inference returns the canned arithmetic answer.
    #[test]
    fn test_mock_runner_ollama_inference_success() {
        let runner = MockCommandRunner::new();
        let output = runner.run_ollama_inference("qwen2.5-coder:7b-q4_k_m", "What is 2+2?", 0.0);
        assert!(output.success);
        assert!(output.stdout.contains("The answer is 4."));
    }

    // `with_ollama_response` overrides the canned reply.
    #[test]
    fn test_mock_runner_ollama_inference_custom_response() {
        let runner = MockCommandRunner::new().with_ollama_response("Custom ollama response");
        let output = runner.run_ollama_inference("qwen2.5-coder:7b", "Hello", 0.7);
        assert!(output.success);
        assert!(output.stdout.contains("Custom ollama response"));
    }

    // Ollama inference failure surfaces on stderr.
    #[test]
    fn test_mock_runner_ollama_inference_failure() {
        let runner = MockCommandRunner::new().with_ollama_failure();
        let output = runner.run_ollama_inference("qwen2.5-coder:7b", "test", 0.0);
        assert!(!output.success);
        assert!(output.stderr.contains("Ollama inference failed"));
    }

    // Ollama pull succeeds with manifest progress text.
    #[test]
    fn test_mock_runner_ollama_pull_success() {
        let runner = MockCommandRunner::new();
        let output = runner.pull_ollama_model("qwen2.5-coder:7b-q4_k_m");
        assert!(output.success);
        assert!(output.stdout.contains("pulling manifest"));
    }

    // Ollama pull failure surfaces on stderr.
    #[test]
    fn test_mock_runner_ollama_pull_failure() {
        let runner = MockCommandRunner::new().with_ollama_pull_failure();
        let output = runner.pull_ollama_model("nonexistent:model");
        assert!(!output.success);
        assert!(output.stderr.contains("Ollama pull failed"));
    }

    // Ollama defaults: success flags on, canned arithmetic response.
    #[test]
    fn test_mock_runner_ollama_default_fields() {
        let runner = MockCommandRunner::default();
        assert!(runner.ollama_success);
        assert!(runner.ollama_pull_success);
        assert_eq!(runner.ollama_response, "The answer is 4.");
    }
2293
2294 #[test]
2297 fn test_mock_runner_create_ollama_success() {
2298 let runner = MockCommandRunner::new();
2299 let path = PathBuf::from("/tmp/Modelfile");
2300 let output = runner.create_ollama_model("test:latest", &path);
2301 assert!(output.success);
2302 assert!(output.stdout.contains("creating model"));
2303 }
2304
2305 #[test]
2306 fn test_mock_runner_create_ollama_failure() {
2307 let runner = MockCommandRunner::new().with_ollama_create_failure();
2308 let path = PathBuf::from("/tmp/Modelfile");
2309 let output = runner.create_ollama_model("test:latest", &path);
2310 assert!(!output.success);
2311 }
2312
2313 #[test]
2314 fn test_mock_runner_serve_success() {
2315 let runner = MockCommandRunner::new();
2316 let path = PathBuf::from("model.gguf");
2317 let output = runner.serve_model(&path, 8080);
2318 assert!(output.success);
2319 assert!(output.stdout.contains("listening"));
2320 }
2321
2322 #[test]
2323 fn test_mock_runner_serve_failure() {
2324 let runner = MockCommandRunner::new().with_serve_failure();
2325 let path = PathBuf::from("model.gguf");
2326 let output = runner.serve_model(&path, 8080);
2327 assert!(!output.success);
2328 }
2329
2330 #[test]
2331 fn test_mock_runner_http_get_success() {
2332 let runner = MockCommandRunner::new();
2333 let output = runner.http_get("http://localhost:8080/v1/models");
2334 assert!(output.success);
2335 assert!(output.stdout.contains("models"));
2336 }
2337
2338 #[test]
2339 fn test_mock_runner_http_get_failure() {
2340 let runner = MockCommandRunner::new().with_http_get_failure();
2341 let output = runner.http_get("http://localhost:8080/v1/models");
2342 assert!(!output.success);
2343 }
2344
2345 #[test]
2346 fn test_mock_runner_http_get_custom_response() {
2347 let runner = MockCommandRunner::new().with_http_get_response(r#"{"status":"ok"}"#);
2348 let output = runner.http_get("http://localhost:8080/health");
2349 assert!(output.success);
2350 assert!(output.stdout.contains("ok"));
2351 }
2352
2353 #[test]
2354 fn test_mock_runner_profile_memory_success() {
2355 let runner = MockCommandRunner::new();
2356 let path = PathBuf::from("model.gguf");
2357 let output = runner.profile_memory(&path);
2358 assert!(output.success);
2359 assert!(output.stdout.contains("peak_rss_mb"));
2360 }
2361
2362 #[test]
2363 fn test_mock_runner_profile_memory_failure() {
2364 let runner = MockCommandRunner::new().with_profile_memory_failure();
2365 let path = PathBuf::from("model.gguf");
2366 let output = runner.profile_memory(&path);
2367 assert!(!output.success);
2368 }
2369
2370 #[test]
2371 fn test_mock_runner_new_default_fields() {
2372 let runner = MockCommandRunner::default();
2373 assert!(runner.ollama_create_success);
2374 assert!(runner.serve_success);
2375 assert!(runner.http_get_success);
2376 assert!(runner.profile_memory_success);
2377 }
2378
2379 #[test]
2380 fn test_real_runner_create_ollama_model() {
2381 let runner = RealCommandRunner::new();
2384 let path = PathBuf::from("/nonexistent/path/Modelfile");
2385 let output = runner.create_ollama_model("apr-test-nonexistent:latest", &path);
2386 assert!(output.exit_code != 0 || !output.success || output.stderr.contains("Error"));
2389 }
2390
2391 #[test]
2392 fn test_real_runner_serve_model() {
2393 let runner = RealCommandRunner::with_binary("/nonexistent/binary");
2394 let path = PathBuf::from("model.gguf");
2395 let output = runner.serve_model(&path, 8080);
2396 assert!(!output.success);
2397 }
2398
2399 #[test]
2400 fn test_real_runner_profile_memory() {
2401 let runner = RealCommandRunner::with_binary("/nonexistent/binary");
2402 let path = PathBuf::from("model.gguf");
2403 let output = runner.profile_memory(&path);
2404 assert!(!output.success);
2405 }
2406
2407 #[test]
2410 fn test_mock_runner_chat_success_2plus2() {
2411 let runner = MockCommandRunner::new();
2412 let path = PathBuf::from("model.gguf");
2413 let output = runner.run_chat(&path, "What is 2+2?", false, &[]);
2414 assert!(output.success);
2415 assert!(output.stdout.contains("4"));
2416 }
2417
2418 #[test]
2419 fn test_mock_runner_chat_success_generic() {
2420 let runner = MockCommandRunner::new().with_chat_response("Custom chat response");
2421 let path = PathBuf::from("model.gguf");
2422 let output = runner.run_chat(&path, "Hello", false, &[]);
2423 assert!(output.success);
2424 assert!(output.stdout.contains("Custom chat response"));
2425 }
2426
2427 #[test]
2428 fn test_mock_runner_chat_failure() {
2429 let runner = MockCommandRunner::new().with_chat_failure();
2430 let path = PathBuf::from("model.gguf");
2431 let output = runner.run_chat(&path, "test", false, &[]);
2432 assert!(!output.success);
2433 assert!(output.stderr.contains("Chat failed"));
2434 }
2435
2436 #[test]
2437 fn test_mock_runner_http_post_success() {
2438 let runner = MockCommandRunner::new();
2439 let output = runner.http_post("http://localhost:8080/v1/completions", "{}");
2440 assert!(output.success);
2441 assert!(output.stdout.contains("choices"));
2442 }
2443
2444 #[test]
2445 fn test_mock_runner_http_post_failure() {
2446 let runner = MockCommandRunner::new().with_http_post_failure();
2447 let output = runner.http_post("http://localhost:8080/v1/completions", "{}");
2448 assert!(!output.success);
2449 }
2450
2451 #[test]
2452 fn test_mock_runner_http_post_custom_response() {
2453 let runner =
2454 MockCommandRunner::new().with_http_post_response(r#"{"text":"custom output"}"#);
2455 let output = runner.http_post("http://localhost:8080/v1/completions", "{}");
2456 assert!(output.success);
2457 assert!(output.stdout.contains("custom output"));
2458 }
2459
2460 #[test]
2461 fn test_mock_runner_spawn_serve_success() {
2462 let runner = MockCommandRunner::new();
2463 let path = PathBuf::from("model.gguf");
2464 let output = runner.spawn_serve(&path, 8080, false);
2465 assert!(output.success);
2466 assert!(output.stdout.contains("12345")); }
2468
2469 #[test]
2470 fn test_mock_runner_spawn_serve_failure() {
2471 let runner = MockCommandRunner::new().with_spawn_serve_failure();
2472 let path = PathBuf::from("model.gguf");
2473 let output = runner.spawn_serve(&path, 8080, false);
2474 assert!(!output.success);
2475 }
2476
2477 #[test]
2478 fn test_mock_runner_default_new_chat_fields() {
2479 let runner = MockCommandRunner::default();
2480 assert!(runner.chat_success);
2481 assert!(runner.http_post_success);
2482 assert!(runner.spawn_serve_success);
2483 }
2484
2485 #[test]
2486 fn test_real_runner_chat_nonexistent() {
2487 let runner = RealCommandRunner::with_binary("/nonexistent/binary");
2488 let path = PathBuf::from("model.gguf");
2489 let output = runner.run_chat(&path, "test", false, &[]);
2490 assert!(!output.success);
2491 }
2492
2493 #[test]
2494 fn test_real_runner_spawn_serve_nonexistent() {
2495 let runner = RealCommandRunner::with_binary("/nonexistent/binary");
2496 let path = PathBuf::from("model.gguf");
2497 let output = runner.spawn_serve(&path, 8080, false);
2498 assert!(!output.success);
2499 }
2500}