#![allow(dead_code, unused_imports, unused_variables)]
use anyhow::{Context, Result};
use serde::{Deserialize, Serialize};
/// How a screenshot is obtained for the visual critic to evaluate.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum CaptureMethod {
    /// Capture the entire screen.
    Screen,
    /// Capture a single window, identified by the contained string
    /// (presumably a window title — confirm against the capture backend).
    Window(String),
    /// Capture a browser rendering of the contained URL.
    BrowserUrl(String),
}
/// Per-axis quality scores from one critic evaluation.
///
/// Axis values are on a 0-100 scale (the critic prompt requests `<0-100>`
/// per axis); `overall` is their weighted average.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct VisualScore {
    // The five individual judging axes.
    pub composition: f64,
    pub hierarchy: f64,
    pub readability: f64,
    pub consistency: f64,
    pub accessibility: f64,
    // Weighted average of the five axes; recomputed by `compute_overall`.
    pub overall: f64,
    // Free-form improvement suggestions from the critic. `#[serde(default)]`
    // lets responses omit the field and still deserialize (empty vec).
    #[serde(default)]
    pub suggestions: Vec<String>,
}
impl VisualScore {
    /// Recomputes `overall` as the weighted mean of the five axis scores.
    ///
    /// Weights sum to 1.0; readability counts heaviest (0.25) and
    /// consistency lightest (0.15).
    pub fn compute_overall(&mut self) {
        let weighted = [
            (self.composition, 0.20),
            (self.hierarchy, 0.20),
            (self.readability, 0.25),
            (self.consistency, 0.15),
            (self.accessibility, 0.20),
        ];
        self.overall = weighted.iter().map(|(score, weight)| score * weight).sum();
    }
}
/// Configuration for the iterative capture -> critique -> refine loop.
#[derive(Debug, Clone)]
pub struct VisualFeedbackLoop {
    /// Maximum number of critique iterations before the loop stops.
    pub max_iterations: usize,
    /// Score threshold at which the loop may stop early.
    /// NOTE(review): the default (0.8) suggests a 0-1 scale, while
    /// `VisualScore` axes are 0-100 — confirm the comparison normalizes.
    pub quality_threshold: f64,
    /// Identifier of the vision model used as the critic.
    pub vision_model_id: String,
    /// How screenshots are captured for evaluation.
    pub capture_method: CaptureMethod,
}
impl Default for VisualFeedbackLoop {
fn default() -> Self {
Self {
max_iterations: 5,
quality_threshold: 0.8,
vision_model_id: "vision".to_string(),
capture_method: CaptureMethod::Screen,
}
}
}
/// Final outcome of a visual feedback loop run.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VisualLoopResult {
    /// Number of iterations actually executed.
    pub iterations: usize,
    /// Whether the quality threshold was met (as opposed to stopping
    /// because the iteration cap was hit — confirm in the driver code).
    pub threshold_met: bool,
    /// Score from each iteration, in order.
    pub score_history: Vec<VisualScore>,
    /// Score from the last iteration.
    pub final_score: VisualScore,
}
/// Assembles the instruction prompt sent to the vision-model critic.
///
/// `context` describes the task being evaluated and `iteration` is
/// zero-based (the prompt displays it one-based). When `previous_score` is
/// present, its numbers and suggestions are echoed back so the critic can
/// judge progress between iterations.
pub fn build_critic_prompt(
    context: &str,
    previous_score: Option<&VisualScore>,
    iteration: usize,
) -> String {
    // Header: the task under evaluation plus the 1-based iteration counter.
    let mut prompt = format!(
        "You are evaluating a visual design for the following task:\n\n{}\n\n\
         This is iteration {} of the visual feedback loop.\n\n",
        context,
        iteration + 1,
    );

    // Echo the prior iteration's scores, if any, so the critic sees trend.
    if let Some(prev) = previous_score {
        let axes = [
            ("Composition", prev.composition),
            ("Hierarchy", prev.hierarchy),
            ("Readability", prev.readability),
            ("Consistency", prev.consistency),
            ("Accessibility", prev.accessibility),
            ("Overall", prev.overall),
        ];
        prompt.push_str("Previous iteration scores:\n");
        for &(label, value) in axes.iter() {
            prompt.push_str(&format!("- {}: {:.0}\n", label, value));
        }
        prompt.push_str(&format!(
            "Previous suggestions: {}\n\n",
            prev.suggestions.join("; ")
        ));
    }

    // Fixed tail: the exact JSON shape the critic must answer with.
    prompt.push_str("Analyze the screenshot and respond with ONLY a JSON object (no markdown, no explanation):\n");
    prompt.push_str("```json\n");
    prompt.push_str("{\n");
    prompt.push_str(" \"composition\": <0-100>,\n");
    prompt.push_str(" \"hierarchy\": <0-100>,\n");
    prompt.push_str(" \"readability\": <0-100>,\n");
    prompt.push_str(" \"consistency\": <0-100>,\n");
    prompt.push_str(" \"accessibility\": <0-100>,\n");
    prompt.push_str(" \"overall\": <weighted average>,\n");
    prompt.push_str(" \"suggestions\": [\"specific improvement 1\", \"specific improvement 2\"]\n");
    prompt.push_str("}\n");
    prompt.push_str("```");
    prompt
}
/// Parses the VLM critic's reply into a `VisualScore`.
///
/// The model is asked for bare JSON, but replies often arrive wrapped in
/// markdown fences or prose, so the outermost `{...}` span is extracted
/// before deserializing.
///
/// # Errors
/// Returns an error when the (extracted) text is not valid `VisualScore`
/// JSON.
pub fn parse_critic_response(response: &str) -> Result<VisualScore> {
    let trimmed = response.trim();
    // Slice from the first '{' to the last '}'. The `start <= end` guard is
    // the fix: a '}' occurring before the first '{' (e.g. "} junk {") would
    // otherwise make `start..=end` an invalid range and panic. In every
    // fallback case we hand the raw text to serde and let it report the
    // parse error.
    let json_str = match (trimmed.find('{'), trimmed.rfind('}')) {
        (Some(start), Some(end)) if start <= end => &trimmed[start..=end],
        _ => trimmed,
    };
    serde_json::from_str(json_str)
        .context("Failed to parse VLM critic response as VisualScore JSON")
}
#[cfg(test)]
mod tests {
    use super::*;

    // The weighted average of the five axes must follow the documented
    // 0.20/0.20/0.25/0.15/0.20 weights.
    #[test]
    fn test_visual_score_compute_overall() {
        let mut sample = VisualScore {
            composition: 80.0,
            hierarchy: 70.0,
            readability: 90.0,
            consistency: 85.0,
            accessibility: 75.0,
            ..Default::default()
        };
        sample.compute_overall();
        // 16.0 + 14.0 + 22.5 + 12.75 + 15.0 = 80.25
        assert!((sample.overall - 80.25).abs() < 0.01);
    }

    #[test]
    fn test_parse_critic_response_clean_json() {
        let payload = r#"{"composition":85,"hierarchy":70,"readability":90,"consistency":80,"accessibility":75,"overall":80,"suggestions":["Increase contrast"]}"#;
        let parsed = parse_critic_response(payload).unwrap();
        assert_eq!(parsed.composition, 85.0);
        assert_eq!(parsed.hierarchy, 70.0);
        assert_eq!(parsed.suggestions.len(), 1);
    }

    // Fenced / prose-wrapped replies must still parse via brace extraction.
    #[test]
    fn test_parse_critic_response_with_markdown_fences() {
        let reply = "Here is my analysis:\n```json\n{\"composition\":90,\"hierarchy\":85,\"readability\":88,\"consistency\":92,\"accessibility\":80,\"overall\":87,\"suggestions\":[\"Add focus indicators\"]}\n```\nDone.";
        let parsed = parse_critic_response(reply).unwrap();
        assert_eq!(parsed.composition, 90.0);
        assert_eq!(parsed.overall, 87.0);
    }

    #[test]
    fn test_build_critic_prompt_first_iteration() {
        let text = build_critic_prompt("Build a landing page", None, 0);
        assert!(text.contains("landing page"));
        assert!(text.contains("iteration 1"));
        assert!(!text.contains("Previous iteration"));
    }

    #[test]
    fn test_build_critic_prompt_with_previous() {
        let earlier = VisualScore {
            composition: 60.0,
            hierarchy: 50.0,
            readability: 70.0,
            consistency: 55.0,
            accessibility: 65.0,
            overall: 60.0,
            suggestions: vec![String::from("Fix alignment")],
        };
        let text = build_critic_prompt("Build a dashboard", Some(&earlier), 1);
        assert!(text.contains("iteration 2"));
        assert!(text.contains("Previous iteration scores"));
        assert!(text.contains("Fix alignment"));
    }

    #[test]
    fn test_visual_feedback_loop_default() {
        let cfg = VisualFeedbackLoop::default();
        assert_eq!(cfg.max_iterations, 5);
        assert!((cfg.quality_threshold - 0.8).abs() < f64::EPSILON);
        assert_eq!(cfg.vision_model_id, "vision");
    }

    // Each variant must survive a JSON round trip and stay Debug-printable.
    #[test]
    fn test_capture_method_serde_roundtrip() {
        for method in [
            CaptureMethod::Screen,
            CaptureMethod::Window("Firefox".into()),
            CaptureMethod::BrowserUrl("http://localhost:3000".into()),
        ] {
            let encoded = serde_json::to_string(&method).unwrap();
            let decoded: CaptureMethod = serde_json::from_str(&encoded).unwrap();
            let _ = format!("{:?}", decoded);
        }
    }

    #[test]
    fn test_visual_loop_result_serde() {
        let original = VisualLoopResult {
            iterations: 3,
            threshold_met: true,
            score_history: vec![VisualScore::default()],
            final_score: VisualScore {
                overall: 85.0,
                ..Default::default()
            },
        };
        let round_tripped: VisualLoopResult =
            serde_json::from_str(&serde_json::to_string(&original).unwrap()).unwrap();
        assert_eq!(round_tripped.iterations, 3);
        assert!(round_tripped.threshold_met);
    }
}