//! AI-powered quality evaluation, commitment verification, and fraud detection backed by an LLM.
use async_trait::async_trait;
use serde::{Deserialize, Serialize};
use crate::error::{AiError, Result};
use crate::evaluator::{EvaluationResult, QualityEvaluator};
use crate::llm::{ChatRequest, LlmClient};
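/// LLM-backed implementation of [`QualityEvaluator`] that prompts a model to
/// score code or content and parses its JSON verdict.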
pub struct AiEvaluator {
llm: LlmClient,
config: EvaluatorConfig,
}
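/// Configuration for [`AiEvaluator`] requests.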
#[derive(Debug, Clone)]
pub struct EvaluatorConfig {
/// Maximum number of tokens the model may use for its response.
pub max_response_tokens: u32,
/// Sampling temperature for evaluation requests.
pub temperature: f32,
/// Whether to ask the model for detailed feedback.
pub detailed_feedback: bool,
}
impl Default for EvaluatorConfig {
fn default() -> Self {
Self {
max_response_tokens: 1024,
temperature: 0.3,
detailed_feedback: true,
}
}
}
impl AiEvaluator {
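/// Creates an evaluator with the default [`EvaluatorConfig`].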
#[must_use]
pub fn new(llm: LlmClient) -> Self {
Self {
llm,
config: EvaluatorConfig::default(),
}
}
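/// Creates an evaluator with a custom configuration.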
#[must_use]
pub fn with_config(llm: LlmClient, config: EvaluatorConfig) -> Self {
Self { llm, config }
}
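/// Builds the code-review prompt, asking for quality, complexity, and originality scores as JSON.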
fn build_code_prompt(code: &str, language: &str) -> String {
format!(
r#"You are an expert code reviewer evaluating a piece of {language} code.
Evaluate the following code and provide scores from 0-100 for each criterion:
1. **Quality Score**: Code correctness, best practices, proper error handling
2. **Complexity Score**: Appropriate complexity (not over-engineered, not too simple)
3. **Originality Score**: Creative solutions, good design patterns
Also provide brief feedback (2-3 sentences) on the code.
Respond in JSON format:
{{
"quality_score": <number>,
"complexity_score": <number>,
"originality_score": <number>,
"feedback": "<string>"
}}
Code to evaluate:
```{language}
{code}
```"#
)
}
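/// Builds the content-evaluation prompt, mirroring the structure of the code prompt.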
fn build_content_prompt(content: &str, content_type: &str) -> String {
format!(
r#"You are an expert content evaluator assessing a piece of {content_type} content.
Evaluate the following content and provide scores from 0-100 for each criterion:
1. **Quality Score**: Clarity, accuracy, professionalism
2. **Complexity Score**: Appropriate depth for the audience
3. **Originality Score**: Unique insights, creative presentation
Also provide brief feedback (2-3 sentences) on the content.
Respond in JSON format:
{{
"quality_score": <number>,
"complexity_score": <number>,
"originality_score": <number>,
"feedback": "<string>"
}}
Content to evaluate:
---
{content}
---"#
)
}
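/// Extracts the JSON object between the first `{` and the last `}` of the model
/// response, then averages the three scores into `overall_score`.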
fn parse_evaluation(response: &str) -> Result<EvaluationResult> {
let json_str = if let Some(start) = response.find('{') {
if let Some(end) = response.rfind('}') {
&response[start..=end]
} else {
response
}
} else {
response
};
let parsed: EvalResponse = serde_json::from_str(json_str).map_err(|e| {
AiError::EvaluationFailed(format!("Failed to parse evaluation response: {e}"))
})?;
let overall =
(parsed.quality_score + parsed.complexity_score + parsed.originality_score) / 3.0;
Ok(EvaluationResult {
quality_score: parsed.quality_score,
complexity_score: parsed.complexity_score,
originality_score: parsed.originality_score,
overall_score: overall,
feedback: parsed.feedback,
})
}
}
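/// Raw JSON shape the model is asked to return for evaluations.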
#[derive(Debug, Deserialize)]
struct EvalResponse {
quality_score: f64,
complexity_score: f64,
originality_score: f64,
feedback: String,
}
#[async_trait]
impl QualityEvaluator for AiEvaluator {
async fn evaluate_code(&self, code: &str, language: &str) -> Result<EvaluationResult> {
if code.len() > 50_000 {
return Err(AiError::Validation(
"Code too long for evaluation (max 50KB)".to_string(),
));
}
let prompt = Self::build_code_prompt(code, language);
let request = ChatRequest::with_system(
"You are an expert code reviewer. Always respond with valid JSON.",
prompt,
)
.max_tokens(self.config.max_response_tokens)
.temperature(self.config.temperature);
let response = self.llm.chat(request).await?;
Self::parse_evaluation(&response.message.content)
}
async fn evaluate_content(
&self,
content: &str,
content_type: &str,
) -> Result<EvaluationResult> {
if content.len() > 100_000 {
return Err(AiError::Validation(
"Content too long for evaluation (max 100KB)".to_string(),
));
}
let prompt = Self::build_content_prompt(content, content_type);
let request = ChatRequest::with_system(
"You are an expert content evaluator. Always respond with valid JSON.",
prompt,
)
.max_tokens(self.config.max_response_tokens)
.temperature(self.config.temperature);
let response = self.llm.chat(request).await?;
Self::parse_evaluation(&response.message.content)
}
}
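/// Verifies commitment-fulfillment evidence by asking an LLM for a verdict,
/// a confidence level, and reasoning.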
pub struct AiCommitmentVerifier {
llm: LlmClient,
}
impl AiCommitmentVerifier {
#[must_use]
pub fn new(llm: LlmClient) -> Self {
Self { llm }
}
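/// Asks the model whether the submitted evidence fulfills the commitment.
///
/// # Errors
/// Returns an error if the LLM request fails or the response cannot be parsed.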
pub async fn verify_evidence(
&self,
request: &VerificationRequest,
) -> Result<VerificationResult> {
let prompt = Self::build_verification_prompt(request);
let chat_request = ChatRequest::with_system(
"You are an expert at verifying commitment fulfillment. Be fair but thorough. Always respond with valid JSON.",
prompt,
)
.max_tokens(1024)
.temperature(0.2);
let response = self.llm.chat(chat_request).await?;
Self::parse_verification(&response.message.content)
}
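/// Builds the verification prompt from the commitment details and evidence.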
fn build_verification_prompt(request: &VerificationRequest) -> String {
format!(
r#"Verify if the following commitment has been fulfilled based on the evidence provided.
**Commitment Title:** {}
**Commitment Description:** {}
**Deadline:** {}
**Evidence URL:** {}
**Evidence Description:** {}
Evaluate the evidence and determine:
1. Is the evidence valid and accessible?
2. Does the evidence match the commitment requirements?
3. Was it completed on time?
4. What is your confidence level (0-100)?
Respond in JSON format:
{{
"fulfilled": <boolean>,
"confidence": <number 0-100>,
"reasoning": "<string explaining your decision>",
"suggestions": "<string with any suggestions for improvement, or null>"
}}"#,
request.commitment_title,
request
.commitment_description
.as_deref()
.unwrap_or("Not provided"),
request.deadline,
request.evidence_url,
request
.evidence_description
.as_deref()
.unwrap_or("Not provided")
)
}
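/// Parses the model's JSON verdict; confidence below 70 flags the result for human review.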
fn parse_verification(response: &str) -> Result<VerificationResult> {
let json_str = if let Some(start) = response.find('{') {
if let Some(end) = response.rfind('}') {
&response[start..=end]
} else {
response
}
} else {
response
};
let parsed: VerifyResponse = serde_json::from_str(json_str).map_err(|e| {
AiError::VerificationFailed(format!("Failed to parse verification response: {e}"))
})?;
Ok(VerificationResult {
fulfilled: parsed.fulfilled,
confidence: parsed.confidence,
reasoning: parsed.reasoning,
suggestions: parsed.suggestions,
needs_human_review: parsed.confidence < 70.0,
})
}
}
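/// Commitment details and submitted evidence to be verified.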
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VerificationRequest {
pub commitment_title: String,
pub commitment_description: Option<String>,
pub deadline: String,
pub evidence_url: String,
pub evidence_description: Option<String>,
}
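/// Outcome of an AI evidence verification.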
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VerificationResult {
pub fulfilled: bool,
pub confidence: f64,
pub reasoning: String,
pub suggestions: Option<String>,
pub needs_human_review: bool,
}
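/// Raw JSON shape the model is asked to return for verification.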
#[derive(Debug, Deserialize)]
struct VerifyResponse {
fulfilled: bool,
confidence: f64,
reasoning: String,
suggestions: Option<String>,
}
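/// Screens content and user history for fraud or gaming indicators using an LLM.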
pub struct AiFraudDetector {
llm: LlmClient,
}
impl AiFraudDetector {
#[must_use]
pub fn new(llm: LlmClient) -> Self {
Self { llm }
}
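/// Asks the model to assess fraud risk for the given content and user history.
///
/// # Errors
/// Returns an error if the LLM request fails or the response cannot be parsed.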
pub async fn check_fraud(&self, request: &FraudCheckRequest) -> Result<FraudCheckResult> {
let prompt = Self::build_fraud_prompt(request);
let chat_request = ChatRequest::with_system(
"You are a fraud detection specialist. Analyze content for signs of manipulation, gaming, or fraudulent activity. Be thorough but fair. Always respond with valid JSON.",
prompt,
)
.max_tokens(1024)
.temperature(0.2);
let response = self.llm.chat(chat_request).await?;
Self::parse_fraud_check(&response.message.content)
}
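/// Builds the fraud-analysis prompt from the content and the user's track record.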
fn build_fraud_prompt(request: &FraudCheckRequest) -> String {
format!(
r#"Analyze the following for potential fraud or gaming indicators:
**Content Type:** {}
**Content:**
{}
**User History:**
- Commitments Made: {}
- Commitments Fulfilled: {}
- Average Quality Score: {}
Check for:
1. Plagiarized or generated content
2. Self-dealing or fake evidence
3. Pattern manipulation
4. Suspicious timing or velocity
Respond in JSON format:
{{
"risk_level": "<low|medium|high|critical>",
"risk_score": <number 0-100>,
"indicators": ["<list of suspicious indicators found>"],
"recommendation": "<string with recommended action>"
}}"#,
request.content_type,
request.content,
request.commitments_made,
request.commitments_fulfilled,
request.avg_quality_score.unwrap_or(0.0)
)
}
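/// Parses the model's JSON risk assessment from the response.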
fn parse_fraud_check(response: &str) -> Result<FraudCheckResult> {
let json_str = if let Some(start) = response.find('{') {
if let Some(end) = response.rfind('}') {
&response[start..=end]
} else {
response
}
} else {
response
};
let parsed: FraudResponse = serde_json::from_str(json_str).map_err(|e| {
AiError::EvaluationFailed(format!("Failed to parse fraud check response: {e}"))
})?;
Ok(FraudCheckResult {
risk_level: parsed.risk_level,
risk_score: parsed.risk_score,
indicators: parsed.indicators,
recommendation: parsed.recommendation,
})
}
}
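/// Content plus user history statistics used for fraud screening.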
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FraudCheckRequest {
pub content_type: String,
pub content: String,
pub commitments_made: i32,
pub commitments_fulfilled: i32,
pub avg_quality_score: Option<f64>,
}
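/// Risk assessment returned by [`AiFraudDetector::check_fraud`].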
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FraudCheckResult {
pub risk_level: String,
pub risk_score: f64,
pub indicators: Vec<String>,
pub recommendation: String,
}
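/// Raw JSON shape the model is asked to return for fraud checks.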
#[derive(Debug, Deserialize)]
struct FraudResponse {
risk_level: String,
risk_score: f64,
indicators: Vec<String>,
recommendation: String,
}