//! reasonkit-web 0.1.7
//!
//! High-performance MCP server for browser automation, web capture, and content extraction.
//! Rust-powered CDP client for AI agents. See the crate documentation for details.
//! Verification Result Types
//!
//! Defines the outcome structures for triangulated verification.

use super::sources::{SourceQuality, SourceTier};
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};

/// Overall verification status
/// Overall verification status
///
/// Serialized in snake_case (e.g. `partially_verified`) per the serde
/// attribute below. `Pending` is the `Default` variant, representing a
/// check that has not completed yet.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum VerificationStatus {
    /// Claim verified by 3+ independent sources with agreement
    Verified,
    /// Claim partially verified (2 sources, or minor discrepancies)
    PartiallyVerified,
    /// Sources conflict - claim disputed
    Conflicting,
    /// Could not verify - insufficient sources
    Unverified,
    /// Claim appears to be false based on sources
    Refuted,
    /// Verification in progress
    #[default]
    Pending,
}

impl VerificationStatus {
    /// Whether this status counts as a successful verification
    /// (fully or partially verified).
    pub fn is_success(&self) -> bool {
        *self == Self::Verified || *self == Self::PartiallyVerified
    }

    /// Whether this status signals a problem with the claim
    /// (conflicting evidence or outright refutation).
    pub fn is_problem(&self) -> bool {
        *self == Self::Conflicting || *self == Self::Refuted
    }

    /// Get a human-readable description
    pub fn description(&self) -> &'static str {
        match self {
            Self::Pending => "Verification in progress",
            Self::Verified => "Verified by 3+ independent sources",
            Self::PartiallyVerified => "Partially verified (fewer sources or minor discrepancies)",
            Self::Conflicting => "Sources conflict - claim disputed",
            Self::Unverified => "Could not verify - insufficient sources",
            Self::Refuted => "Claim appears false based on sources",
        }
    }

    /// Get an emoji indicator for this status
    pub fn emoji(&self) -> &'static str {
        match self {
            Self::Pending => "\u{23f3}",           // Hourglass
            Self::Verified => "\u{2705}",          // Green checkmark
            Self::PartiallyVerified => "\u{26a0}", // Warning
            Self::Conflicting => "\u{274c}",       // Red X
            Self::Unverified => "\u{2753}",        // Question mark
            Self::Refuted => "\u{1f6ab}",          // No entry
        }
    }
}

/// A verified source with extracted information
/// A verified source with extracted information
///
/// One record per URL consulted during verification, carrying both the
/// fetch outcome (HTTP status, errors, access timestamp) and the
/// assessment (quality tier, relevance, stance on the claim).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VerifiedSource {
    /// Original URL
    pub url: String,
    /// Page title, if one could be extracted
    pub title: Option<String>,
    /// Source quality assessment
    pub quality: SourceQuality,
    /// Extracted relevant content (snippet)
    pub content_snippet: Option<String>,
    /// Does this source support the claim? `None` = neutral/undetermined
    pub supports_claim: Option<bool>,
    /// Confidence in this source's relevance (0.0 - 1.0)
    pub relevance_score: f64,
    /// When this source was accessed (UTC)
    pub accessed_at: DateTime<Utc>,
    /// Any errors encountered while accessing; non-empty marks the source unusable
    pub access_errors: Vec<String>,
    /// HTTP status code if available; 2xx/3xx count as accessible
    pub http_status: Option<u16>,
}

impl VerifiedSource {
    /// Build a source record for `url` with the given quality assessment.
    ///
    /// All extraction fields start empty, `relevance_score` starts at 0.0,
    /// and `accessed_at` is stamped with the current UTC time.
    pub fn new(url: String, quality: SourceQuality) -> Self {
        Self {
            url,
            quality,
            title: None,
            content_snippet: None,
            supports_claim: None,
            relevance_score: 0.0,
            accessed_at: Utc::now(),
            access_errors: Vec::new(),
            http_status: None,
        }
    }

    /// A source is usable for verification when it was fetched without
    /// errors, returned a non-error HTTP status (2xx/3xx, or no status
    /// recorded), and its quality tier is known.
    pub fn is_usable(&self) -> bool {
        if !self.access_errors.is_empty() {
            return false;
        }
        if let Some(status) = self.http_status {
            if !(200..400).contains(&status) {
                return false;
            }
        }
        self.quality.tier != SourceTier::Unknown
    }

    /// Tier weight scaled by relevance: this source's contribution to
    /// the overall confidence score.
    pub fn weighted_confidence(&self) -> f64 {
        self.relevance_score * self.quality.tier.weight()
    }
}

/// Evidence supporting or refuting a claim
/// Evidence supporting or refuting a claim
///
/// A single quote pulled from one source, tagged with its stance on the
/// claim and the confidence placed in it.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Evidence {
    /// URL of the source providing this evidence
    pub source_url: String,
    /// Extracted quote or content
    pub quote: String,
    /// Does this evidence support (true) or refute (false) the claim?
    pub supports: bool,
    /// Confidence in this evidence (0.0 - 1.0)
    pub confidence: f64,
    /// Position in source content where found, if known
    pub position: Option<usize>,
}

/// Aggregated verification metrics
/// Aggregated verification metrics
///
/// Snapshot of one verification run: how many sources were consulted,
/// how they split on the claim, their tier distribution, and timing.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VerificationMetrics {
    /// Total sources consulted
    pub total_sources: usize,
    /// Sources that were successfully accessed
    pub accessible_sources: usize,
    /// Accessible sources that support the claim
    pub supporting_sources: usize,
    /// Accessible sources that refute the claim
    pub refuting_sources: usize,
    /// Accessible sources that are neutral/unclear
    pub neutral_sources: usize,
    /// Accessible sources rated Tier 1
    pub tier1_count: usize,
    /// Accessible sources rated Tier 2
    pub tier2_count: usize,
    /// Accessible sources rated Tier 3
    pub tier3_count: usize,
    /// Average weighted confidence (tier weight * relevance) over accessible sources
    pub average_confidence: f64,
    /// Total verification time in milliseconds
    pub verification_time_ms: u64,
}

impl VerificationMetrics {
    /// Create new empty (all-zero) metrics
    pub fn new() -> Self {
        Self {
            total_sources: 0,
            accessible_sources: 0,
            supporting_sources: 0,
            refuting_sources: 0,
            neutral_sources: 0,
            tier1_count: 0,
            tier2_count: 0,
            tier3_count: 0,
            average_confidence: 0.0,
            verification_time_ms: 0,
        }
    }

    /// Calculate metrics from verified sources
    ///
    /// Only sources passing [`VerifiedSource::is_usable`] contribute to the
    /// stance counts, tier counts, and average confidence; `total_sources`
    /// counts everything. `time_ms` is recorded as-is.
    pub fn from_sources(sources: &[VerifiedSource], time_ms: u64) -> Self {
        let mut metrics = Self::new();
        metrics.total_sources = sources.len();
        metrics.verification_time_ms = time_ms;

        // Single pass over the usable sources, accumulating every counter.
        let mut confidence_sum = 0.0;
        for source in sources.iter().filter(|s| s.is_usable()) {
            metrics.accessible_sources += 1;
            confidence_sum += source.weighted_confidence();

            match source.supports_claim {
                Some(true) => metrics.supporting_sources += 1,
                Some(false) => metrics.refuting_sources += 1,
                None => metrics.neutral_sources += 1,
            }

            match source.quality.tier {
                SourceTier::Tier1 => metrics.tier1_count += 1,
                SourceTier::Tier2 => metrics.tier2_count += 1,
                SourceTier::Tier3 => metrics.tier3_count += 1,
                _ => {} // other tiers are not tracked individually
            }
        }

        if metrics.accessible_sources > 0 {
            metrics.average_confidence = confidence_sum / metrics.accessible_sources as f64;
        }

        metrics
    }

    /// Check if triangulation requirement is met (3+ quality sources)
    ///
    /// Requires at least 3 accessible sources, of which at least 2 are
    /// Tier 1 or Tier 2.
    pub fn meets_triangulation(&self) -> bool {
        let high_quality = self.tier1_count + self.tier2_count;
        self.accessible_sources >= 3 && high_quality >= 2
    }

    /// Determine verification status based on metrics
    ///
    /// Uses a multi-step approach:
    /// 1. Check if triangulation requirements are met
    /// 2. Detect genuine conflicts (roughly even split)
    /// 3. Determine majority status (agreement vs refutation)
    /// 4. Apply confidence as a modifier within categories
    pub fn determine_status(&self) -> VerificationStatus {
        if !self.meets_triangulation() {
            return VerificationStatus::Unverified;
        }

        // Support/refutation ratios over accessible sources (0.0 when none).
        let (agreement_ratio, refutation_ratio) = if self.accessible_sources > 0 {
            let total = self.accessible_sources as f64;
            (
                self.supporting_sources as f64 / total,
                self.refuting_sources as f64 / total,
            )
        } else {
            (0.0, 0.0)
        };

        // Step 1: Detect genuine conflict (roughly 1/3 on each side).
        if agreement_ratio.min(refutation_ratio) > 0.33 {
            return VerificationStatus::Conflicting;
        }

        // Step 2: Clear refutation — a majority of sources refute.
        if refutation_ratio > 0.5 {
            return VerificationStatus::Refuted;
        }

        // Step 3: Strong consensus at 2/3+ agreement; confidence then
        // modulates certainty WITHIN the verified category.
        if agreement_ratio >= 0.67 {
            if self.average_confidence >= 0.7 {
                return VerificationStatus::Verified;
            }
            return VerificationStatus::PartiallyVerified;
        }

        // Step 4: Simple majority yields partial verification.
        if agreement_ratio > 0.5 {
            return VerificationStatus::PartiallyVerified;
        }

        VerificationStatus::Unverified
    }
}

impl Default for VerificationMetrics {
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_verification_status_success() {
        // Only Verified and PartiallyVerified count as success.
        for status in [
            VerificationStatus::Verified,
            VerificationStatus::PartiallyVerified,
        ] {
            assert!(status.is_success());
        }
        for status in [
            VerificationStatus::Conflicting,
            VerificationStatus::Unverified,
        ] {
            assert!(!status.is_success());
        }
    }

    #[test]
    fn test_verification_status_problem() {
        // Conflicting and Refuted flag problems; Verified does not.
        assert!(VerificationStatus::Conflicting.is_problem());
        assert!(VerificationStatus::Refuted.is_problem());
        assert!(!VerificationStatus::Verified.is_problem());
    }

    #[test]
    fn test_verified_source_usable() {
        let quality = SourceQuality {
            tier: SourceTier::Tier1,
            ..Default::default()
        };
        let mut source = VerifiedSource::new("https://example.com".to_string(), quality);
        source.http_status = Some(200);
        assert!(source.is_usable());

        // Any recorded access error makes the source unusable.
        source.access_errors.push("timeout".to_string());
        assert!(!source.is_usable());
    }

    #[test]
    fn test_metrics_triangulation() {
        let strong = VerificationMetrics {
            accessible_sources: 3,
            tier1_count: 1,
            tier2_count: 2,
            ..VerificationMetrics::new()
        };
        assert!(strong.meets_triangulation());

        // Only one Tier 1/2 source: triangulation fails.
        let weak = VerificationMetrics {
            accessible_sources: 3,
            tier2_count: 1,
            tier3_count: 2,
            ..VerificationMetrics::new()
        };
        assert!(!weak.meets_triangulation());
    }

    #[test]
    fn test_metrics_determine_status() {
        let mut metrics = VerificationMetrics {
            accessible_sources: 4,
            tier1_count: 2,
            tier2_count: 2,
            supporting_sources: 4,
            average_confidence: 0.8,
            ..VerificationMetrics::new()
        };
        assert_eq!(metrics.determine_status(), VerificationStatus::Verified);

        // An even split between support and refutation is a conflict.
        metrics.supporting_sources = 2;
        metrics.refuting_sources = 2;
        assert_eq!(metrics.determine_status(), VerificationStatus::Conflicting);
    }
}