adk_eval/lib.rs
//! # adk-eval
//!
//! Agent evaluation framework for ADK-Rust.
//!
//! This crate provides comprehensive tools for testing and validating agent behavior,
//! enabling developers to ensure their agents perform correctly and consistently.
//!
//! ## Features
//!
//! - **Test Definitions**: Structured format for defining test cases (`.test.json`)
//! - **Trajectory Evaluation**: Validate tool call sequences
//! - **Response Quality**: Assess final output quality with multiple metrics
//! - **Multiple Criteria**: Ground truth, rubric-based, and LLM-judged evaluation
//! - **Automation**: Run evaluations programmatically or via CLI
//!
//! ## Quick Start
//!
//! ```rust,ignore
//! use adk_eval::{Evaluator, EvaluationConfig, EvaluationCriteria};
//! use std::sync::Arc;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     // Create your agent
//!     let agent = create_my_agent()?;
//!
//!     // Configure evaluator
//!     let config = EvaluationConfig {
//!         criteria: EvaluationCriteria {
//!             tool_trajectory_score: Some(1.0), // Exact tool match
//!             response_similarity: Some(0.8),   // 80% text similarity
//!             ..Default::default()
//!         },
//!         ..Default::default()
//!     };
//!
//!     let evaluator = Evaluator::new(config);
//!
//!     // Run evaluation
//!     let result = evaluator
//!         .evaluate_file(agent, "tests/my_agent.test.json")
//!         .await?;
//!
//!     assert!(result.passed, "Evaluation failed: {:?}", result.failures);
//!     Ok(())
//! }
//! ```
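//!
//! ## Test Files
//!
//! Test cases are defined in `.test.json` files; the [`schema`] module provides the
//! corresponding types ([`TestFile`], [`EvalCase`], [`Turn`], [`ToolUse`]). Below is a
//! minimal sketch of loading a file by hand, assuming these types implement
//! `serde::Deserialize`; normally [`Evaluator`]'s `evaluate_file` does this for you.
//!
//! ```rust,ignore
//! use adk_eval::schema::TestFile;
//!
//! // Parse the raw JSON into the schema types to inspect or modify cases
//! // before running them through the evaluator.
//! let contents = std::fs::read_to_string("tests/my_agent.test.json")?;
//! let test_file: TestFile = serde_json::from_str(&contents)?;
//! ```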

/// Evaluation criteria and thresholds (tool trajectory, response match, rubrics).
pub mod criteria;
/// Error and result types for evaluation runs.
pub mod error;
/// The `Evaluator` entry point and its configuration.
pub mod evaluator;
/// LLM-as-judge scoring: rubric evaluation and semantic response matching.
pub mod llm_judge;
/// Evaluation reports, per-test-case results, and failure details.
pub mod report;
/// Types describing the `.test.json` test file format.
pub mod schema;
/// Scorers for tool trajectories and response quality.
pub mod scoring;

// Re-exports
pub use criteria::{
    EvaluationCriteria, ResponseMatchConfig, Rubric, RubricConfig, ToolTrajectoryConfig,
};
pub use error::{EvalError, Result};
pub use evaluator::{EvaluationConfig, Evaluator};
pub use llm_judge::{
    LlmJudge, LlmJudgeConfig, RubricEvaluationResult, RubricScore, SemanticMatchResult,
};
pub use report::{EvaluationReport, EvaluationResult, Failure, TestCaseResult};
pub use schema::{EvalCase, EvalSet, IntermediateData, SessionInput, TestFile, ToolUse, Turn};
pub use scoring::{ResponseScorer, ToolTrajectoryScorer};

/// Prelude for convenient imports
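///
/// A minimal usage sketch via a glob import (the criteria values are illustrative,
/// not recommended defaults):
///
/// ```rust,ignore
/// use adk_eval::prelude::*;
///
/// let config = EvaluationConfig {
///     criteria: EvaluationCriteria {
///         tool_trajectory_score: Some(1.0),
///         ..Default::default()
///     },
///     ..Default::default()
/// };
/// let evaluator = Evaluator::new(config);
/// ```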
pub mod prelude {
    pub use crate::criteria::{
        EvaluationCriteria, ResponseMatchConfig, Rubric, RubricConfig, ToolTrajectoryConfig,
    };
    pub use crate::error::{EvalError, Result};
    pub use crate::evaluator::{EvaluationConfig, Evaluator};
    pub use crate::llm_judge::{
        LlmJudge, LlmJudgeConfig, RubricEvaluationResult, SemanticMatchResult,
    };
    pub use crate::report::{EvaluationReport, EvaluationResult, Failure, TestCaseResult};
    pub use crate::schema::{EvalCase, EvalSet, TestFile, ToolUse, Turn};
}