llm_toolkit_expertise/
lib.rs

//! # llm-toolkit-expertise
//!
//! Agent as Code v2: Graph-based composition system for LLM agent capabilities.
//!
//! This library provides a flexible, composition-based approach to defining agent expertise
//! through weighted knowledge fragments. Instead of inheritance hierarchies, expertise is
//! built by composing independent fragments with priorities and contextual activation rules.
//!
//! ## Core Concepts
//!
//! - **Composition over Inheritance**: Build agents like RPG equipment sets
//! - **Weighted Fragments**: Knowledge with priority levels (Critical/High/Normal/Low)
//! - **Context-Driven**: Dynamic behavior based on TaskHealth and context
//!
//! ## Example
//!
//! ```rust
//! use llm_toolkit_expertise::{
//!     Expertise, WeightedFragment, KnowledgeFragment,
//!     Priority, ContextProfile, TaskHealth,
//! };
//!
//! let expertise = Expertise::new("code-reviewer", "1.0")
//!     .with_tag("lang:rust")
//!     .with_tag("role:reviewer")
//!     .with_fragment(
//!         WeightedFragment::new(KnowledgeFragment::Text(
//!             "Always verify code compiles before review".to_string()
//!         ))
//!         .with_priority(Priority::Critical)
//!     )
//!     .with_fragment(
//!         WeightedFragment::new(KnowledgeFragment::Logic {
//!             instruction: "Check for security issues".to_string(),
//!             steps: vec![
//!                 "Scan for SQL injection vulnerabilities".to_string(),
//!                 "Check input validation".to_string(),
//!             ],
//!         })
//!         .with_priority(Priority::High)
//!         .with_context(ContextProfile::Conditional {
//!             task_types: vec!["security-review".to_string()],
//!             user_states: vec![],
//!             task_health: None,
//!         })
//!     );
//!
//! // Generate prompt
//! let prompt = expertise.to_prompt();
//! println!("{}", prompt);
//!
//! // Generate tree visualization
//! let tree = expertise.to_tree();
//! println!("{}", tree);
//!
//! // Generate Mermaid graph
//! let mermaid = expertise.to_mermaid();
//! println!("{}", mermaid);
//! ```
//!
//! ## JSON Schema
//!
//! This library supports JSON Schema generation for expertise definitions:
//!
//! ```rust
//! use llm_toolkit_expertise::dump_expertise_schema;
//!
//! let schema = dump_expertise_schema();
//! println!("{}", serde_json::to_string_pretty(&schema).unwrap());
//! ```

// Allow the crate to reference itself by name
extern crate self as llm_toolkit_expertise;

pub mod context;
pub mod fragment;
pub mod types;

// Re-export main types
pub use context::{ContextMatcher, ContextProfile, Priority, TaskHealth};
pub use fragment::{Anchor, KnowledgeFragment};
pub use types::{Expertise, WeightedFragment};

// Optional integration with llm-toolkit
#[cfg(feature = "integration")]
mod integration;

/// Generate the JSON Schema for the `Expertise` type.
///
/// Returns the JSON Schema as a `serde_json::Value` for inspection or storage.
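///
/// # Example
///
/// A minimal sketch, mirroring the crate-level doc example:
///
/// ```rust
/// use llm_toolkit_expertise::dump_expertise_schema;
///
/// let schema = dump_expertise_schema();
/// println!("{}", serde_json::to_string_pretty(&schema).unwrap());
/// ```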
pub fn dump_expertise_schema() -> serde_json::Value {
    let schema = schemars::schema_for!(Expertise);
    serde_json::to_value(&schema).expect("Failed to serialize schema")
}

/// Save the Expertise JSON Schema to a file.
///
/// # Errors
///
/// Returns an error if serializing the schema or writing the file fails.
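///
/// # Example
///
/// A minimal sketch; the output path is illustrative:
///
/// ```rust,no_run
/// use llm_toolkit_expertise::save_expertise_schema;
///
/// save_expertise_schema("expertise.schema.json").expect("failed to write schema");
/// ```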
pub fn save_expertise_schema(path: impl AsRef<std::path::Path>) -> std::io::Result<()> {
    let schema = dump_expertise_schema();
    let json = serde_json::to_string_pretty(&schema)?;
    std::fs::write(path, json)
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_dump_schema() {
        let schema = dump_expertise_schema();
        assert!(schema.is_object());
        assert!(schema.get("$schema").is_some());
    }

    #[test]
    fn test_basic_expertise_creation() {
        let expertise = Expertise::new("test", "1.0")
            .with_tag("test")
            .with_fragment(WeightedFragment::new(KnowledgeFragment::Text(
                "Test".to_string(),
            )));

        assert_eq!(expertise.id, "test");
        assert_eq!(expertise.version, "1.0");
        assert_eq!(expertise.tags.len(), 1);
        assert_eq!(expertise.content.len(), 1);
    }

    #[test]
    fn test_to_prompt_generates_valid_output() {
        let expertise = Expertise::new("test", "1.0").with_fragment(WeightedFragment::new(
            KnowledgeFragment::Text("Test content".to_string()),
        ));

        let prompt = expertise.to_prompt();
        assert!(prompt.contains("Expertise: test"));
        assert!(prompt.contains("Test content"));
    }

    #[test]
    fn test_visualizations() {
        let expertise = Expertise::new("test", "1.0").with_fragment(WeightedFragment::new(
            KnowledgeFragment::Text("Test".to_string()),
        ));

        // Tree visualization
        let tree = expertise.to_tree();
        assert!(tree.contains("Expertise: test"));

        // Mermaid visualization
        let mermaid = expertise.to_mermaid();
        assert!(mermaid.contains("graph TD"));
        assert!(mermaid.contains("Expertise: test"));
    }
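
    // Sketch test for `save_expertise_schema`: round-trips the schema through a
    // file in the system temp directory. Assumes (as `test_dump_schema` asserts)
    // that the generated schema contains a "$schema" key.
    #[test]
    fn test_save_expertise_schema_roundtrip() {
        let path = std::env::temp_dir().join("llm_toolkit_expertise_schema_test.json");
        save_expertise_schema(&path).expect("schema should be written");

        let contents = std::fs::read_to_string(&path).expect("schema file should be readable");
        assert!(contents.contains("\"$schema\""));

        // Clean up; ignore errors if the file was already removed.
        let _ = std::fs::remove_file(&path);
    }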
}