llm_toolkit_expertise/lib.rs
//! # ⚠️ DEPRECATED - llm-toolkit-expertise
//!
//! **This crate has been archived and integrated into `llm-toolkit` core.**
//!
//! **Migration Path:**
//! - Replace `llm_toolkit_expertise::*` with `llm_toolkit::agent::expertise::*` (see the sketch below)
//! - Context types are now in `llm_toolkit::context::{Priority, TaskHealth, ContextProfile}`
//! - All APIs remain the same; only the import paths have changed
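//!
//! A minimal sketch of the import change, following the paths listed above (shown as `ignore` because `llm-toolkit` is not a dependency of this archived crate):
//!
//! ```ignore
//! // Before (this crate):
//! use llm_toolkit_expertise::{ContextProfile, Expertise, Priority, TaskHealth};
//!
//! // After (llm-toolkit core):
//! use llm_toolkit::agent::expertise::Expertise;
//! use llm_toolkit::context::{ContextProfile, Priority, TaskHealth};
//! ```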
//!
//! **Version Support:**
//! - v0.2.0: Final release with deprecation notice
//! - v0.1.0: Original release
//! - No further updates will be published
//!
//! ---
//!
//! # llm-toolkit-expertise (Archived)
//!
//! Agent as Code v2: Graph-based composition system for LLM agent capabilities.
//!
//! This library provides a flexible, composition-based approach to defining agent expertise
//! through weighted knowledge fragments. Instead of inheritance hierarchies, expertise is
//! built by composing independent fragments with priorities and contextual activation rules.
//!
//! ## Core Concepts
//!
//! - **Composition over Inheritance**: Build agents like RPG equipment sets
//! - **Weighted Fragments**: Knowledge with priority levels (Critical/High/Normal/Low)
//! - **Context-Driven**: Dynamic behavior based on TaskHealth and context
//!
//! ## Example
//!
//! ```rust
//! use llm_toolkit_expertise::{
//!     Expertise, WeightedFragment, KnowledgeFragment,
//!     Priority, ContextProfile,
//! };
//!
//! let expertise = Expertise::new("code-reviewer", "1.0")
//!     .with_description("Rust code review specialist")
//!     .with_tag("lang:rust")
//!     .with_tag("role:reviewer")
//!     .with_fragment(
//!         WeightedFragment::new(KnowledgeFragment::Text(
//!             "Always verify code compiles before review".to_string()
//!         ))
//!         .with_priority(Priority::Critical)
//!     )
//!     .with_fragment(
//!         WeightedFragment::new(KnowledgeFragment::Logic {
//!             instruction: "Check for security issues".to_string(),
//!             steps: vec![
//!                 "Scan for SQL injection vulnerabilities".to_string(),
//!                 "Check input validation".to_string(),
//!             ],
//!         })
//!         .with_priority(Priority::High)
//!         .with_context(ContextProfile::Conditional {
//!             task_types: vec!["security-review".to_string()],
//!             user_states: vec![],
//!             task_health: None,
//!         })
//!     );
//!
//! // Generate prompt
//! let prompt = expertise.to_prompt();
//! println!("{}", prompt);
//!
//! // Generate tree visualization
//! let tree = expertise.to_tree();
//! println!("{}", tree);
//!
//! // Generate Mermaid graph
//! let mermaid = expertise.to_mermaid();
//! println!("{}", mermaid);
//! ```
//!
//! ## Context-Aware Rendering
//!
//! Phase 2 adds dynamic prompt rendering based on runtime context:
//!
//! ```rust
//! use llm_toolkit_expertise::{
//!     Expertise, WeightedFragment, KnowledgeFragment,
//!     RenderContext, ContextualPrompt, ContextProfile,
//! };
//!
//! // Create expertise with conditional fragments
//! let expertise = Expertise::new("rust-tutor", "1.0")
//!     .with_fragment(
//!         WeightedFragment::new(KnowledgeFragment::Text(
//!             "You are a Rust tutor".to_string()
//!         ))
//!         .with_context(ContextProfile::Always)
//!     )
//!     .with_fragment(
//!         WeightedFragment::new(KnowledgeFragment::Text(
//!             "Provide detailed explanations".to_string()
//!         ))
//!         .with_context(ContextProfile::Conditional {
//!             task_types: vec![],
//!             user_states: vec!["beginner".to_string()],
//!             task_health: None,
//!         })
//!     );
//!
//! // Render with context
//! let beginner_context = RenderContext::new().with_user_state("beginner");
//! let prompt = expertise.to_prompt_with_render_context(&beginner_context);
//! // Includes both "Always" and "beginner" fragments
//!
//! // Or use the ContextualPrompt wrapper
//! let prompt = ContextualPrompt::from_expertise(&expertise, RenderContext::new())
//!     .with_user_state("beginner")
//!     .to_prompt();
//! ```
//!
//! ## JSON Schema
//!
//! This library supports JSON Schema generation for expertise definitions:
//!
//! ```rust
//! use llm_toolkit_expertise::dump_expertise_schema;
//!
//! let schema = dump_expertise_schema();
//! println!("{}", serde_json::to_string_pretty(&schema).unwrap());
//! ```

// Allow the crate to reference itself by name
extern crate self as llm_toolkit_expertise;

pub mod context;
pub mod fragment;
pub mod render;
pub mod types;

// Re-export main types
pub use context::{ContextMatcher, ContextProfile, Priority, TaskHealth};
pub use fragment::{Anchor, KnowledgeFragment};
pub use render::{ContextualPrompt, RenderContext};
pub use types::{Expertise, WeightedFragment};

// Optional integration with llm-toolkit
#[cfg(feature = "integration")]
mod integration;

/// Generate the JSON Schema for the `Expertise` type.
///
/// Returns the JSON Schema as a `serde_json::Value` for inspection or storage.
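///
/// A minimal usage sketch (the assertion mirrors the unit test at the bottom of this file):
///
/// ```
/// let schema = llm_toolkit_expertise::dump_expertise_schema();
/// assert!(schema.is_object());
/// ```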
pub fn dump_expertise_schema() -> serde_json::Value {
    let schema = schemars::schema_for!(Expertise);
    serde_json::to_value(&schema).expect("Failed to serialize schema")
}

/// Save the `Expertise` JSON Schema to a file.
///
/// # Errors
///
/// Returns an error if serializing the schema or writing the file fails.
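///
/// A minimal usage sketch (the output path is hypothetical; any writable location works):
///
/// ```no_run
/// // Hypothetical output path chosen for illustration.
/// llm_toolkit_expertise::save_expertise_schema("expertise.schema.json")
///     .expect("failed to write schema");
/// ```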
pub fn save_expertise_schema(path: impl AsRef<std::path::Path>) -> std::io::Result<()> {
    let schema = dump_expertise_schema();
    let json = serde_json::to_string_pretty(&schema)?;
    std::fs::write(path, json)
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_dump_schema() {
        let schema = dump_expertise_schema();
        assert!(schema.is_object());
        assert!(schema.get("$schema").is_some());
    }

    #[test]
    fn test_basic_expertise_creation() {
        let expertise = Expertise::new("test", "1.0")
            .with_description("Test expertise")
            .with_tag("test")
            .with_fragment(WeightedFragment::new(KnowledgeFragment::Text(
                "Test".to_string(),
            )));

        assert_eq!(expertise.id, "test");
        assert_eq!(expertise.version, "1.0");
        assert_eq!(expertise.description, Some("Test expertise".to_string()));
        assert_eq!(expertise.tags.len(), 1);
        assert_eq!(expertise.content.len(), 1);
    }

    #[test]
    fn test_to_prompt_generates_valid_output() {
        let expertise = Expertise::new("test", "1.0").with_fragment(WeightedFragment::new(
            KnowledgeFragment::Text("Test content".to_string()),
        ));

        let prompt = expertise.to_prompt();
        assert!(prompt.contains("Expertise: test"));
        assert!(prompt.contains("Test content"));
    }

    #[test]
    fn test_visualizations() {
        let expertise = Expertise::new("test", "1.0").with_fragment(WeightedFragment::new(
            KnowledgeFragment::Text("Test".to_string()),
        ));

        // Tree visualization
        let tree = expertise.to_tree();
        assert!(tree.contains("Expertise: test"));

        // Mermaid visualization
        let mermaid = expertise.to_mermaid();
        assert!(mermaid.contains("graph TD"));
        assert!(mermaid.contains("Expertise: test"));
218 }
219}