use crate::builder::ClarityAssessment;
use crate::errors::{AigentError, Result};
use super::providers::{anthropic, google, ollama, openai};
/// Abstraction over a chat-completion LLM backend.
///
/// `Send + Sync` bounds allow a boxed provider (see [`detect_provider`])
/// to be shared across threads.
pub trait LlmProvider: Send + Sync {
    /// Send a system prompt and a user message to the backend and return
    /// the raw completion text.
    ///
    /// # Errors
    /// Returns an error when the underlying backend call fails.
    fn generate(&self, system: &str, user: &str) -> Result<String>;
}
/// Detect the first usable LLM provider from the environment.
///
/// Providers are probed in fixed priority order — Anthropic, OpenAI,
/// Google, then Ollama — and the first whose environment configuration
/// is present wins. Returns `None` when no provider is configured.
#[must_use]
pub fn detect_provider() -> Option<Box<dyn LlmProvider>> {
    // `or_else` keeps each probe lazy: a later provider's `from_env` only
    // runs when every earlier one was absent, matching the original
    // early-return chain exactly.
    anthropic::AnthropicProvider::from_env()
        .map(|p| Box::new(p) as Box<dyn LlmProvider>)
        .or_else(|| {
            openai::OpenAiProvider::from_env().map(|p| Box::new(p) as Box<dyn LlmProvider>)
        })
        .or_else(|| {
            google::GoogleProvider::from_env().map(|p| Box::new(p) as Box<dyn LlmProvider>)
        })
        .or_else(|| {
            ollama::OllamaProvider::from_env().map(|p| Box::new(p) as Box<dyn LlmProvider>)
        })
}
/// Ask the LLM to derive a kebab-case skill name from a purpose description.
///
/// The trimmed, lowercased response is validated before being returned.
///
/// # Errors
/// Returns [`AigentError::Build`] when the response is empty, longer than
/// 64 bytes, contains characters outside `[a-z0-9-]`, or has a leading,
/// trailing, or doubled hyphen. Callers treat any error as the signal to
/// fall back to a heuristic name.
pub fn llm_derive_name(provider: &dyn LlmProvider, purpose: &str) -> Result<String> {
    let system = "You are a naming assistant. Given a purpose description, derive \
a kebab-case skill name using gerund form (e.g., 'processing-pdfs', \
'analyzing-data'). Reply with ONLY the name, no explanation. The name must be \
lowercase, use only letters, numbers, and hyphens, and be at most 64 characters.";

    let candidate = provider.generate(system, purpose)?.trim().to_lowercase();

    // Reject empty or oversized responses up front.
    if candidate.is_empty() || candidate.len() > 64 {
        return Err(AigentError::Build {
            message: "LLM returned invalid name (empty or too long)".to_string(),
        });
    }

    // Only lowercase ASCII letters, digits, and hyphens are permitted.
    let allowed = |c: char| c.is_ascii_lowercase() || c.is_ascii_digit() || c == '-';
    if candidate.chars().any(|c| !allowed(c)) {
        return Err(AigentError::Build {
            message: format!("LLM returned invalid name characters: {candidate}"),
        });
    }

    // Hyphens may only appear singly, between other characters.
    let hyphens_misplaced = candidate.starts_with('-')
        || candidate.ends_with('-')
        || candidate.contains("--");
    if hyphens_misplaced {
        return Err(AigentError::Build {
            message: format!("LLM returned name with invalid hyphen placement: {candidate}"),
        });
    }

    Ok(candidate)
}
/// Ask the LLM to write a concise third-person description for a skill.
///
/// The response is trimmed and silently truncated to 1024 characters
/// (taking whole `char`s, never splitting a code point).
///
/// # Errors
/// Returns [`AigentError::Build`] when the backend fails or replies with
/// an empty string; callers use the error to fall back to a default.
pub fn llm_generate_description(
    provider: &dyn LlmProvider,
    purpose: &str,
    name: &str,
) -> Result<String> {
    let system = "You are a technical writer. Write a concise skill description \
in third person. Describe what the skill does and when to use it. Maximum 1024 \
characters. No quotes or formatting.";

    let prompt = format!("Skill name: {name}\nPurpose: {purpose}");
    let description = provider.generate(system, &prompt)?.trim().to_string();

    if description.is_empty() {
        return Err(AigentError::Build {
            message: "LLM returned empty description".to_string(),
        });
    }

    // `take(1024)` is a no-op for short replies and caps long ones, so a
    // single unconditional pass covers both branches of the original.
    Ok(description.chars().take(1024).collect())
}
/// Ask the LLM to generate the markdown body of a SKILL.md file.
///
/// The prompt carries the skill name, description, and purpose; the
/// trimmed response is returned verbatim.
///
/// # Errors
/// Returns [`AigentError::Build`] when the backend fails or replies with
/// an empty string; callers use the error to fall back to a template body.
pub fn llm_generate_body(
    provider: &dyn LlmProvider,
    purpose: &str,
    name: &str,
    description: &str,
) -> Result<String> {
    let system = "You are a skill author following the Anthropic agent skill \
specification. Generate a markdown body for a SKILL.md file. Be concise — only \
add context the model doesn't already have. Use sections with ## headings. \
Keep under 100 lines. Do not include frontmatter delimiters (---).";

    let prompt = format!("Skill name: {name}\nDescription: {description}\nPurpose: {purpose}");
    let body = provider.generate(system, &prompt)?.trim().to_string();

    if body.is_empty() {
        Err(AigentError::Build {
            message: "LLM returned empty body".to_string(),
        })
    } else {
        Ok(body)
    }
}
/// Ask the LLM whether a purpose description is clear enough to build a
/// skill from, expecting a small JSON object in reply.
///
/// # Errors
/// Returns [`AigentError::Build`] when the backend fails or the reply is
/// not valid JSON of the expected shape; callers use the error to fall
/// back to treating the purpose as clear.
pub fn llm_assess_clarity(provider: &dyn LlmProvider, purpose: &str) -> Result<ClarityAssessment> {
    /// Wire shape of the expected LLM reply.
    #[derive(serde::Deserialize)]
    struct ClarityResponse {
        clear: bool,
        questions: Vec<String>,
    }

    let system = "Evaluate if this purpose description is clear enough to \
generate an AI agent skill. Reply in JSON: {\"clear\": true/false, \
\"questions\": [\"question1\", ...]}. If clear, questions should be empty.";

    let reply = provider.generate(system, purpose)?;
    let response: ClarityResponse =
        serde_json::from_str(reply.trim()).map_err(|e| AigentError::Build {
            message: format!("LLM clarity response parse failed: {e}"),
        })?;

    Ok(ClarityAssessment {
        clear: response.clear,
        questions: response.questions,
    })
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Stub provider that always succeeds with a canned response.
    struct MockProvider {
        response: String,
    }

    impl MockProvider {
        fn new(response: &str) -> Self {
            Self {
                response: response.to_string(),
            }
        }
    }

    impl LlmProvider for MockProvider {
        fn generate(&self, _system: &str, _user: &str) -> Result<String> {
            Ok(self.response.clone())
        }
    }

    /// Stub provider that always fails, for exercising fallback paths.
    struct FailingProvider;

    impl LlmProvider for FailingProvider {
        fn generate(&self, _system: &str, _user: &str) -> Result<String> {
            Err(AigentError::Build {
                message: "mock LLM failure".to_string(),
            })
        }
    }

    #[test]
    fn mock_provider_returns_expected_text() {
        let stub = MockProvider::new("hello world");
        assert_eq!(stub.generate("system", "user").unwrap(), "hello world");
    }

    #[test]
    fn detect_provider_returns_none_when_no_env_vars() {
        // The outcome depends on the ambient environment (CI hosts may
        // export real API keys), so this only checks that detection runs
        // without panicking rather than asserting on the result.
        let _ = detect_provider();
    }

    #[test]
    fn llm_name_derivation_falls_back_on_invalid_response() {
        let stub = MockProvider::new("NOT-VALID-Name!");
        let outcome = llm_derive_name(&stub, "Process PDFs");
        assert!(
            outcome.is_err(),
            "should fail on invalid name so caller can fall back"
        );
    }

    #[test]
    fn llm_description_generation_falls_back_on_error() {
        let outcome = llm_generate_description(&FailingProvider, "Process PDFs", "processing-pdfs");
        assert!(outcome.is_err(), "should return error for fallback");
    }

    #[test]
    fn llm_body_generation_falls_back_on_error() {
        let outcome = llm_generate_body(&FailingProvider, "Process PDFs", "processing-pdfs", "Desc.");
        assert!(outcome.is_err(), "should return error for fallback");
    }

    #[test]
    fn llm_clarity_assessment_falls_back_on_parse_error() {
        let stub = MockProvider::new("this is not json");
        let outcome = llm_assess_clarity(&stub, "Process PDFs");
        assert!(
            outcome.is_err(),
            "should fail to parse non-JSON, allowing fallback"
        );
    }
}