taskai_core/lib.rs

mod validate;
mod next;

use llm::{
    builder::{LLMBackend, LLMBuilder},
    chat::ChatMessage,
};
use taskai_schema::Backlog;
use std::path::Path;

/// BacklogGenerator is responsible for generating a project backlog from a specification using an LLM.
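///
/// # Example
///
/// A minimal configuration sketch (assuming this crate is exposed as
/// `taskai_core`; the model name shown is simply the default):
///
/// ```no_run
/// use taskai_core::BacklogGenerator;
///
/// let generator = BacklogGenerator::new()
///     .with_model("gpt-4.1-2025-04-14")
///     .with_language("fr")
///     .with_style("standard");
/// ```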
pub struct BacklogGenerator {
    model: String,
    language: String,
    style: String,
}

impl Default for BacklogGenerator {
    /// Returns a default BacklogGenerator with preset model, language, and style.
    fn default() -> Self {
        Self {
            model: "gpt-4.1-2025-04-14".to_string(),
            language: "en".to_string(),
            style: "standard".to_string(),
        }
    }
}

impl BacklogGenerator {
    /// Creates a new BacklogGenerator with default settings.
    pub fn new() -> Self {
        Self::default()
    }

    /// Sets the LLM model to use.
    pub fn with_model(mut self, model: &str) -> Self {
        self.model = model.to_string();
        self
    }

    /// Sets the language for the prompts.
    pub fn with_language(mut self, language: &str) -> Self {
        self.language = language.to_string();
        self
    }

    /// Sets the style for the backlog generation.
    pub fn with_style(mut self, style: &str) -> Self {
        self.style = style.to_string();
        self
    }

    /// Returns the system prompt for the configured language, falling back to the English default when the localized prompt file cannot be read.
    fn get_system_prompt(&self) -> String {
        match self.language.as_str() {
            "fr" => {
                if let Ok(content) = std::fs::read_to_string(Self::find_prompt_path("system_fr.txt")) {
                    content
                } else {
                    self.get_default_system_prompt()
                }
            }
            _ => self.get_default_system_prompt(),
        }
    }

    /// Returns the default system prompt in English, or a hardcoded fallback if the file is not found.
    fn get_default_system_prompt(&self) -> String {
        if let Ok(content) = std::fs::read_to_string(Self::find_prompt_path("system_en.txt")) {
            content
        } else {
            "You are a helpful assistant specialized in converting project specifications into structured task backlogs. Create a YAML backlog with tasks, dependencies, and deliverables.".to_string()
        }
    }

    /// Attempts to find the prompt file in several possible locations.
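    ///
    /// Lookup order (first existing path wins): `prompts/`, then
    /// `crates/core/prompts/`, then the bare filename relative to the
    /// current directory. If none exists, the `crates/core/prompts/` path
    /// is returned anyway and the caller falls back to its hardcoded prompt.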
    fn find_prompt_path(filename: &str) -> String {
        let paths = [
            format!("prompts/{}", filename),
            format!("crates/core/prompts/{}", filename),
            filename.to_string(),
        ];

        for path in paths {
            if Path::new(&path).exists() {
                return path;
            }
        }

        format!("crates/core/prompts/{}", filename)
    }

    /// Generates a backlog from the given specification using the configured LLM.
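    ///
    /// A usage sketch: requires `OPENAI_API_KEY` at runtime and an async
    /// runtime such as `tokio` (the crate path `taskai_core` is assumed):
    ///
    /// ```no_run
    /// # use taskai_core::BacklogGenerator;
    /// # async fn demo() -> Result<(), String> {
    /// let backlog = BacklogGenerator::new()
    ///     .generate("Project: demo\nGoal: build a CLI todo app")
    ///     .await?;
    /// println!("generated {} tasks", backlog.tasks.len());
    /// # Ok(())
    /// # }
    /// ```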
    pub async fn generate(&self, spec: &str) -> Result<Backlog, String> {
        #[cfg(test)]
        return self.generate_mock(spec);

        #[cfg(not(test))]
        {
            let system_prompt = self.get_system_prompt();
            let response = self.call_llm(&system_prompt, spec).await?;
            return validate::parse_and_validate_yaml(&response);
        }
    }

    /// Calls the LLM API with the given system and user prompts, returning the raw response.
    async fn call_llm(&self, system_prompt: &str, user_prompt: &str) -> Result<String, String> {
        let api_key = std::env::var("OPENAI_API_KEY")
            .map_err(|_| "OPENAI_API_KEY environment variable not set".to_string())?;

        let llm = LLMBuilder::new()
            .backend(LLMBackend::OpenAI)
            .api_key(api_key)
            .model(&self.model)
            .max_tokens(2048)
            .temperature(0.7)
            .stream(false)
            .build()
            .map_err(|e| format!("Failed to build LLM: {}", e))?;

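        // The system prompt is folded into a single user message below,
        // rather than being sent as a separate system-role message.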
        let formatted_prompt = format!("{}\n\n{}", system_prompt, user_prompt);

        let messages = vec![
            ChatMessage::user()
                .content(formatted_prompt)
                .build(),
        ];

        let completion = llm.chat(&messages)
            .await
            .map_err(|e| format!("LLM API error: {}", e))?;

        Ok(completion.to_string())
    }

    /// Determines if the input string is already a structured project specification.
    #[allow(dead_code)]
    fn is_structured_spec(input: &str) -> bool {
        input.contains("Project:")
            && (input.contains("Language:") || input.contains("Goal:") || input.contains("Deliverables:"))
    }

    /// Generates a mock backlog for testing purposes.
    #[cfg(test)]
    fn generate_mock(&self, spec: &str) -> Result<Backlog, String> {
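        // Note: `spec` is interpolated verbatim; a spec containing double
        // quotes or newlines would produce invalid YAML (fine for tests).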
        let mock_yaml = format!(
            r#"
project: mock-project
rust_version: "1.77"
tasks:
  - id: MOCK-1
    title: "Mock task from spec: {}"
    depends: []
    state: Todo
    deliverable: "src/main.rs"
    done_when:
      - "cargo test passes"
"#,
            spec.trim()
        );

        serde_yaml::from_str(&mock_yaml).map_err(|e| e.to_string())
    }
}

/// Returns a list of tasks that are ready to be worked on.
pub use next::get_ready_tasks;

#[cfg(test)]
mod tests {
    use super::*;

    /// Tests the mock backlog generation.
    #[tokio::test]
    async fn gen_mock() {
        let generator = BacklogGenerator::new();
        let result = generator.generate("Test specification").await.unwrap();

        assert_eq!(result.project, "mock-project");
        assert_eq!(result.tasks[0].id, "MOCK-1");
    }
}