//! prompts_core/lib.rs — prompt definitions, the `TextGenerator` trait, and the JSON loader.

use async_trait::async_trait;
use clap::ValueEnum;
use serde::{Deserialize, Serialize};
use std::fs;
use std::io;
use std::path::Path;
6
7#[derive(Debug, Deserialize, Serialize)]
8pub struct Prompt {
9    pub name: String,
10    pub text: String,
11}
12
/// Selects which `TextGenerator` implementation to use.
///
/// Derives clap's `ValueEnum`, so this type can be parsed directly from a
/// command-line argument value (e.g. `mock` / `llm`).
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, ValueEnum, Debug)]
pub enum GeneratorType {
    /// Use a mock text generator
    Mock,
    /// Use a large language model (LLM) text generator
    Llm,
}
20
/// Common interface for text-generation backends.
///
/// `#[async_trait]` allows an `async fn` in the trait by boxing the
/// returned future, keeping the trait object-safe for dynamic dispatch.
#[async_trait]
pub trait TextGenerator {
    /// Generate output text for the given prompt text.
    async fn generate(&self, prompt_text: &str) -> String;
}
25
26pub struct MockTextGenerator;
27
28#[async_trait]
29impl TextGenerator for MockTextGenerator {
30    async fn generate(&self, prompt_text: &str) -> String {
31        format!("Generated text for '{}': {}\n(This is a mock generation)", prompt_text, prompt_text)
32    }
33}
34
35pub struct LLMTextGenerator;
36
37#[async_trait]
38impl TextGenerator for LLMTextGenerator {
39    async fn generate(&self, prompt_text: &str) -> String {
40        format!("Generated text for '{}' using LLM: {}\n(This is a placeholder for LLM API call)", prompt_text, prompt_text)
41    }
42}
43
44pub fn load_prompts(file_path: &str) -> Result<Vec<Prompt>, io::Error> {
45    let data = fs::read_to_string(file_path)?;
46    let prompts: Vec<Prompt> = serde_json::from_str(&data)?;
47    Ok(prompts)
48}