//! # llmprogram 0.1.0
//!
//! A Rust library that provides a structured and powerful way to create and run
//! programs that use Large Language Models (LLMs). It uses a YAML-based
//! configuration to define the behavior of your LLM programs, making them easy
//! to create, manage, and share.
//!
//! See the repository README for full documentation.
use crate::core::program::LLMProgram;
use serde_json::Value;
use std::collections::HashMap;
use std::fs;
use anyhow::{Result, Context as AnyhowContext};

/// Generate an LLM program YAML file based on a description using an AI assistant.
///
/// Writes an embedded "generator" program config to a per-process file in the
/// system temp directory, runs it through [`LLMProgram`], and emits the
/// generated YAML to `output_path` (or stdout when no path is given). The
/// temporary config file is removed on both success and failure.
/// 
/// # Arguments
/// 
/// * `description` - A detailed description of what the LLM program should do
/// * `example_input` - Optional example of the input the program will receive
/// * `example_output` - Optional example of the output the program should generate
/// * `output_path` - Optional path to output the YAML file (defaults to stdout)
/// * `api_key` - Optional OpenAI API key (defaults to OPENAI_API_KEY env var)
/// 
/// # Errors
/// 
/// Returns an error if the temporary config cannot be written, the generator
/// program fails to load or run, the response lacks a `yaml_content` string
/// field, or the generated YAML cannot be written to `output_path`.
pub async fn generate_yaml_program(
    description: &str,
    example_input: Option<&str>,
    example_output: Option<&str>,
    output_path: Option<&str>,
    api_key: Option<&str>,
) -> Result<()> {
    // Embedded configuration for the generator: an LLM program whose sole job
    // is to author other LLM program YAML configs.
    let yaml_generator_config = r#"
name: yaml_generator
description: Generates LLM program YAML files based on user descriptions
version: 1.0.0

model:
  provider: openai
  name: gpt-4
  temperature: 0.7
  max_tokens: 2000
  response_format: json_object

system_prompt: |
  You are an expert at creating LLM program configurations in YAML format.
  Your task is to generate a complete, valid YAML configuration for an LLM program based on a user's description.

  The YAML should include:
  1. A descriptive name (kebab-case)
  2. A clear description
  3. Version (starting at 1.0.0)
  4. Model configuration with provider, name, temperature, max_tokens, and response_format
  5. A well-crafted system prompt that guides the LLM
  6. Input schema using JSON Schema to validate inputs
  7. Output schema using JSON Schema to validate outputs
  8. A Jinja2 template for the user prompt

  Guidelines:
  - Use gpt-4 or gpt-3.5-turbo as the model name
  - Temperature should be between 0.0 and 1.0 (0.7 is a good default for creative tasks)
  - max_tokens should be appropriate for the task (500-2000)
  - response_format should be "json_object" for structured outputs
  - System prompt should be detailed and specific
  - Input schema should be comprehensive but not overly complex
  - Output schema should define clear, structured responses
  - Template should effectively use the input variables
  - All fields should be properly formatted YAML

  Return ONLY the YAML content as a JSON object with a single "yaml_content" field containing the YAML as a string.

input_schema:
  type: object
  required:
    - description
  properties:
    description:
      type: string
      description: A detailed description of what the LLM program should do
    example_input:
      type: string
      description: An example of the input the program will receive
    example_output:
      type: string
      description: An example of the output the program should generate

output_schema:
  type: object
  required:
    - yaml_content
  properties:
    yaml_content:
      type: string
      description: The generated YAML content for the LLM program

template: |
  Create an LLM program configuration based on this description:

  Description: {{description}}
  {% if example_input %}Example Input: {{example_input}}{% endif %}
  {% if example_output %}Example Output: {{example_output}}{% endif %}

  Generate a complete, valid YAML configuration following the specified format.
"#;

    // Prepare inputs for the YAML generator; the optional examples are only
    // inserted when the caller supplied them (the input schema marks them optional).
    let mut generator_inputs = HashMap::new();
    generator_inputs.insert("description".to_string(), Value::String(description.to_string()));

    if let Some(input) = example_input {
        generator_inputs.insert("example_input".to_string(), Value::String(input.to_string()));
    }

    if let Some(output) = example_output {
        generator_inputs.insert("example_output".to_string(), Value::String(output.to_string()));
    }

    // Write the generator config to a per-process file in the system temp
    // directory. (Previously a fixed name in the current working directory was
    // used, which could collide across concurrent invocations and litter CWD.)
    let temp_config_path = std::env::temp_dir()
        .join(format!("llmprogram_yaml_generator_{}.yaml", std::process::id()))
        .to_string_lossy()
        .into_owned();
    fs::write(&temp_config_path, yaml_generator_config)
        .context("Failed to write temporary YAML generator config")?;

    // Run the generator inside an async block so we can capture its Result and
    // remove the temp file on BOTH success and failure. (Previously any early
    // `?` return — program creation, run, or extraction failure — leaked the
    // temporary config file.)
    let run_result: Result<String> = async {
        let program = LLMProgram::new_with_options(
            &temp_config_path,
            api_key.map(|s| s.to_string()),
            None,  // Use default base URL
            true,  // Enable cache
            // NOTE(review): cache URL is hard-coded — consider making it configurable.
            "redis://localhost:6379",
        ).context("Failed to create YAML generator program")?;

        let result = program.run(&generator_inputs).await
            .context("Failed to run YAML generator program")?;

        // Extract the YAML content as an owned string so the temp file can be
        // removed before the result is used.
        result.get("yaml_content")
            .and_then(|v| v.as_str())
            .map(str::to_owned)
            .ok_or_else(|| anyhow::anyhow!("YAML generator did not return yaml_content"))
    }.await;

    // Best-effort cleanup before propagating any error from the run.
    let _ = fs::remove_file(&temp_config_path);
    let yaml_content = run_result?;

    // Emit the generated YAML to the requested file, or to stdout by default.
    if let Some(path) = output_path {
        fs::write(path, &yaml_content)
            .context("Failed to write generated YAML to file")?;
        println!("YAML program generated and saved to {}", path);
    } else {
        println!("{}", yaml_content);
    }

    Ok(())
}