//! neomemx 0.1.2
//!
//! A high-performance memory library for AI agents with semantic search.
//! Documentation:
//! OpenAI LLM configuration

use serde::{Deserialize, Serialize};

/// Configuration for OpenAI LLM
/// Configuration for the OpenAI LLM backend.
///
/// Every field except `api_key` carries a serde default, so a partial
/// (or empty) config document deserializes successfully. A missing
/// `api_key` is resolved at use time from the `OPENAI_API_KEY`
/// environment variable via `get_api_key`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct OpenAILlmConfig {
    /// Model identifier sent to the API (default: "gpt-4o-mini").
    #[serde(default = "default_model")]
    pub model: String,

    /// OpenAI API key; when `None`, `get_api_key` falls back to the
    /// `OPENAI_API_KEY` environment variable.
    pub api_key: Option<String>,

    /// Sampling temperature for generation (default: 0.7).
    #[serde(default = "default_temperature")]
    pub temperature: f32,

    /// Nucleus (top-p) sampling parameter (default: 1.0).
    #[serde(default = "default_top_p")]
    pub top_p: f32,

    /// Maximum number of tokens to generate (default: 2048).
    #[serde(default = "default_max_tokens")]
    pub max_tokens: u32,

    /// Base URL of the OpenAI-compatible API endpoint
    /// (default: "https://api.openai.com/v1"); override to point at a
    /// proxy or compatible server.
    #[serde(default = "default_base_url")]
    pub base_url: String,
}

/// Serde default for `OpenAILlmConfig::model`.
fn default_model() -> String {
    String::from("gpt-4o-mini")
}

/// Serde default for `OpenAILlmConfig::temperature`.
fn default_temperature() -> f32 {
    0.7_f32
}

/// Serde default for `OpenAILlmConfig::top_p`.
fn default_top_p() -> f32 {
    1.0_f32
}

/// Serde default for `OpenAILlmConfig::max_tokens`.
fn default_max_tokens() -> u32 {
    2048_u32
}

/// Serde default for `OpenAILlmConfig::base_url`.
fn default_base_url() -> String {
    String::from("https://api.openai.com/v1")
}

impl Default for OpenAILlmConfig {
    fn default() -> Self {
        Self {
            model: default_model(),
            api_key: None,
            temperature: default_temperature(),
            top_p: default_top_p(),
            max_tokens: default_max_tokens(),
            base_url: default_base_url(),
        }
    }
}

impl OpenAILlmConfig {
    /// Resolve the API key: an explicitly configured value wins;
    /// otherwise fall back to the `OPENAI_API_KEY` environment
    /// variable. Returns `None` when neither is set.
    pub fn get_api_key(&self) -> Option<String> {
        match &self.api_key {
            Some(key) => Some(key.clone()),
            None => std::env::var("OPENAI_API_KEY").ok(),
        }
    }
}