//! tirami-core 0.3.0
//!
//! Core types for the Forge compute economy: `NodeId`, CU, and [`Config`].
use serde::{Deserialize, Serialize};
use std::path::PathBuf;

/// Node runtime configuration.
///
/// Deserialized from the node's config file; `#[serde(default)]` means any
/// field missing from the on-disk config falls back to the value produced by
/// the `Default` impl, so old config files keep working as new fields are
/// added.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct Config {
    /// Path to the local GGUF model file.
    pub model_path: Option<PathBuf>,

    /// Optional path to a persisted ledger snapshot.
    pub ledger_path: Option<PathBuf>,

    /// Optional path to the persisted forge-bank (L2) state.
    pub bank_state_path: Option<PathBuf>,

    /// Optional path to the persisted forge-agora (L4) marketplace state.
    pub marketplace_state_path: Option<PathBuf>,

    /// Optional path to the persisted forge-mind (L3) agent snapshot.
    pub mind_state_path: Option<PathBuf>,

    /// Whether to share compute with the network. Defaults to `false`
    /// (opt-in).
    pub share_compute: bool,

    /// Maximum memory (GB) to dedicate to inference.
    pub max_memory_gb: f32,

    /// Port for the local HTTP API. Defaults to 3000.
    pub api_port: u16,

    /// Bind address for the local HTTP API. Defaults to loopback
    /// (`127.0.0.1`) so the API is not exposed on the network by default.
    pub api_bind_addr: String,

    /// Optional bearer token protecting administrative API endpoints.
    /// `None` means admin endpoints are unauthenticated.
    pub api_bearer_token: Option<String>,

    /// Maximum accepted HTTP request body size for the local API.
    pub api_max_request_body_bytes: usize,

    /// Bootstrap relay addresses for WAN discovery.
    pub bootstrap_relays: Vec<String>,

    /// Region hint for peer discovery.
    pub region: String,

    /// Maximum accepted prompt length (in Unicode scalar values, not bytes)
    /// for API and remote inference requests.
    pub max_prompt_chars: usize,

    /// Maximum number of tokens a single request may ask the runtime to generate.
    pub max_generate_tokens: u32,

    /// Maximum number of concurrent remote inference requests the seed will execute.
    pub max_concurrent_remote_inference_requests: usize,

    /// Settlement window duration in hours (Issue #19). 0 = manual only.
    pub settlement_window_hours: u64,
}

impl Config {
    /// Returns the `host:port` string the local HTTP API should bind to.
    ///
    /// IPv6 literals are bracketed (`[::1]:3000`) as required by standard
    /// socket-address notation (`std::net::SocketAddr` parsing, RFC 3986);
    /// IPv4 addresses and hostnames are emitted unchanged as `host:port`.
    pub fn api_socket_addr(&self) -> String {
        // A ':' in the bind address means it is an IPv6 literal (IPv4 and
        // hostnames never contain ':'); bracket it unless already bracketed.
        if self.api_bind_addr.contains(':') && !self.api_bind_addr.starts_with('[') {
            format!("[{}]:{}", self.api_bind_addr, self.api_port)
        } else {
            format!("{}:{}", self.api_bind_addr, self.api_port)
        }
    }

    /// Validates the user-controllable parameters of an inference request
    /// against the configured limits.
    ///
    /// Checks, in order:
    /// - prompt is non-empty and within `max_prompt_chars` (counted in
    ///   Unicode scalar values, not bytes);
    /// - `max_tokens` is non-zero and within `max_generate_tokens`;
    /// - `temperature` is finite and within `0.0..=2.0`;
    /// - `top_p`, if supplied, is finite and within `(0.0, 1.0]`.
    ///
    /// # Errors
    ///
    /// Returns [`crate::TiramiError::InvalidRequest`] describing the first
    /// violated constraint.
    pub fn validate_inference_request(
        &self,
        prompt: &str,
        max_tokens: u32,
        temperature: f32,
        top_p: Option<f32>,
    ) -> Result<(), crate::TiramiError> {
        let prompt_chars = prompt.chars().count();
        if prompt_chars == 0 {
            return Err(crate::TiramiError::InvalidRequest(
                "prompt must not be empty".to_string(),
            ));
        }
        if prompt_chars > self.max_prompt_chars {
            return Err(crate::TiramiError::InvalidRequest(format!(
                "prompt too large: {prompt_chars} chars > limit {}",
                self.max_prompt_chars
            )));
        }
        if max_tokens == 0 {
            return Err(crate::TiramiError::InvalidRequest(
                "max_tokens must be greater than zero".to_string(),
            ));
        }
        if max_tokens > self.max_generate_tokens {
            return Err(crate::TiramiError::InvalidRequest(format!(
                "max_tokens too large: {max_tokens} > limit {}",
                self.max_generate_tokens
            )));
        }
        // `contains` already rejects NaN, but the explicit finiteness check
        // documents intent and guards infinities symmetrically.
        if !temperature.is_finite() || !(0.0..=2.0).contains(&temperature) {
            return Err(crate::TiramiError::InvalidRequest(
                "temperature must be finite and within 0.0..=2.0".to_string(),
            ));
        }
        if let Some(top_p) = top_p {
            // top_p == 0.0 would select no tokens at all, so the accepted
            // interval is half-open: (0.0, 1.0].
            if !top_p.is_finite() || !(0.0..=1.0).contains(&top_p) || top_p == 0.0 {
                return Err(crate::TiramiError::InvalidRequest(
                    "top_p must be finite and within (0.0, 1.0]".to_string(),
                ));
            }
        }

        Ok(())
    }
}

impl Default for Config {
    fn default() -> Self {
        Self {
            model_path: None,
            ledger_path: None,
            bank_state_path: None,
            marketplace_state_path: None,
            mind_state_path: None,
            share_compute: false,
            max_memory_gb: 4.0,
            api_port: 3000,
            api_bind_addr: "127.0.0.1".to_string(),
            api_bearer_token: None,
            api_max_request_body_bytes: 64 * 1024,
            bootstrap_relays: vec![],
            region: "unknown".to_string(),
            max_prompt_chars: 8_192,
            max_generate_tokens: 1_024,
            max_concurrent_remote_inference_requests: 4,
            settlement_window_hours: 24,
        }
    }
}