//! car-inference 0.13.0
//!
//! Local model inference for CAR — Candle backend with Qwen3 models.
//! See the crate documentation for details.
use serde::{Deserialize, Serialize};

/// Default output image format (`"png"`), used both by the serde
/// `#[serde(default = ...)]` attribute on `GenerateImageRequest::format`
/// and by the manual `Default` impl.
fn default_image_format() -> String {
    String::from("png")
}

/// Request to generate an image from text.
///
/// `Default` is implemented manually (not derived) because `format`
/// has a sensible non-empty default (`"png"`) that
/// `String::default()` would clobber to `""`. Callers can
/// `GenerateImageRequest { prompt: "...".into(), ..Default::default() }`
/// and get a usable shape. Mirrors the `GenerateRequest` ergonomic
/// fix from #109.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GenerateImageRequest {
    /// Text prompt describing the desired image. Required; no serde
    /// default, so deserialization fails if it is missing.
    pub prompt: String,
    /// Model identifier to use; `None` lets the backend pick its default.
    #[serde(default)]
    pub model: Option<String>,
    /// Things the image should NOT contain; only meaningful on backends
    /// that support negative prompting.
    #[serde(default)]
    pub negative_prompt: Option<String>,
    /// Output width — presumably in pixels; backend-chosen when `None`.
    #[serde(default)]
    pub width: Option<u32>,
    /// Output height — presumably in pixels; backend-chosen when `None`.
    #[serde(default)]
    pub height: Option<u32>,
    /// Number of diffusion/sampling steps; backend default when `None`.
    #[serde(default)]
    pub steps: Option<u32>,
    /// Guidance scale (e.g. CFG) — NOTE(review): exact semantics are
    /// backend-specific; confirm range against each backend.
    #[serde(default)]
    pub guidance: Option<f32>,
    /// RNG seed for reproducible generation; random when `None`.
    #[serde(default)]
    pub seed: Option<u64>,
    /// Where to write the result; backend chooses a path when `None`.
    #[serde(default)]
    pub output_path: Option<String>,
    /// Output encoding; defaults to `"png"` both via serde and via the
    /// manual `Default` impl below.
    #[serde(default = "default_image_format")]
    pub format: String,

    /// Input image to transform (img2img). Backends that don't
    /// support image conditioning return an
    /// `InferenceError::UnsupportedMode` if this is set.
    #[serde(default)]
    pub input_image_path: Option<String>,

    /// Number of variants to generate in one call. `None` (or 1)
    /// produces a single image — matches the original
    /// `generate_image` contract. Backends that don't support
    /// batching either loop internally or error if `> 1`. Honoured
    /// by [`crate::InferenceEngine::generate_image_batch`]; the
    /// scalar [`crate::InferenceEngine::generate_image`] forces it
    /// to 1.
    #[serde(default)]
    pub variant_count: Option<u32>,

    /// Anchor for cross-call continuity — instructs the model to
    /// generate variants in the visual neighbourhood of this image.
    /// Currently meaningful only on backends that natively support
    /// the concept (e.g. gpt-image-2). Other backends ignore the
    /// hint or return `UnsupportedMode`.
    #[serde(default)]
    pub parent_image_path: Option<String>,
}

impl Default for GenerateImageRequest {
    fn default() -> Self {
        Self {
            prompt: String::new(),
            model: None,
            negative_prompt: None,
            width: None,
            height: None,
            steps: None,
            guidance: None,
            seed: None,
            output_path: None,
            format: default_image_format(),
            input_image_path: None,
            variant_count: None,
            parent_image_path: None,
        }
    }
}

/// Image generation result.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GenerateImageResult {
    /// Filesystem path where the generated image was written.
    pub image_path: String,
    /// Media type of the output — presumably a MIME string such as
    /// `"image/png"`; confirm against the backends that populate it.
    pub media_type: String,
    /// Model that actually produced the image, when the backend
    /// reports it; omitted from serialized output when `None`.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub model_used: Option<String>,
}