aptu_core/ai/models.rs
// SPDX-License-Identifier: Apache-2.0

//! AI model and provider types.
//!
//! This module provides core types for AI model and provider representation.
//! Runtime model validation is handled by the `ModelRegistry` trait in the registry module.

use serde::{Deserialize, Serialize};

/// AI provider identifier.
///
/// Represents different AI service providers that Aptu can integrate with.
/// Each provider has different capabilities, pricing, and deployment models.
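///
/// # Examples
///
/// `Display` (implemented below) yields each provider's canonical name. This is
/// a minimal sketch; the import path is assumed from this file's location and
/// may need adjusting to the actual crate layout:
///
/// ```
/// use aptu_core::ai::models::ModelProvider;
///
/// assert_eq!(ModelProvider::OpenRouter.to_string(), "OpenRouter");
/// assert_eq!(ModelProvider::Mlx.to_string(), "MLX");
/// ```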
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum ModelProvider {
    /// `OpenRouter` - Unified API for multiple AI providers
    /// Supports free and paid models from Mistral, Anthropic, xAI, and others.
    OpenRouter,

    /// `Ollama` - Local AI model runner
    /// Runs models locally without API calls or costs.
    Ollama,

    /// `MLX` - Apple Silicon optimized models (future iOS support)
    /// Runs models natively on iOS devices.
    Mlx,
}

impl std::fmt::Display for ModelProvider {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            ModelProvider::OpenRouter => write!(f, "OpenRouter"),
            ModelProvider::Ollama => write!(f, "Ollama"),
            ModelProvider::Mlx => write!(f, "MLX"),
        }
    }
}

/// AI model metadata and configuration.
///
/// Represents a single AI model with its capabilities, pricing, and provider information.
/// Used for model selection, validation, and UI display.
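///
/// # Examples
///
/// A minimal construction sketch for a free `OpenRouter` model. The import path
/// and the `context_window` value are illustrative assumptions, not documented
/// limits:
///
/// ```
/// use aptu_core::ai::models::{AiModel, ModelProvider};
///
/// let model = AiModel {
///     display_name: "Devstral 2".to_string(),
///     identifier: "mistralai/devstral-2512:free".to_string(),
///     provider: ModelProvider::OpenRouter,
///     is_free: true,
///     context_window: 128_000, // illustrative value
/// };
/// assert!(model.is_free);
/// assert_eq!(model.provider.to_string(), "OpenRouter");
/// ```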
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AiModel {
    /// Human-readable model name for UI display
    /// Examples: "Devstral 2", "Claude Sonnet 4.5"
    pub display_name: String,

    /// Provider-specific model identifier
    /// Used in API requests to specify which model to use.
    /// Examples:
    /// - `OpenRouter`: "mistralai/devstral-2512:free"
    /// - `Ollama`: "mistral:7b"
    pub identifier: String,

    /// AI service provider
    pub provider: ModelProvider,

    /// Whether this model is free to use
    /// Free models have no API cost (either free tier or local execution).
    pub is_free: bool,

    /// Maximum context window size in tokens
    /// Determines how much input text the model can process.
    pub context_window: u32,
}
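
// A minimal test sketch for the serde derives above. It assumes `serde_json`
// is available as a dev-dependency, which this file does not confirm; the
// model values are illustrative.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn ai_model_serde_round_trip() {
        let model = AiModel {
            display_name: "Devstral 2".to_string(),
            identifier: "mistralai/devstral-2512:free".to_string(),
            provider: ModelProvider::OpenRouter,
            is_free: true,
            context_window: 128_000, // illustrative value
        };

        // Serialize to JSON and back, then compare via the derived PartialEq.
        let json = serde_json::to_string(&model).expect("serialization should succeed");
        let parsed: AiModel = serde_json::from_str(&json).expect("deserialization should succeed");
        assert_eq!(parsed, model);
    }
}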