artificial_core/model.rs
//! Model identifiers used throughout the **artificial** workspace.
//!
//! The enum hierarchy keeps the *public* API blissfully simple while allowing
//! each provider crate to map the variants onto its own naming scheme. As a
//! consequence you never have to type literal strings such as `"gpt-4o-mini"`
//! in your application code: pick an enum variant instead and let the adapter
//! translate it.
//!
//! # Adding more models
//!
//! 1. **Provider-specific enum**
//!    Add the variant to the sub-enum (`OpenAiModel`, `AnthropicModel`, …).
//! 2. **Mapping layer**
//!    Update the mapping function in the provider crate
//!    (`artificial-openai::model_map::map_model`, etc.), as sketched below.
//! 3. **Compile-time safety**
//!    The compiler will tell you if you forgot to handle the new variant in
//!    `From<T> for Model` or in provider match statements.
//!
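//! As a rough illustration of step 2, the provider-side mapping might look
//! like the sketch below (hypothetical code; the real `map_model` lives in
//! the `artificial-openai` crate and may differ in signature and coverage):
//!
//! ```rust,ignore
//! use std::borrow::Cow;
//! use artificial_core::model::{Model, OpenAiModel};
//!
//! /// Translate the provider-agnostic `Model` into the wire-level name the
//! /// OpenAI API expects.
//! pub fn map_model(model: &Model) -> Cow<'static, str> {
//!     match model {
//!         Model::OpenAi(OpenAiModel::Gpt4o) => Cow::Borrowed("gpt-4o"),
//!         Model::OpenAi(OpenAiModel::Gpt4oMini) => Cow::Borrowed("gpt-4o-mini"),
//!         // Custom models already carry their wire name verbatim.
//!         Model::Custom(name) => name.clone(),
//!     }
//! }
//! ```
//!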
//! # Example
//!
//! ```rust
//! use artificial_core::model::{Model, OpenAiModel};
//! assert_eq!(Model::from(OpenAiModel::Gpt4oMini),
//!            Model::OpenAi(OpenAiModel::Gpt4oMini));
//! ```
use std::borrow::Cow;

/// Universal identifier for an LLM model.
///
/// * `OpenAi` – Enumerated list of officially supported OpenAI models.
/// * `Custom` – Any provider / model name not yet covered by a dedicated
///   enum. Use this if you run a self-hosted or beta model.
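///
/// # Examples
///
/// A minimal sketch of picking models; the `"ollama:llama3"` string is only
/// an illustration of the free-form `Custom` format, not a name this crate
/// defines:
///
/// ```rust
/// use artificial_core::model::{Model, OpenAiModel};
/// use std::borrow::Cow;
///
/// // Built-in OpenAI variant, converted via the `From` impl below.
/// let flagship: Model = OpenAiModel::Gpt4o.into();
///
/// // Arbitrary provider/model string for anything not covered by an enum.
/// let custom = Model::Custom(Cow::Borrowed("ollama:llama3"));
///
/// assert_ne!(flagship, custom);
/// ```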
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum Model {
    /// Built-in OpenAI models (chat completion API).
    OpenAi(OpenAiModel),
    /// Fully qualified provider/model ID (`"provider:model-name"` or similar).
    Custom(Cow<'static, str>),
}

/// Exhaustive list of models **officially** supported by the OpenAI back-end.
///
/// Keeping the list small avoids accidental typos while still allowing
/// arbitrary model names through [`Model::Custom`].
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum OpenAiModel {
    /// GPT-4o flagship (all-round best quality, vision support).
    Gpt4o,
    /// GPT-4o mini (cheaper, currently in phased rollout).
    Gpt4oMini,
}

impl From<OpenAiModel> for Model {
    fn from(val: OpenAiModel) -> Self {
        Model::OpenAi(val)
    }
}
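
// Illustrative test module (a sketch added for clarity, not part of the
// original crate): it only exercises behaviour defined above, namely the
// `From<OpenAiModel>` conversion and equality of `Custom` values.
#[cfg(test)]
mod tests {
    use super::*;
    use std::borrow::Cow;

    #[test]
    fn openai_variant_converts_into_model() {
        let model: Model = OpenAiModel::Gpt4oMini.into();
        assert_eq!(model, Model::OpenAi(OpenAiModel::Gpt4oMini));
    }

    #[test]
    fn custom_model_wraps_arbitrary_names() {
        // `"provider:model-name"` mirrors the placeholder format hinted at in
        // the `Custom` variant's documentation.
        let model = Model::Custom(Cow::Borrowed("provider:model-name"));
        assert_eq!(model, Model::Custom("provider:model-name".into()));
    }
}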