// lm_studio_api/chat/model.rs

use crate::prelude::*;

/// The AI models addressable through the LM Studio API.
///
/// Each named variant pairs a Rust identifier with the exact model-id string
/// the server expects: `#[serde(rename = ...)]` controls the serialized form
/// and `#[display = ...]` renders the same string for user-facing output.
/// The two strings are intentionally identical on every variant — keep them
/// in sync when adding models.
///
/// `Other(String)` is a catch-all so any model id not listed here still
/// round-trips through serialization and `Display` unchanged.
//
// NOTE(review): the `#[display = "..."]` name-value attribute form must match
// whatever Display derive `crate::prelude` re-exports (derive_more or similar);
// confirm against that crate's version before changing attribute syntax.
/// The AI models
#[allow(non_camel_case_types)] // variant names mirror model ids (e.g. `Gemma2_2b`), not CamelCase
#[derive(Debug, Display, Clone, From, Eq, PartialEq, Serialize, Deserialize)]
pub enum Model {
    // Gemma 2 instruction-tuned checkpoints (2B / 9B / 27B):
    #[serde(rename = "google/gemma-2-2b-it")]
    #[display = "google/gemma-2-2b-it"]
    Gemma2_2b,

    #[serde(rename = "google/gemma-2-9b-it")]
    #[display = "google/gemma-2-9b-it"]
    Gemma2_9b,

    #[serde(rename = "google/gemma-2-27b-it")]
    #[display = "google/gemma-2-27b-it"]
    Gemma2_27b,

    // Gemma 3 quantization-aware-trained (QAT) checkpoints (1B / 4B / 12B / 27B):
    #[serde(rename = "google/gemma-3-1b-it-qat")]
    #[display = "google/gemma-3-1b-it-qat"]
    Gemma3_1b,

    #[serde(rename = "google/gemma-3-4b-it-qat")]
    #[display = "google/gemma-3-4b-it-qat"]
    Gemma3_4b,

    #[serde(rename = "google/gemma-3-12b-it-qat")]
    #[display = "google/gemma-3-12b-it-qat"]
    Gemma3_12b,

    #[serde(rename = "google/gemma-3-27b-it-qat")]
    #[display = "google/gemma-3-27b-it-qat"]
    Gemma3_27b,

    // Qwen family (2.5-VL vision-language 7B; Qwen 3 at 1.7B / 4B):
    #[serde(rename = "qwen/qwen2.5-vl-7b")]
    #[display = "qwen/qwen2.5-vl-7b"]
    Qwen2_5_Vl_7b,

    #[serde(rename = "qwen/qwen3-1.7b")]
    #[display = "qwen/qwen3-1.7b"]
    Qwen3_1_7b,

    #[serde(rename = "qwen/qwen3-4b")]
    #[display = "qwen/qwen3-4b"]
    Qwen3_4b,

    // CodeGemma GGUF builds (2B / 7B):
    #[serde(rename = "google/codegemma-2b-GGUF")]
    #[display = "google/codegemma-2b-GGUF"]
    CodeGemma_2b,

    #[serde(rename = "google/codegemma-7b-GGUF")]
    #[display = "google/codegemma-7b-GGUF"]
    CodeGemma_7b,

    // Stable Code 3B (base and instruct variants):
    #[serde(rename = "stabilityai/stable-code-3b")]
    #[display = "stabilityai/stable-code-3b"]
    StableCode_3b,

    #[serde(rename = "stabilityai/stable-code-instruct-3b")]
    #[display = "stabilityai/stable-code-instruct-3b"]
    StableCodeInstruct_3b,

    // MythoMax-L2 Kimiko v2 13B:
    #[serde(rename = "mythomax-l2-kimiko-v2-13b")]
    #[display = "mythomax-l2-kimiko-v2-13b"]
    Kimiko_13b,

    // Llama 3.1 derivatives (Nemotron Nano 4B; Meta instruct 8B):
    #[serde(rename = "llama-3.1-Nemotron-Nano-4B-v1.1-GGUF")]
    #[display = "llama-3.1-Nemotron-Nano-4B-v1.1-GGUF"]
    Llama3_1_4b,

    #[serde(rename = "meta-llama-3.1-8b-instruct")]
    #[display = "meta-llama-3.1-8b-instruct"]
    Llama3_1_8b,

    // Text-embedding models:
    #[serde(rename = "nomic-ai/nomic-embed-text-v1.5")]
    #[display = "nomic-ai/nomic-embed-text-v1.5"]
    NomicEmbedText,

    #[serde(rename = "text-embedding-all-minilm-l6-v2-embedding")]
    #[display = "text-embedding-all-minilm-l6-v2-embedding"]
    AllMiniLmL6,

    // Catch-all for any model id not listed above.
    // `untagged` makes serde try the named variants first and fall back to
    // capturing the raw string here; `#[from]` gives `Model: From<String>`;
    // `{0}` displays the captured id verbatim. Must remain the LAST variant
    // so it cannot shadow the named renames during deserialization.
    #[from]
    #[serde(untagged)]
    #[display = "{0}"]
    Other(String)
}
95}