1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
/// The OpenAI completion/chat models this client can target.
///
/// `Gpt35Turbo` is the catch-all default when a user-supplied model name
/// is not recognized (see `get_model`).
// Kept even when some variants are unselected at runtime.
#[allow(dead_code)]
// Fieldless enum: derives are free and make the type usable in tests,
// logs, and comparisons.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Models {
    TextDavinci003,
    CodeDavinci002,
    CodeCushman001,
    Gpt35Turbo,
}
/// Map an OpenAI API model name to its `Models` variant.
///
/// Any name that is not one of the three recognized legacy completion
/// models falls back to `Models::Gpt35Turbo`.
pub fn get_model(name: &str) -> Models {
    if name == "text-davinci-003" {
        Models::TextDavinci003
    } else if name == "code-davinci-002" {
        Models::CodeDavinci002
    } else if name == "code-cushman-001" {
        Models::CodeCushman001
    } else {
        // Unknown or newer names default to the chat model.
        Models::Gpt35Turbo
    }
}
impl Models {
    /// Every model the client knows about, default (`Gpt35Turbo`) first.
    pub fn all() -> Vec<Models> {
        vec![
            Models::Gpt35Turbo,
            Models::TextDavinci003,
            Models::CodeDavinci002,
            Models::CodeCushman001,
        ]
    }

    /// The model identifier string expected by the OpenAI API.
    ///
    /// Returns `&'static str` (the values are string literals), so callers
    /// may keep the name without borrowing from the `Models` value.
    pub fn name(&self) -> &'static str {
        match *self {
            Models::Gpt35Turbo => "gpt-3.5-turbo",
            Models::TextDavinci003 => "text-davinci-003",
            Models::CodeDavinci002 => "code-davinci-002",
            Models::CodeCushman001 => "code-cushman-001",
        }
    }

    /// Human-readable description of the model, for display in a picker/UI.
    #[allow(dead_code)]
    pub fn description(&self) -> &'static str {
        match *self {
            Models::Gpt35Turbo => "Most capable GPT-3.5 model and optimized for chat at 1/10th the cost of text-davinci-003. Will be updated with our latest model iteration.",
            Models::TextDavinci003 => "Can do any language task with better quality, longer output, and consistent instruction-following than the curie, babbage, or ada models. Also supports inserting completions within text.",
            Models::CodeDavinci002 => "DaVinci code generation model, version 002",
            Models::CodeCushman001 => "Cushman code generation model, version 001",
        }
    }

    /// Maximum context size (prompt + completion) in tokens for the model.
    // NOTE(review): OpenAI's published limit for text-davinci-003 is 4097,
    // not 4093 — confirm whether 4093 is an intentional safety margin.
    #[allow(dead_code)]
    pub fn max_tokens(&self) -> i32 {
        match *self {
            Models::Gpt35Turbo => 4096,
            Models::TextDavinci003 => 4093,
            Models::CodeDavinci002 => 4093,
            Models::CodeCushman001 => 4093,
        }
    }

    /// Training-data cutoff string, as published by OpenAI.
    #[allow(dead_code)]
    pub fn training_data(&self) -> &'static str {
        match *self {
            Models::Gpt35Turbo => "Up to Sep 2021",
            Models::TextDavinci003 => "Up to Sep 2021",
            Models::CodeDavinci002 => "Up to Sep 2021",
            Models::CodeCushman001 => "Up to Sep 2021",
        }
    }
}