lm_studio_api/chat/request.rs

use crate::prelude::*;
use super::{ Model, Message, format::* };

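/// A request body accepted by the chat endpoint: either a structured list of
/// messages or a raw prompt. Serialized untagged, so the JSON takes the shape
/// of whichever variant is used.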
#[derive(Debug, Clone, Serialize, Deserialize, From)]
#[serde(untagged)]
pub enum Request {
    #[from] Messages(Messages),
    #[from] Prompt(Prompt),
}


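/// Parameters for a message-based request. `context` and `skip_think` are
/// client-side options and are never serialized; `format` is sent as
/// `response_format` and omitted when `None`.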
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Messages {
    pub model: Model,
    pub messages: Vec<Message>,
    #[serde(skip)]
    pub context: bool,
    pub temperature: f32,
    pub max_tokens: i32,
    pub stream: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(rename = "response_format")]
    pub format: Option<Format>,
    #[serde(skip)]
    pub skip_think: bool,
}

impl ::std::default::Default for Messages {
    fn default() -> Self {
        Self {
            model: Model::Other(str!()),
            messages: vec![],
            context: true,
            temperature: 0.7,
            max_tokens: -1,
            stream: false,
            format: None,
            skip_think: true,
        }
    }
}


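/// Parameters for a raw-prompt request. As with `Messages`, the `context` and
/// `skip_think` flags stay on the client and are not serialized.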
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Prompt {
    pub model: Model,
    pub prompt: String,
    #[serde(skip)]
    pub context: bool,
    pub temperature: f32,
    pub max_tokens: i32,
    pub stream: bool,
    pub stop: String,
    #[serde(skip)]
    pub skip_think: bool,
}

impl ::std::default::Default for Prompt {
    fn default() -> Self {
        Self {
            model: Model::Other(str!()),
            prompt: str!(),
            context: true,
            temperature: 0.7,
            max_tokens: -1,
            stream: false,
            stop: str!("\n"),
            skip_think: true,
        }
    }
}
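
// A minimal usage sketch: the `From` derive on `Request` lets either body type
// convert straight into the enum, so callers can build whichever variant fits
// and pass it on. Values here are just the defaults defined above.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn request_bodies_convert_via_from() {
        let chat: Request = Messages::default().into();
        let completion: Request = Prompt::default().into();
        assert!(matches!(chat, Request::Messages(_)));
        assert!(matches!(completion, Request::Prompt(_)));
    }
}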