// makepad_studio/ai_chat/open_ai_data.rs

1use crate::makepad_micro_serde::*;
2
3#[derive(Debug, SerJson, DeJson)]
4pub struct OpenAiChatPrompt {
5    pub messages: Vec<OpenAiChatMessage>,
6    pub model: String,
7    pub reasoning_effort: Option<String>,
8    pub max_tokens: i32,
9    pub stream: bool
10}
11
12#[derive(Debug, SerJson, DeJson)]
13pub struct OpenAiChatMessage {
14    pub content: Option<String>,
15    pub role: Option<String>,
16    pub refusal: Option<JsonValue>
17} 
18
19#[allow(unused)]
20#[derive(Debug, DeJson)]
21pub struct OpenAiChatResponse {
22    pub id: String,
23    pub object: String,
24    pub created: i32,
25    pub model: String,
26    pub service_tier: Option<String>,
27    pub system_fingerprint: Option<JsonValue>,
28    pub usage: Option<OpenAiChatUsage>,
29    pub choices: Vec<OpenAiChatChoice>,
30}
31
32#[allow(unused)]
33#[derive(Debug, DeJson)]
34pub struct OpenAiCompletionDetails {
35    pub reasoning_tokens: i32,
36}
37
38#[allow(unused)]
39#[derive(Debug, DeJson)]
40pub struct OpenAiChatUsage {
41    pub prompt_tokens: i32,
42    pub completion_tokens: i32,
43    pub total_tokens: i32,
44    pub completion_tokens_details: OpenAiCompletionDetails
45}
46
47#[allow(unused)]
48#[derive(Debug, DeJson)]
49pub struct OpenAiChatChoice {
50    pub message: Option<OpenAiChatMessage>,
51    pub delta: Option<OpenAiChatMessage>,
52    pub finish_reason: Option<String>,
53    pub logprobs: Option<JsonValue>,
54    pub index: i32,
55}
56/*
57#[derive(SerJson, DeJson)]
58pub struct LLamaCppQuery {
59    pub stream: bool,
60    pub repeat_last_n: i32,
61    pub repeat_penalty:f32,
62    pub top_k:f32,
63    pub top_p:f32,
64    pub min_p:f32,
65    pub tfs_z:f32,
66    pub n_predict:i32,
67    pub temperature:f32,
68    pub stop: Vec<i32>,
69    pub typical_p: f32,
70    pub presence_penalty: f32,
71    pub frequency_penalty: f32,
72    pub mirostat:f32,
73    pub mirostat_tau:f32,
74    pub mirostat_eta:f32,
75    pub grammar:String,
76    pub n_probs:i32,
77    pub min_keep:f32,
78    pub image_data:Vec<i32>,
79    pub cache_prompt:bool,
80    pub api_key:String,
81    pub slot_id:i32,
82    pub prompt: String,
83}
84
85#[derive(SerJson, DeJson)]
86pub struct LLamaCppStream {
87    pub content: String,
88    pub stop: bool,
89    pub id_slot: u32,
90    pub multimodal: bool,
91    pub index: i32,
92}
93AiBackend::LlamaLocal=>{
94    for data in data.split("\n\n"){
95        if let Some(data) = data.strip_prefix("data: "){
96            if data != "[DONE]"{
97                match LLamaCppStream::deserialize_json(data){
98                    Ok(chat_response)=>{
99                        if let Some(AiChatMessage::Ai(s)) = self.chat.last_mut(){
100                            s.push_str(&chat_response.content);
101                        }
102                        else{
103                            self.chat.push(AiChatMessage::Ai(chat_response.content))
104                        }
                        // trigger a save to disk as well
106                        changed = true;
107                    }
108                    Err(e)=>{
                        println!("JSON parse error {:?} {}", e, data);
110                    }
111                }
112            }
113        }
114    }
115}
116AiBackend::LlamaLocal=>{
117    let url = format!("{}/completion", self.config.llama_url);
118    let mut request = HttpRequest::new(url, HttpMethod::POST);
119    request.set_is_streaming();
120    request.set_header("Content-Type".to_string(), "application/json".to_string());
121    request.set_metadata_id(chat_id); 
122    let mut prompt = String::new();
123    prompt.push_str("<|begin_of_text|>\n");
    prompt.push_str("<|start_header_id|>system<|end_header_id|>");
    prompt.push_str("You are a helpful programming assistant<|eot_id|>");
126    for msg in &doc.chat{
127        match msg{
128            AiChatMessage::User(v)=>{
129                prompt.push_str("<|start_header_id|>user<|end_header_id|>");
130                prompt.push_str(v);
131                prompt.push_str("<|eot_id|>");
132            }
133            AiChatMessage::Ai(v)=>{
134                prompt.push_str("<|start_header_id|>assistant<|end_header_id|>");
135                prompt.push_str(v);
136                prompt.push_str("<|eot_id|>");
137            }
138        }
139    }
140    request.set_json_body(LLamaCppQuery{
141        stream: true,
142        n_predict:400,
143        temperature:0.7,
144        stop:vec![],
145        repeat_last_n:256,
146        repeat_penalty:1.18,
147        top_k:40.0,
148        top_p:0.95,
149        min_p:0.05,
150        tfs_z:1.0,
151        typical_p:1.0,
152        presence_penalty:0.0,
153        frequency_penalty:0.0,
154        mirostat:0.0,
155        mirostat_tau:5.0,
156        mirostat_eta:0.1,
157        grammar:"".to_string(),
158        n_probs:0,
159        min_keep:0.0,
160        image_data:vec![],
161        cache_prompt:true,
162        api_key:"".to_string(),
163        slot_id:-1,
164        prompt: message.to_string()
165    })
166    cx.http_request(request_id, request);
167}*/