mod backends;
mod error;
pub mod message;
mod traits;

use backends::create_llm_model;
pub use error::Error;
use message::PromptMessageBuilder;
use traits::{LLMBackend, MessageBuilder};

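/// Selects and configures which LLM service a [`Model`] talks to; the only
/// backend wired up so far is OpenAI's chat API.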
pub enum Backend {
    ChatGPT { api_key: String, model: String },
}
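/// An LLM handle: a boxed backend plus the sampling temperature sent with
/// every request.
//
// Note: the `Debug` derive below only compiles if `Debug` is a supertrait of
// `LLMBackend` (defined in `traits`, not shown here).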
#[derive(Debug)]
pub struct Model {
    backend: Box<dyn LLMBackend>,
    temperature: f64,
}

impl Model {
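    /// Builds the backend described by `config`; the temperature defaults
    /// to 0.9.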
    pub fn new(config: Backend) -> Result<Model, Error> {
        let backend = create_llm_model(config)?;
        Ok(Self {
            backend,
            temperature: 0.9,
        })
    }

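    /// Flattens `context_message_group` into a single prompt string via
    /// [`PromptMessageBuilder`] and sends it to the backend at the current
    /// temperature.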
    pub async fn generate_response<T>(&self, context_message_group: T) -> Result<String, Error>
    where
        T: IntoIterator + Send,
        T::Item: MessageBuilder + Send,
    {
        self.backend
            .generate_response(
                self.temperature,
                PromptMessageBuilder::new(context_message_group)
                    .build()
                    .as_str(),
            )
            .await
    }

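    /// Sets the sampling temperature for subsequent requests. No range check
    /// is performed here; the backend API (0.0–2.0 for OpenAI's chat
    /// completions) is the effective constraint.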
    pub fn set_temperature(&mut self, temperature: f64) {
        self.temperature = temperature;
    }

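    /// Returns the current sampling temperature.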
    pub fn temperature(&self) -> f64 {
        self.temperature
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::env;

    #[test]
    fn test_model_creation() {
        dotenv::dotenv().ok();
        // `try_init` rather than `init`, so a second initialization attempt
        // (e.g. from another test) does not panic.
        let _ = env_logger::try_init();

        assert!(Model::new(Backend::ChatGPT {
            api_key: env::var("OPENAI_API_KEY").expect("OPENAI_API_KEY must be set"),
            model: "gpt-3.5-turbo".into(),
        })
        .is_ok());
    }
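
    // A sketch of a second test exercising the temperature accessors. Like
    // `test_model_creation` above, it assumes a valid key is available via
    // dotenv and that `create_llm_model` makes no network call at
    // construction time (not shown in this module).
    #[test]
    fn test_temperature_accessors() {
        dotenv::dotenv().ok();

        let mut model = Model::new(Backend::ChatGPT {
            api_key: env::var("OPENAI_API_KEY").expect("OPENAI_API_KEY must be set"),
            model: "gpt-3.5-turbo".into(),
        })
        .expect("backend construction should succeed");

        // `Model::new` seeds the temperature at 0.9; exact float comparison
        // is safe here because the same literals round-trip unchanged.
        assert_eq!(model.temperature(), 0.9);
        model.set_temperature(0.4);
        assert_eq!(model.temperature(), 0.4);
    }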
}