ChatSession

Struct ChatSession

A conversation container: ChatSession stores the chat history as a list of ChatMessage values, together with an optional system prompt, and provides helper methods for building up a multi-turn exchange.

pub struct ChatSession {
    pub messages: Vec<ChatMessage>,
    pub system_prompt: Option<String>,
}

Fields

messages: Vec<ChatMessage>
    The conversation history so far.

system_prompt: Option<String>
    An optional system prompt for the session.
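
For orientation, here is a minimal sketch of the typical flow. It uses only the API documented on this page; the import path is assumed from the crate name and the ChatMessage fields role and content are taken from the repository example below.

use helios_engine::ChatSession; // import path assumed

fn build_session() -> ChatSession {
    let mut session = ChatSession::new()
        .with_system_prompt("You are a helpful assistant.");

    session.add_user_message("What is 15 * 23?");
    session.add_assistant_message("15 * 23 = 345.");

    // get_messages() yields the conversation to hand to an LLM client.
    for msg in session.get_messages() {
        println!("{:?}: {}", msg.role, msg.content);
    }

    session
}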

Implementations

impl ChatSession

pub fn new() -> Self

Creates a new, empty chat session.

Examples found in repository:
examples/direct_llm_usage.rs (line 100)
 87  async fn conversation_with_context() -> helios_engine::Result<()> {
 88      let llm_config = LLMConfig {
 89          model_name: "gpt-3.5-turbo".to_string(),
 90          base_url: "https://api.openai.com/v1".to_string(),
 91          api_key: std::env::var("OPENAI_API_KEY")
 92              .unwrap_or_else(|_| "your-api-key-here".to_string()),
 93          temperature: 0.7,
 94          max_tokens: 2048,
 95      };
 96
 97      let client = LLMClient::new(llm_config);
 98
 99      // Use ChatSession to manage conversation
100      let mut session = ChatSession::new()
101          .with_system_prompt("You are a helpful math tutor. Give brief, clear explanations.");
102
103      // First turn
104      println!("Turn 1:");
105      session.add_user_message("What is 15 * 23?");
106      print!("  User: What is 15 * 23?\n  ");
107
108      match client.chat(session.get_messages(), None).await {
109          Ok(response) => {
110              session.add_assistant_message(&response.content);
111              println!("Assistant: {}", response.content);
112          }
113          Err(e) => {
114              println!("Error: {}", e);
115              return Ok(());
116          }
117      }
118
119      // Second turn (with context from first turn)
120      println!("\nTurn 2:");
121      session.add_user_message("Now divide that by 5.");
122      print!("  User: Now divide that by 5.\n  ");
123
124      match client.chat(session.get_messages(), None).await {
125          Ok(response) => {
126              session.add_assistant_message(&response.content);
127              println!("Assistant: {}", response.content);
128          }
129          Err(e) => {
130              println!("Error: {}", e);
131          }
132      }
133
134      println!("\n💡 Notice how the assistant remembered the result from the first calculation!");
135
136      Ok(())
137  }
138
139  /// Example 3: Information about using different providers
140  fn different_providers_info() {
141      println!("You can use Helios with various LLM providers:\n");
142
143      println!("🔵 OpenAI:");
144      println!("   LLMConfig {{");
145      println!("       model_name: \"gpt-4\".to_string(),");
146      println!("       base_url: \"https://api.openai.com/v1\".to_string(),");
147      println!("       api_key: env::var(\"OPENAI_API_KEY\").unwrap(),");
148      println!("       temperature: 0.7,");
149      println!("       max_tokens: 2048,");
150      println!("   }}\n");
151
152      println!("🟢 Local LM Studio:");
153      println!("   LLMConfig {{");
154      println!("       model_name: \"local-model\".to_string(),");
155      println!("       base_url: \"http://localhost:1234/v1\".to_string(),");
156      println!("       api_key: \"not-needed\".to_string(),");
157      println!("       temperature: 0.7,");
158      println!("       max_tokens: 2048,");
159      println!("   }}\n");
160
161      println!("🦙 Ollama:");
162      println!("   LLMConfig {{");
163      println!("       model_name: \"llama2\".to_string(),");
164      println!("       base_url: \"http://localhost:11434/v1\".to_string(),");
165      println!("       api_key: \"not-needed\".to_string(),");
166      println!("       temperature: 0.7,");
167      println!("       max_tokens: 2048,");
168      println!("   }}\n");
169
170      println!("🔷 Azure OpenAI:");
171      println!("   LLMConfig {{");
172      println!("       model_name: \"gpt-35-turbo\".to_string(),");
173      println!("       base_url: \"https://your-resource.openai.azure.com/...\".to_string(),");
174      println!("       api_key: env::var(\"AZURE_OPENAI_KEY\").unwrap(),");
175      println!("       temperature: 0.7,");
176      println!("       max_tokens: 2048,");
177      println!("   }}\n");
178  }
179
180  /// Example 4: Interactive chat session
181  async fn interactive_chat() -> helios_engine::Result<()> {
182      let llm_config = LLMConfig {
183          model_name: "gpt-3.5-turbo".to_string(),
184          base_url: "https://api.openai.com/v1".to_string(),
185          api_key: std::env::var("OPENAI_API_KEY")
186              .unwrap_or_else(|_| "your-api-key-here".to_string()),
187          temperature: 0.7,
188          max_tokens: 2048,
189      };
190
191      let client = LLMClient::new(llm_config);
192      let mut session = ChatSession::new()
193          .with_system_prompt("You are a friendly and helpful AI assistant.");
194
195      println!("Chat started! Type 'exit' or 'quit' to end the conversation.\n");
196
197      loop {
198          print!("You: ");
199          io::stdout().flush()?;
200
201          let mut input = String::new();
202          io::stdin().read_line(&mut input)?;
203          let input = input.trim();
204
205          if input.is_empty() {
206              continue;
207          }
208
209          if input == "exit" || input == "quit" {
210              println!("\n👋 Goodbye!");
211              break;
212          }
213
214          // Special commands
215          if input == "clear" {
216              session.clear();
217              println!("🧹 Conversation cleared!\n");
218              continue;
219          }
220
221          if input == "history" {
222              println!("\n📜 Conversation history:");
223              for (i, msg) in session.messages.iter().enumerate() {
224                  println!("  {}. {:?}: {}", i + 1, msg.role, msg.content);
225              }
226              println!();
227              continue;
228          }
229
230          session.add_user_message(input);
231
232          print!("Assistant: ");
233          io::stdout().flush()?;
234
235          match client.chat(session.get_messages(), None).await {
236              Ok(response) => {
237                  session.add_assistant_message(&response.content);
238                  println!("{}\n", response.content);
239              }
240              Err(e) => {
241                  println!("\n❌ Error: {}", e);
242                  println!("   (Make sure OPENAI_API_KEY is set correctly)\n");
243                  // Remove the last user message since it failed
244                  session.messages.pop();
245              }
246          }
247      }
248
249      Ok(())
250  }

pub fn with_system_prompt(self, prompt: impl Into<String>) -> Self

Sets the session's system prompt, returning the session for builder-style chaining.

Examples found in repository:
examples/direct_llm_usage.rs (line 101); the call appears in the full example under new() above.

pub fn add_message(&mut self, message: ChatMessage)

Appends an arbitrary ChatMessage to the conversation history.
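
No scraped example exists for this method, so here is a small, hypothetical sketch. It relies only on facts visible on this page: the messages field is public, and ChatSession: Clone implies ChatMessage: Clone. ChatMessage's own constructors are not documented here.

// Hypothetical: replay the opening message of one session into another.
fn replay_first(source: &ChatSession, target: &mut ChatSession) {
    if let Some(first) = source.messages.first() {
        // Cloning is available because ChatSession (and thus ChatMessage) is Clone.
        target.add_message(first.clone());
    }
}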

pub fn add_user_message(&mut self, content: impl Into<String>)

Appends a user message with the given content.

Examples found in repository:
examples/direct_llm_usage.rs (line 105); the call appears in the full example under new() above.

pub fn add_assistant_message(&mut self, content: impl Into<String>)

Appends an assistant message with the given content.

Examples found in repository:
examples/direct_llm_usage.rs (line 110); the call appears in the full example under new() above.

pub fn get_messages(&self) -> Vec<ChatMessage>

Returns the conversation as a list of ChatMessage values, in the form expected by an LLM client (for example, client.chat(session.get_messages(), None)).

Examples found in repository:
examples/direct_llm_usage.rs (line 108); the call appears in the full example under new() above.

pub fn clear(&mut self)

Clears the conversation history.

Examples found in repository:
examples/direct_llm_usage.rs (line 216); see interactive_chat (lines 181-250) in the full example under new() above.

Trait Implementations

impl Clone for ChatSession

fn clone(&self) -> ChatSession
    Returns a duplicate of the value.

fn clone_from(&mut self, source: &Self)
    Performs copy-assignment from source.
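
Because ChatSession implements Clone, a conversation can be forked and the two branches continued independently. A minimal sketch (the variable names are illustrative):

// Both sessions share history up to this point, then diverge.
let mut branch = session.clone();
branch.add_user_message("Let's try a different approach.");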

impl Debug for ChatSession

fn fmt(&self, f: &mut Formatter<'_>) -> Result
    Formats the value using the given formatter.

impl Default for ChatSession

fn default() -> Self
    Returns the "default value" for a type.
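
A short sketch; presumably Default::default() yields the same empty session as new(), though this page does not state that explicitly:

let session = ChatSession::default(); // assumed equivalent to ChatSession::new()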

Auto Trait Implementations

Blanket Implementations

impl<T> Any for T
where T: 'static + ?Sized

fn type_id(&self) -> TypeId
    Gets the TypeId of self.

impl<T> Borrow<T> for T
where T: ?Sized

fn borrow(&self) -> &T
    Immutably borrows from an owned value.

impl<T> BorrowMut<T> for T
where T: ?Sized

fn borrow_mut(&mut self) -> &mut T
    Mutably borrows from an owned value.

impl<T> CloneToUninit for T
where T: Clone

unsafe fn clone_to_uninit(&self, dest: *mut u8)
    Performs copy-assignment from self to dest. (Nightly-only experimental API: clone_to_uninit.)

impl<T> From<T> for T

fn from(t: T) -> T
    Returns the argument unchanged.

impl<T> Instrument for T

fn instrument(self, span: Span) -> Instrumented<Self>
    Instruments this type with the provided Span, returning an Instrumented wrapper.

fn in_current_span(self) -> Instrumented<Self>
    Instruments this type with the current Span, returning an Instrumented wrapper.

impl<T, U> Into<U> for T
where U: From<T>

fn into(self) -> U
    Calls U::from(self). That is, this conversion is whatever the implementation of From<T> for U chooses to do.

impl<T> ToOwned for T
where T: Clone

type Owned = T
    The resulting type after obtaining ownership.

fn to_owned(&self) -> T
    Creates owned data from borrowed data, usually by cloning.

fn clone_into(&self, target: &mut T)
    Uses borrowed data to replace owned data, usually by cloning.

impl<T, U> TryFrom<U> for T
where U: Into<T>

type Error = Infallible
    The type returned in the event of a conversion error.

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>
    Performs the conversion.

impl<T, U> TryInto<U> for T
where U: TryFrom<T>

type Error = <U as TryFrom<T>>::Error
    The type returned in the event of a conversion error.

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>
    Performs the conversion.

impl<T> WithSubscriber for T

fn with_subscriber<S>(self, subscriber: S) -> WithDispatch<Self>
where S: Into<Dispatch>
    Attaches the provided Subscriber to this type, returning a WithDispatch wrapper.

fn with_current_subscriber(self) -> WithDispatch<Self>
    Attaches the current default Subscriber to this type, returning a WithDispatch wrapper.