use crate::config::{AIConfig, ModelType};
use crate::error::{BevyAIError, Result};
use reqwest::Client;
use serde::{Deserialize, Serialize};
use std::time::Duration;
use tracing::{debug, info};

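/// High-level client that calls the OpenAI, Anthropic, or Google APIs to
/// generate and analyse Bevy game code.
///
/// A minimal usage sketch, assuming an `AIConfig` has already been loaded
/// elsewhere (imports and crate paths are omitted, so the example is marked
/// `ignore`):
///
/// ```ignore
/// let agent = BevyAIAgent::new(config).await?;
/// let response = agent
///     .generate_game("A 2D platformer with double jump")
///     .execute()
///     .await?;
/// println!("{}", agent.extract_code(&response.content));
/// ```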
pub struct BevyAIAgent {
    client: Client,
    config: AIConfig,
}

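/// Builder for a single AI request. Constructed via [`BevyAIAgent::request`]
/// or one of the task-specific helpers, configured with the `with_*` methods,
/// and consumed by [`AIRequest::execute`].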
pub struct AIRequest {
    agent: BevyAIAgent,
    prompt: String,
    model: Option<ModelType>,
    temperature: Option<f32>,
    max_tokens: Option<u32>,
    context: Vec<String>,
    system_prompt: Option<String>,
}

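/// Provider-agnostic response returned by [`AIRequest::execute`].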
#[derive(Debug, Clone)]
pub struct AIResponse {
    /// Generated text returned by the model.
    pub content: String,
    /// Model that produced this response.
    pub model: ModelType,
    /// Total tokens consumed, when the provider reports usage.
    pub tokens_used: Option<u32>,
    /// Provider-specific finish/stop reason, if any.
    pub finish_reason: Option<String>,
    /// Identifier assigned to this exchange.
    pub conversation_id: uuid::Uuid,
}

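// Wire types for the OpenAI Chat Completions endpoint. Only the fields this
// crate actually sends or reads are modelled; the JSON field names already
// match OpenAI's snake_case schema, so no serde renaming is needed.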
#[derive(Serialize, Deserialize)]
struct OpenAIRequest {
    model: String,
    messages: Vec<OpenAIMessage>,
    max_tokens: Option<u32>,
    temperature: Option<f32>,
    stream: bool,
}

#[derive(Serialize, Deserialize)]
struct OpenAIMessage {
    role: String,
    content: String,
}

#[derive(Serialize, Deserialize)]
struct OpenAIResponse {
    choices: Vec<OpenAIChoice>,
    usage: Option<OpenAIUsage>,
}

#[derive(Serialize, Deserialize)]
struct OpenAIChoice {
    message: OpenAIMessage,
    finish_reason: Option<String>,
}

#[derive(Serialize, Deserialize)]
struct OpenAIUsage {
    total_tokens: u32,
}

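// Wire types for the Anthropic Messages API (`/v1/messages`). Field names
// match Anthropic's snake_case schema (`max_tokens`, `stop_reason`,
// `input_tokens`/`output_tokens`), so they serialize as-is.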
#[derive(Serialize, Deserialize)]
struct AnthropicRequest {
    model: String,
    messages: Vec<AnthropicMessage>,
    max_tokens: u32,
    temperature: Option<f32>,
}

#[derive(Serialize, Deserialize)]
struct AnthropicMessage {
    role: String,
    content: String,
}

#[derive(Serialize, Deserialize)]
struct AnthropicResponse {
    content: Vec<AnthropicContent>,
    usage: Option<AnthropicUsage>,
    stop_reason: Option<String>,
}

#[derive(Serialize, Deserialize)]
struct AnthropicContent {
    text: String,
}

#[derive(Serialize, Deserialize)]
struct AnthropicUsage {
    input_tokens: u32,
    output_tokens: u32,
}

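// Wire types for Google's Gemini `generateContent` endpoint. Unlike the other
// providers, Google's REST API uses camelCase JSON field names
// (`generationConfig`, `maxOutputTokens`, `usageMetadata`, ...), so these
// structs opt into camelCase renaming below.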
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct GoogleRequest {
    contents: Vec<GoogleContent>,
    generation_config: GoogleGenerationConfig,
}

#[derive(Serialize, Deserialize)]
struct GoogleContent {
    parts: Vec<GooglePart>,
}

#[derive(Serialize, Deserialize)]
struct GooglePart {
    text: String,
}

#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct GoogleGenerationConfig {
    temperature: Option<f32>,
    max_output_tokens: Option<u32>,
}

#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct GoogleResponse {
    candidates: Vec<GoogleCandidate>,
    usage_metadata: Option<GoogleUsage>,
}

#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct GoogleCandidate {
    content: GoogleContent,
    finish_reason: Option<String>,
}

#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct GoogleUsage {
    total_token_count: u32,
}

impl BevyAIAgent {
    pub async fn new(config: AIConfig) -> Result<Self> {
        let client = Client::builder()
            .timeout(Duration::from_secs(120))
            .user_agent(crate::USER_AGENT)
            .build()?;

        Ok(Self { client, config })
    }

    pub fn request<S: Into<String>>(&self, prompt: S) -> AIRequest {
        AIRequest {
            agent: self.clone(),
            prompt: prompt.into(),
            model: None,
            temperature: None,
            max_tokens: None,
            context: Vec::new(),
            system_prompt: None,
        }
    }

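    // Convenience constructors: each wraps `request()` with a task-specific
    // system prompt from `crate::ai::prompts` and the configured default model.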
    pub fn generate_game<S: Into<String>>(&self, description: S) -> AIRequest {
        let system_prompt = crate::ai::prompts::GAME_GENERATION_PROMPT;

        self.request(description)
            .with_system_prompt(system_prompt)
            .with_model(self.config.default_model.clone())
    }

    pub fn add_feature<S1, S2>(&self, feature_description: S1, existing_code: S2) -> AIRequest
    where
        S1: Into<String>,
        S2: Into<String>,
    {
        let system_prompt = crate::ai::prompts::FEATURE_ADDITION_PROMPT;
        let prompt = format!(
            "Add this feature: {}\n\nExisting code:\n```rust\n{}\n```",
            feature_description.into(),
            existing_code.into()
        );

        self.request(prompt)
            .with_system_prompt(system_prompt)
            .with_model(self.config.default_model.clone())
    }

    pub fn improve_code<S1, S2>(&self, aspect: S1, code: S2) -> AIRequest
    where
        S1: Into<String>,
        S2: Into<String>,
    {
        let system_prompt = crate::ai::prompts::CODE_IMPROVEMENT_PROMPT;
        let prompt = format!(
            "Improve the {} of this code:\n\n```rust\n{}\n```",
            aspect.into(),
            code.into()
        );

        self.request(prompt)
            .with_system_prompt(system_prompt)
            .with_model(self.config.default_model.clone())
    }

    pub fn explain_code<S: Into<String>>(&self, code: S) -> AIRequest {
        let system_prompt = crate::ai::prompts::CODE_EXPLANATION_PROMPT;
        let prompt = format!("Explain this Bevy code:\n\n```rust\n{}\n```", code.into());

        self.request(prompt)
            .with_system_prompt(system_prompt)
            .with_model(self.config.default_model.clone())
    }

    pub fn debug_code<S1, S2>(&self, code: S1, error_message: S2) -> AIRequest
    where
        S1: Into<String>,
        S2: Into<String>,
    {
        let system_prompt = crate::ai::prompts::CODE_DEBUGGING_PROMPT;
        let prompt = format!(
            "Debug this Bevy code:\n\n```rust\n{}\n```\n\nError: {}",
            code.into(),
            error_message.into()
        );

        self.request(prompt)
            .with_system_prompt(system_prompt)
            .with_model(self.config.default_model.clone())
    }

    pub fn optimize_performance<S: Into<String>>(&self, code: S) -> AIRequest {
        let system_prompt = crate::ai::prompts::PERFORMANCE_OPTIMIZATION_PROMPT;

        self.request(code)
            .with_system_prompt(system_prompt)
            .with_model(self.config.default_model.clone())
    }

    pub fn generate_tests<S: Into<String>>(&self, code: S) -> AIRequest {
        let system_prompt = crate::ai::prompts::TEST_GENERATION_PROMPT;

        self.request(code)
            .with_system_prompt(system_prompt)
            .with_model(self.config.default_model.clone())
    }

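    /// Extracts the first fenced code block from a model response, preferring
    /// a ```rust block, then any fenced block, and finally falling back to the
    /// trimmed response text.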
    pub fn extract_code(&self, response: &str) -> String {
        // Prefer an explicit ```rust fence.
        if let Some(start) = response.find("```rust") {
            let code_start = start + 7;
            if let Some(end) = response[code_start..].find("```") {
                return response[code_start..code_start + end].trim().to_string();
            }
        }

        // Otherwise take the first generic fence, skipping its info string.
        if let Some(start) = response.find("```") {
            let code_start = start + 3;
            if let Some(newline) = response[code_start..].find('\n') {
                let actual_start = code_start + newline + 1;
                if let Some(end) = response[actual_start..].find("```") {
                    return response[actual_start..actual_start + end].trim().to_string();
                }
            }
        }

        // No fences found: return the whole response.
        response.trim().to_string()
    }

    async fn call_openai(&self, request: &OpenAIRequest, model: &ModelType) -> Result<AIResponse> {
        let api_key = self.config.get_api_key(model)?;

        let base_url = self.config.openai.as_ref()
            .and_then(|c| c.base_url.as_ref())
            .map(|s| s.as_str())
            .unwrap_or("https://api.openai.com");

        let url = format!("{}/v1/chat/completions", base_url);

        debug!("Making OpenAI API call to: {}", url);

        let response = self.client
            .post(&url)
            .header("Authorization", format!("Bearer {}", api_key))
            .header("Content-Type", "application/json")
            .json(request)
            .send()
            .await?;

        if !response.status().is_success() {
            let status = response.status();
            let error_text = response.text().await?;
            return Err(BevyAIError::ai_api(format!(
                "OpenAI API error ({}): {}",
                status, error_text
            )));
        }

        let openai_response: OpenAIResponse = response.json().await?;

        let choice = openai_response.choices
            .into_iter()
            .next()
            .ok_or_else(|| BevyAIError::ai_api("No response from OpenAI"))?;

        Ok(AIResponse {
            content: choice.message.content,
            model: model.clone(),
            tokens_used: openai_response.usage.map(|u| u.total_tokens),
            finish_reason: choice.finish_reason,
            conversation_id: uuid::Uuid::new_v4(),
        })
    }

    async fn call_anthropic(&self, request: &AnthropicRequest, model: &ModelType) -> Result<AIResponse> {
        let api_key = self.config.get_api_key(model)?;

        let base_url = self.config.anthropic.as_ref()
            .and_then(|c| c.base_url.as_ref())
            .map(|s| s.as_str())
            .unwrap_or("https://api.anthropic.com");

        let url = format!("{}/v1/messages", base_url);

        debug!("Making Anthropic API call to: {}", url);

        let response = self.client
            .post(&url)
            .header("x-api-key", api_key)
            .header("Content-Type", "application/json")
            .header("anthropic-version", "2023-06-01")
            .json(request)
            .send()
            .await?;

        if !response.status().is_success() {
            let status = response.status();
            let error_text = response.text().await?;
            return Err(BevyAIError::ai_api(format!(
                "Anthropic API error ({}): {}",
                status, error_text
            )));
        }

        let anthropic_response: AnthropicResponse = response.json().await?;

        let content = anthropic_response.content
            .into_iter()
            .next()
            .ok_or_else(|| BevyAIError::ai_api("No response from Anthropic"))?;

        let tokens_used = anthropic_response.usage
            .map(|u| u.input_tokens + u.output_tokens);

        Ok(AIResponse {
            content: content.text,
            model: model.clone(),
            tokens_used,
            finish_reason: anthropic_response.stop_reason,
            conversation_id: uuid::Uuid::new_v4(),
        })
    }

    async fn call_google(&self, request: &GoogleRequest, model: &ModelType) -> Result<AIResponse> {
        let api_key = self.config.get_api_key(model)?;

        let base_url = self.config.google.as_ref()
            .and_then(|c| c.base_url.as_ref())
            .map(|s| s.as_str())
            .unwrap_or("https://generativelanguage.googleapis.com");

        // The key is passed as a query parameter, so log only the key-free
        // endpoint to avoid leaking credentials into debug output.
        let endpoint = format!("{}/v1beta/models/{}:generateContent", base_url, model.as_str());
        let url = format!("{}?key={}", endpoint, api_key);

        debug!("Making Google API call to: {}", endpoint);

        let response = self.client
            .post(&url)
            .header("Content-Type", "application/json")
            .json(request)
            .send()
            .await?;

        if !response.status().is_success() {
            let status = response.status();
            let error_text = response.text().await?;
            return Err(BevyAIError::ai_api(format!(
                "Google API error ({}): {}",
                status, error_text
            )));
        }

        let google_response: GoogleResponse = response.json().await?;

        let candidate = google_response.candidates
            .into_iter()
            .next()
            .ok_or_else(|| BevyAIError::ai_api("No response from Google"))?;

        let content = candidate.content.parts
            .into_iter()
            .next()
            .ok_or_else(|| BevyAIError::ai_api("No content in Google response"))?;

        Ok(AIResponse {
            content: content.text,
            model: model.clone(),
            tokens_used: google_response.usage_metadata.map(|u| u.total_token_count),
            finish_reason: candidate.finish_reason,
            conversation_id: uuid::Uuid::new_v4(),
        })
    }
}

impl Clone for BevyAIAgent {
    fn clone(&self) -> Self {
        Self {
            client: self.client.clone(),
            config: self.config.clone(),
        }
    }
}

impl AIRequest {
    pub fn with_model(mut self, model: ModelType) -> Self {
        self.model = Some(model);
        self
    }

    pub fn with_temperature(mut self, temperature: f32) -> Self {
        self.temperature = Some(temperature);
        self
    }

    pub fn with_max_tokens(mut self, max_tokens: u32) -> Self {
        self.max_tokens = Some(max_tokens);
        self
    }

    pub fn with_context<S: Into<String>>(mut self, context: S) -> Self {
        self.context.push(context.into());
        self
    }

    pub fn with_system_prompt<S: Into<String>>(mut self, system_prompt: S) -> Self {
        self.system_prompt = Some(system_prompt.into());
        self
    }

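    /// Sends the request to the provider selected by the model, falling back
    /// to the configured defaults for model, temperature, and max tokens.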
    pub async fn execute(self) -> Result<AIResponse> {
        let model = self.model.unwrap_or_else(|| self.agent.config.default_model.clone());
        let temperature = self.temperature.unwrap_or(self.agent.config.generation.temperature);
        let max_tokens = self.max_tokens.unwrap_or(self.agent.config.generation.max_tokens);

        info!("Executing AI request with model: {}", model);

        match model.provider() {
            "openai" => {
                let mut messages = Vec::new();

                // System prompt and prior context become dedicated chat messages.
                if let Some(system_prompt) = &self.system_prompt {
                    messages.push(OpenAIMessage {
                        role: "system".to_string(),
                        content: system_prompt.clone(),
                    });
                }

                for context in &self.context {
                    messages.push(OpenAIMessage {
                        role: "assistant".to_string(),
                        content: context.clone(),
                    });
                }

                messages.push(OpenAIMessage {
                    role: "user".to_string(),
                    content: self.prompt,
                });

                let request = OpenAIRequest {
                    model: model.as_str().to_string(),
                    messages,
                    max_tokens: Some(max_tokens),
                    temperature: Some(temperature),
                    stream: false,
                };

                self.agent.call_openai(&request, &model).await
            }
            "anthropic" => {
                let mut messages = Vec::new();

                // The system prompt is folded into the single user message rather
                // than sent as a separate field; context entries are not forwarded.
                if let Some(system_prompt) = &self.system_prompt {
                    let combined_prompt = format!(
                        "{}\n\nHuman: {}\n\nAssistant:",
                        system_prompt, self.prompt
                    );
                    messages.push(AnthropicMessage {
                        role: "user".to_string(),
                        content: combined_prompt,
                    });
                } else {
                    messages.push(AnthropicMessage {
                        role: "user".to_string(),
                        content: self.prompt,
                    });
                }

                let request = AnthropicRequest {
                    model: model.as_str().to_string(),
                    messages,
                    max_tokens,
                    temperature: Some(temperature),
                };

                self.agent.call_anthropic(&request, &model).await
            }
            "google" => {
                // Gemini receives a single text part; the system prompt, if any,
                // is prepended to the user prompt.
                let mut prompt = self.prompt;
                if let Some(system_prompt) = &self.system_prompt {
                    prompt = format!("{}\n\n{}", system_prompt, prompt);
                }

                let request = GoogleRequest {
                    contents: vec![GoogleContent {
                        parts: vec![GooglePart { text: prompt }],
                    }],
                    generation_config: GoogleGenerationConfig {
                        temperature: Some(temperature),
                        max_output_tokens: Some(max_tokens),
                    },
                };

                self.agent.call_google(&request, &model).await
            }
            provider => Err(BevyAIError::unsupported_model(provider)),
        }
    }
}