// dioxus_ai/types.rs
1//! Types for dioxus-ai
2
3use serde::{Deserialize, Serialize};
4
/// A single message in a chat conversation.
///
/// Derives `Serialize`/`Deserialize` so conversations can be persisted
/// or sent over the wire, and `PartialEq` for change detection in UI code.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct ChatMessage {
    /// Unique identifier for the message (generated from a nanosecond timestamp).
    pub id: String,
    /// Conversation role: "user", "assistant", or "system".
    /// NOTE(review): stringly-typed — consider an enum in a future breaking release.
    pub role: String,
    /// The message text.
    pub content: String,
    /// Creation time as fractional seconds since the Unix epoch.
    pub created_at: f64,
}
17
18impl ChatMessage {
19    /// Create a new user message
20    pub fn user(content: impl Into<String>) -> Self {
21        Self {
22            id: generate_id(),
23            role: "user".to_string(),
24            content: content.into(),
25            created_at: now(),
26        }
27    }
28
29    /// Create a new assistant message
30    pub fn assistant(content: impl Into<String>) -> Self {
31        Self {
32            id: generate_id(),
33            role: "assistant".to_string(),
34            content: content.into(),
35            created_at: now(),
36        }
37    }
38
39    /// Create a new system message
40    pub fn system(content: impl Into<String>) -> Self {
41        Self {
42            id: generate_id(),
43            role: "system".to_string(),
44            content: content.into(),
45            created_at: now(),
46        }
47    }
48}
49
/// Configuration for the chat hook.
///
/// Construct via `ChatOptions::default()` and override fields as needed.
#[derive(Debug, Clone)]
pub struct ChatOptions {
    /// LLM provider: "openai", "anthropic", "openrouter"
    pub provider: String,
    /// API key for the selected provider. Empty by default — must be set
    /// by the caller before making requests.
    pub api_key: String,
    /// Model identifier (e.g., "gpt-4o-mini", "claude-3-sonnet")
    pub model: String,
    /// Optional system prompt prepended to the conversation.
    pub system_prompt: Option<String>,
    /// Sampling temperature (0.0 - 2.0); higher is more random.
    pub temperature: f32,
    /// Maximum tokens to generate per response.
    pub max_tokens: u32,
    /// Enable streaming of partial responses.
    pub stream: bool,
    /// Messages to seed the conversation with.
    pub initial_messages: Vec<ChatMessage>,
}
70
71impl Default for ChatOptions {
72    fn default() -> Self {
73        Self {
74            provider: "openai".to_string(),
75            api_key: String::new(),
76            model: "gpt-4o-mini".to_string(),
77            system_prompt: None,
78            temperature: 0.7,
79            max_tokens: 4096,
80            stream: true,
81            initial_messages: Vec::new(),
82        }
83    }
84}
85
/// Configuration for the completion hook.
///
/// Mirrors [`ChatOptions`] minus the chat-specific fields
/// (streaming flag and initial message history).
#[derive(Debug, Clone)]
pub struct CompletionOptions {
    /// LLM provider: "openai", "anthropic", "openrouter"
    pub provider: String,
    /// API key for the selected provider. Empty by default — must be set
    /// by the caller before making requests.
    pub api_key: String,
    /// Model identifier (e.g., "gpt-4o-mini").
    pub model: String,
    /// Optional system prompt for the completion.
    pub system_prompt: Option<String>,
    /// Sampling temperature (0.0 - 2.0); higher is more random.
    pub temperature: f32,
    /// Maximum tokens to generate per response.
    pub max_tokens: u32,
}
102
103impl Default for CompletionOptions {
104    fn default() -> Self {
105        Self {
106            provider: "openai".to_string(),
107            api_key: String::new(),
108            model: "gpt-4o-mini".to_string(),
109            system_prompt: None,
110            temperature: 0.7,
111            max_tokens: 4096,
112        }
113    }
114}
115
/// Generate a unique message ID.
///
/// Combines the current Unix timestamp in nanoseconds with a
/// process-wide monotonic counter, so two IDs generated within the
/// same clock tick (tight loops, or platforms with coarse clocks)
/// still differ. The timestamp alone is NOT unique.
fn generate_id() -> String {
    use std::sync::atomic::{AtomicU64, Ordering};
    use std::time::{SystemTime, UNIX_EPOCH};

    // Relaxed is sufficient: we only need each fetch_add to return a
    // distinct value, not any ordering with other memory operations.
    static COUNTER: AtomicU64 = AtomicU64::new(0);

    let timestamp = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap_or_default()
        .as_nanos();
    let seq = COUNTER.fetch_add(1, Ordering::Relaxed);
    format!("msg_{}_{}", timestamp, seq)
}
125
/// Current wall-clock time as fractional seconds since the Unix epoch.
///
/// Falls back to 0.0 if the system clock reads before the epoch.
fn now() -> f64 {
    use std::time::{SystemTime, UNIX_EPOCH};
    match SystemTime::now().duration_since(UNIX_EPOCH) {
        Ok(elapsed) => elapsed.as_secs_f64(),
        Err(_) => 0.0,
    }
}