//! Configuration types for graph execution and LLM calls
//! (`praxis_graph/types/config.rs`).

1use serde::{Deserialize, Serialize};
2use std::time::Duration;
3
4#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
5#[serde(rename_all = "lowercase")]
6pub enum Provider {
7    OpenAI,
8    Azure,
9    Anthropic,
10}
11
12impl Default for Provider {
13    fn default() -> Self {
14        Provider::OpenAI
15    }
16}
17
/// Tunables governing how a graph is executed.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GraphConfig {
    /// Upper bound on execution iterations (default: 50).
    pub max_iterations: usize,
    /// Wall-clock budget for a full graph run (default: 300 s).
    pub execution_timeout: Duration,
    /// Whether an in-flight execution may be cancelled (default: true).
    pub enable_cancellation: bool,
}
24
25impl Default for GraphConfig {
26    fn default() -> Self {
27        Self {
28            max_iterations: 50,
29            execution_timeout: Duration::from_secs(300),
30            enable_cancellation: true,
31        }
32    }
33}
34
35impl GraphConfig {
36    pub fn new() -> Self {
37        Self::default()
38    }
39
40    pub fn with_max_iterations(mut self, max: usize) -> Self {
41        self.max_iterations = max;
42        self
43    }
44
45    pub fn with_timeout(mut self, timeout: Duration) -> Self {
46        self.execution_timeout = timeout;
47        self
48    }
49
50    pub fn with_cancellation(mut self, enabled: bool) -> Self {
51        self.enable_cancellation = enabled;
52        self
53    }
54}
55
/// Per-request configuration for an LLM call.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LLMConfig {
    /// Model identifier, e.g. "gpt-4o".
    pub model: String,
    /// Backend provider; falls back to `Provider::default()` when absent
    /// from the serialized form.
    #[serde(default)]
    pub provider: Provider,
    /// Sampling temperature; serializes as `null` when unset.
    // NOTE(review): unlike `reasoning_effort`, `temperature` and `max_tokens`
    // lack `skip_serializing_if`, so a `None` is emitted as an explicit
    // `null` — confirm whether that asymmetry is intentional.
    pub temperature: Option<f32>,
    /// Maximum tokens to generate; serializes as `null` when unset.
    pub max_tokens: Option<u32>,
    /// Optional reasoning-effort hint; omitted entirely when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub reasoning_effort: Option<String>,
}
66
67impl LLMConfig {
68    pub fn new(model: impl Into<String>) -> Self {
69        Self {
70            model: model.into(),
71            provider: Provider::default(),
72            temperature: None,
73            max_tokens: None,
74            reasoning_effort: None,
75        }
76    }
77
78    pub fn with_provider(mut self, provider: Provider) -> Self {
79        self.provider = provider;
80        self
81    }
82
83    pub fn with_temperature(mut self, temp: f32) -> Self {
84        self.temperature = Some(temp);
85        self
86    }
87
88    pub fn with_max_tokens(mut self, tokens: u32) -> Self {
89        self.max_tokens = Some(tokens);
90        self
91    }
92
93    pub fn with_reasoning_effort(mut self, effort: impl Into<String>) -> Self {
94        self.reasoning_effort = Some(effort.into());
95        self
96    }
97}
98
99impl Default for LLMConfig {
100    fn default() -> Self {
101        Self {
102            model: "gpt-4o".to_string(),
103            provider: Provider::default(),
104            temperature: Some(1.0),
105            max_tokens: Some(4096),
106            reasoning_effort: None,
107        }
108    }
109}
110
/// Policy for how much conversation history is fed into a request.
///
/// Serialized as an internally tagged enum, e.g.
/// `{"type": "last_k", "k": 10}` or `{"type": "all_messages"}`.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ContextPolicy {
    /// Keep only the most recent `k` messages.
    LastK { k: usize },
    /// Keep the entire message history.
    AllMessages,
}
117
118impl Default for ContextPolicy {
119    fn default() -> Self {
120        Self::LastK { k: 10 }
121    }
122}
123