pub struct LLMConfig {
pub model: String,
pub temperature: Option<f32>,
pub max_tokens: Option<u32>,
}
Fields§
§model: String
§temperature: Option<f32>
§max_tokens: Option<u32>
Implementations§
Source§impl LLMConfig
impl LLMConfig
Source
pub fn new(model: impl Into<String>) -> LLMConfig
pub fn new(model: impl Into<String>) -> LLMConfig
Examples found in repository?
examples/react_loop.rs (line 102)
10async fn main() -> Result<()> {
11 println!("╔════════════════════════════════════════════════════════════╗");
12 println!("║ Praxis React Agent - Interactive Demo ║");
13 println!("╚════════════════════════════════════════════════════════════╝");
14 println!();
15 println!("This demo shows a React agent that can:");
16 println!(" • Think through problems (reasoning)");
17 println!(" • Use tools from MCP servers");
18 println!(" • Respond to your questions");
19 println!();
20 println!("Prerequisites:");
21 println!(" 1. Set OPENAI_API_KEY: export OPENAI_API_KEY=your_key");
22 println!(" 2. Set MCP_SERVERS: export MCP_SERVERS=\"http://localhost:8000/mcp,http://localhost:8001/mcp\"");
23 println!(" 3. Start MCP servers: cd mcp_servers/weather && uv run python weather.py");
24 println!();
25 println!("Type 'exit' to quit");
26 println!();
27
28 // Get API key from environment
29 let api_key = std::env::var("OPENAI_API_KEY").expect(
30 "OPENAI_API_KEY must be set in environment. Run: export OPENAI_API_KEY=your_key_here"
31 );
32
33 // Parse MCP servers from environment
34 let mcp_servers = std::env::var("MCP_SERVERS")
35 .unwrap_or_else(|_| "http://localhost:8000/mcp".to_string());
36
37 // Create MCP tool executor (aggregates multiple servers)
38 let mcp_executor = Arc::new(MCPToolExecutor::new());
39
40 // Connect to each MCP server
41 println!("Connecting to MCP servers...");
42 for url in mcp_servers.split(',') {
43 let url = url.trim();
44 if !url.is_empty() {
45 print!(" Connecting to {}... ", url);
46 io::stdout().flush()?;
47 match MCPClient::new_http(
48 &format!("mcp-{}", uuid::Uuid::new_v4()),
49 url
50 ).await {
51 Ok(client) => {
52 mcp_executor.add_server(client).await?;
53 println!("✓");
54 }
55 Err(e) => {
56 println!("✗ Failed: {}", e);
57 println!("Make sure the MCP server is running at {}", url);
58 return Err(e);
59 }
60 }
61 }
62 }
63 println!();
64
65 // Create LLM client
66 let llm_client = Arc::new(OpenAIClient::new(api_key)?);
67
68 // Create graph config
69 let config = GraphConfig::default();
70
71 // Create graph
72 let graph = Graph::new(llm_client, mcp_executor, config);
73
74 // Conversation loop
75 let conversation_id = uuid::Uuid::new_v4().to_string();
76
77 loop {
78 // Get user input
79 print!("\n\x1b[1;36m You: \x1b[0m");
80 io::stdout().flush()?;
81
82 let mut input = String::new();
83 io::stdin().read_line(&mut input)?;
84 let input = input.trim();
85
86 if input.is_empty() {
87 continue;
88 }
89
90 if input.eq_ignore_ascii_case("exit") {
91 println!("\nGoodbye!");
92 break;
93 }
94
95 // Create message
96 let user_message = Message::Human {
97 content: Content::text(input),
98 name: None,
99 };
100
101 // Create graph input
102 let llm_config = LLMConfig::new("gpt-4")
103 .with_temperature(0.7)
104 .with_max_tokens(4096);
105
106 let graph_input = GraphInput::new(conversation_id.clone(), user_message, llm_config);
107
108 // Spawn execution
109 let mut event_rx = graph.spawn_run(graph_input);
110
111 // Print assistant label
112 print!("\n\x1b[1;32mAssistant:\x1b[0m ");
113 io::stdout().flush()?;
114
115 let mut in_reasoning = false;
116 let mut in_message = false;
117
118 // Process events
119 while let Some(event) = event_rx.recv().await {
120 match event {
121 StreamEvent::InitStream { .. } => {
122 // Silent - just track
123 }
124
125 StreamEvent::Reasoning { content } => {
126 if !in_reasoning {
127 print!("\n\x1b[2;3mReasoning: ");
128 in_reasoning = true;
129 in_message = false;
130 }
131 print!("{}", content);
132 io::stdout().flush()?;
133 }
134
135 StreamEvent::Message { content } => {
136 if !in_message {
137 if in_reasoning {
138 print!("\x1b[0m\n\n");
139 }
140 print!("\x1b[0m");
141 in_message = true;
142 in_reasoning = false;
143 }
144 print!("{}", content);
145 io::stdout().flush()?;
146 }
147
148 StreamEvent::ToolCall {
149 index: _,
150 id: _,
151 name,
152 arguments,
153 } => {
154 if in_reasoning {
155 print!("\x1b[0m\n");
156 }
157 if let Some(name) = name {
158 if let Some(args) = arguments {
159 print!("\n\x1b[1;33mCalling tool: {} ({})\x1b[0m", name, args);
160 } else {
161 print!("\n\x1b[1;33mCalling tool: {}\x1b[0m", name);
162 }
163 io::stdout().flush()?;
164 }
165 in_reasoning = false;
166 in_message = false;
167 }
168
169 StreamEvent::ToolResult {
170 tool_call_id: _,
171 result,
172 is_error,
173 duration_ms,
174 } => {
175 if is_error {
176 print!(
177 "\n\x1b[1;31mTool error ({}ms): {}\x1b[0m",
178 duration_ms, result
179 );
180 } else {
181 // Truncate long results
182 let display_result = if result.len() > 100 {
183 format!("{}...", &result[..100])
184 } else {
185 result
186 };
187 print!(
188 "\n\x1b[1;32mTool result ({}ms): {}\x1b[0m",
189 duration_ms, display_result
190 );
191 }
192 io::stdout().flush()?;
193 in_reasoning = false;
194 in_message = false;
195 }
196
197 StreamEvent::Done { finish_reason: _ } => {
198 // LLM stream done, continue to next node
199 }
200
201 StreamEvent::Error { message, .. } => {
202 print!("\n\n\x1b[1;31mError: {}\x1b[0m", message);
203 io::stdout().flush()?;
204 break;
205 }
206
207 StreamEvent::EndStream {
208 status: _,
209 total_duration_ms,
210 } => {
211 print!("\n\n\x1b[2m[Completed in {}ms]\x1b[0m", total_duration_ms);
212 io::stdout().flush()?;
213 break;
214 }
215 }
216 }
217
218 println!(); // Final newline
219 }
220
221 Ok(())
222}
Source
pub fn with_temperature(self, temp: f32) -> LLMConfig
pub fn with_temperature(self, temp: f32) -> LLMConfig
Examples found in repository?
examples/react_loop.rs (line 103)
10async fn main() -> Result<()> {
11 println!("╔════════════════════════════════════════════════════════════╗");
12 println!("║ Praxis React Agent - Interactive Demo ║");
13 println!("╚════════════════════════════════════════════════════════════╝");
14 println!();
15 println!("This demo shows a React agent that can:");
16 println!(" • Think through problems (reasoning)");
17 println!(" • Use tools from MCP servers");
18 println!(" • Respond to your questions");
19 println!();
20 println!("Prerequisites:");
21 println!(" 1. Set OPENAI_API_KEY: export OPENAI_API_KEY=your_key");
22 println!(" 2. Set MCP_SERVERS: export MCP_SERVERS=\"http://localhost:8000/mcp,http://localhost:8001/mcp\"");
23 println!(" 3. Start MCP servers: cd mcp_servers/weather && uv run python weather.py");
24 println!();
25 println!("Type 'exit' to quit");
26 println!();
27
28 // Get API key from environment
29 let api_key = std::env::var("OPENAI_API_KEY").expect(
30 "OPENAI_API_KEY must be set in environment. Run: export OPENAI_API_KEY=your_key_here"
31 );
32
33 // Parse MCP servers from environment
34 let mcp_servers = std::env::var("MCP_SERVERS")
35 .unwrap_or_else(|_| "http://localhost:8000/mcp".to_string());
36
37 // Create MCP tool executor (aggregates multiple servers)
38 let mcp_executor = Arc::new(MCPToolExecutor::new());
39
40 // Connect to each MCP server
41 println!("Connecting to MCP servers...");
42 for url in mcp_servers.split(',') {
43 let url = url.trim();
44 if !url.is_empty() {
45 print!(" Connecting to {}... ", url);
46 io::stdout().flush()?;
47 match MCPClient::new_http(
48 &format!("mcp-{}", uuid::Uuid::new_v4()),
49 url
50 ).await {
51 Ok(client) => {
52 mcp_executor.add_server(client).await?;
53 println!("✓");
54 }
55 Err(e) => {
56 println!("✗ Failed: {}", e);
57 println!("Make sure the MCP server is running at {}", url);
58 return Err(e);
59 }
60 }
61 }
62 }
63 println!();
64
65 // Create LLM client
66 let llm_client = Arc::new(OpenAIClient::new(api_key)?);
67
68 // Create graph config
69 let config = GraphConfig::default();
70
71 // Create graph
72 let graph = Graph::new(llm_client, mcp_executor, config);
73
74 // Conversation loop
75 let conversation_id = uuid::Uuid::new_v4().to_string();
76
77 loop {
78 // Get user input
79 print!("\n\x1b[1;36m You: \x1b[0m");
80 io::stdout().flush()?;
81
82 let mut input = String::new();
83 io::stdin().read_line(&mut input)?;
84 let input = input.trim();
85
86 if input.is_empty() {
87 continue;
88 }
89
90 if input.eq_ignore_ascii_case("exit") {
91 println!("\nGoodbye!");
92 break;
93 }
94
95 // Create message
96 let user_message = Message::Human {
97 content: Content::text(input),
98 name: None,
99 };
100
101 // Create graph input
102 let llm_config = LLMConfig::new("gpt-4")
103 .with_temperature(0.7)
104 .with_max_tokens(4096);
105
106 let graph_input = GraphInput::new(conversation_id.clone(), user_message, llm_config);
107
108 // Spawn execution
109 let mut event_rx = graph.spawn_run(graph_input);
110
111 // Print assistant label
112 print!("\n\x1b[1;32mAssistant:\x1b[0m ");
113 io::stdout().flush()?;
114
115 let mut in_reasoning = false;
116 let mut in_message = false;
117
118 // Process events
119 while let Some(event) = event_rx.recv().await {
120 match event {
121 StreamEvent::InitStream { .. } => {
122 // Silent - just track
123 }
124
125 StreamEvent::Reasoning { content } => {
126 if !in_reasoning {
127 print!("\n\x1b[2;3mReasoning: ");
128 in_reasoning = true;
129 in_message = false;
130 }
131 print!("{}", content);
132 io::stdout().flush()?;
133 }
134
135 StreamEvent::Message { content } => {
136 if !in_message {
137 if in_reasoning {
138 print!("\x1b[0m\n\n");
139 }
140 print!("\x1b[0m");
141 in_message = true;
142 in_reasoning = false;
143 }
144 print!("{}", content);
145 io::stdout().flush()?;
146 }
147
148 StreamEvent::ToolCall {
149 index: _,
150 id: _,
151 name,
152 arguments,
153 } => {
154 if in_reasoning {
155 print!("\x1b[0m\n");
156 }
157 if let Some(name) = name {
158 if let Some(args) = arguments {
159 print!("\n\x1b[1;33mCalling tool: {} ({})\x1b[0m", name, args);
160 } else {
161 print!("\n\x1b[1;33mCalling tool: {}\x1b[0m", name);
162 }
163 io::stdout().flush()?;
164 }
165 in_reasoning = false;
166 in_message = false;
167 }
168
169 StreamEvent::ToolResult {
170 tool_call_id: _,
171 result,
172 is_error,
173 duration_ms,
174 } => {
175 if is_error {
176 print!(
177 "\n\x1b[1;31mTool error ({}ms): {}\x1b[0m",
178 duration_ms, result
179 );
180 } else {
181 // Truncate long results
182 let display_result = if result.len() > 100 {
183 format!("{}...", &result[..100])
184 } else {
185 result
186 };
187 print!(
188 "\n\x1b[1;32mTool result ({}ms): {}\x1b[0m",
189 duration_ms, display_result
190 );
191 }
192 io::stdout().flush()?;
193 in_reasoning = false;
194 in_message = false;
195 }
196
197 StreamEvent::Done { finish_reason: _ } => {
198 // LLM stream done, continue to next node
199 }
200
201 StreamEvent::Error { message, .. } => {
202 print!("\n\n\x1b[1;31mError: {}\x1b[0m", message);
203 io::stdout().flush()?;
204 break;
205 }
206
207 StreamEvent::EndStream {
208 status: _,
209 total_duration_ms,
210 } => {
211 print!("\n\n\x1b[2m[Completed in {}ms]\x1b[0m", total_duration_ms);
212 io::stdout().flush()?;
213 break;
214 }
215 }
216 }
217
218 println!(); // Final newline
219 }
220
221 Ok(())
222}
Source
pub fn with_max_tokens(self, tokens: u32) -> LLMConfig
pub fn with_max_tokens(self, tokens: u32) -> LLMConfig
Examples found in repository?
examples/react_loop.rs (line 104)
10async fn main() -> Result<()> {
11 println!("╔════════════════════════════════════════════════════════════╗");
12 println!("║ Praxis React Agent - Interactive Demo ║");
13 println!("╚════════════════════════════════════════════════════════════╝");
14 println!();
15 println!("This demo shows a React agent that can:");
16 println!(" • Think through problems (reasoning)");
17 println!(" • Use tools from MCP servers");
18 println!(" • Respond to your questions");
19 println!();
20 println!("Prerequisites:");
21 println!(" 1. Set OPENAI_API_KEY: export OPENAI_API_KEY=your_key");
22 println!(" 2. Set MCP_SERVERS: export MCP_SERVERS=\"http://localhost:8000/mcp,http://localhost:8001/mcp\"");
23 println!(" 3. Start MCP servers: cd mcp_servers/weather && uv run python weather.py");
24 println!();
25 println!("Type 'exit' to quit");
26 println!();
27
28 // Get API key from environment
29 let api_key = std::env::var("OPENAI_API_KEY").expect(
30 "OPENAI_API_KEY must be set in environment. Run: export OPENAI_API_KEY=your_key_here"
31 );
32
33 // Parse MCP servers from environment
34 let mcp_servers = std::env::var("MCP_SERVERS")
35 .unwrap_or_else(|_| "http://localhost:8000/mcp".to_string());
36
37 // Create MCP tool executor (aggregates multiple servers)
38 let mcp_executor = Arc::new(MCPToolExecutor::new());
39
40 // Connect to each MCP server
41 println!("Connecting to MCP servers...");
42 for url in mcp_servers.split(',') {
43 let url = url.trim();
44 if !url.is_empty() {
45 print!(" Connecting to {}... ", url);
46 io::stdout().flush()?;
47 match MCPClient::new_http(
48 &format!("mcp-{}", uuid::Uuid::new_v4()),
49 url
50 ).await {
51 Ok(client) => {
52 mcp_executor.add_server(client).await?;
53 println!("✓");
54 }
55 Err(e) => {
56 println!("✗ Failed: {}", e);
57 println!("Make sure the MCP server is running at {}", url);
58 return Err(e);
59 }
60 }
61 }
62 }
63 println!();
64
65 // Create LLM client
66 let llm_client = Arc::new(OpenAIClient::new(api_key)?);
67
68 // Create graph config
69 let config = GraphConfig::default();
70
71 // Create graph
72 let graph = Graph::new(llm_client, mcp_executor, config);
73
74 // Conversation loop
75 let conversation_id = uuid::Uuid::new_v4().to_string();
76
77 loop {
78 // Get user input
79 print!("\n\x1b[1;36m You: \x1b[0m");
80 io::stdout().flush()?;
81
82 let mut input = String::new();
83 io::stdin().read_line(&mut input)?;
84 let input = input.trim();
85
86 if input.is_empty() {
87 continue;
88 }
89
90 if input.eq_ignore_ascii_case("exit") {
91 println!("\nGoodbye!");
92 break;
93 }
94
95 // Create message
96 let user_message = Message::Human {
97 content: Content::text(input),
98 name: None,
99 };
100
101 // Create graph input
102 let llm_config = LLMConfig::new("gpt-4")
103 .with_temperature(0.7)
104 .with_max_tokens(4096);
105
106 let graph_input = GraphInput::new(conversation_id.clone(), user_message, llm_config);
107
108 // Spawn execution
109 let mut event_rx = graph.spawn_run(graph_input);
110
111 // Print assistant label
112 print!("\n\x1b[1;32mAssistant:\x1b[0m ");
113 io::stdout().flush()?;
114
115 let mut in_reasoning = false;
116 let mut in_message = false;
117
118 // Process events
119 while let Some(event) = event_rx.recv().await {
120 match event {
121 StreamEvent::InitStream { .. } => {
122 // Silent - just track
123 }
124
125 StreamEvent::Reasoning { content } => {
126 if !in_reasoning {
127 print!("\n\x1b[2;3mReasoning: ");
128 in_reasoning = true;
129 in_message = false;
130 }
131 print!("{}", content);
132 io::stdout().flush()?;
133 }
134
135 StreamEvent::Message { content } => {
136 if !in_message {
137 if in_reasoning {
138 print!("\x1b[0m\n\n");
139 }
140 print!("\x1b[0m");
141 in_message = true;
142 in_reasoning = false;
143 }
144 print!("{}", content);
145 io::stdout().flush()?;
146 }
147
148 StreamEvent::ToolCall {
149 index: _,
150 id: _,
151 name,
152 arguments,
153 } => {
154 if in_reasoning {
155 print!("\x1b[0m\n");
156 }
157 if let Some(name) = name {
158 if let Some(args) = arguments {
159 print!("\n\x1b[1;33mCalling tool: {} ({})\x1b[0m", name, args);
160 } else {
161 print!("\n\x1b[1;33mCalling tool: {}\x1b[0m", name);
162 }
163 io::stdout().flush()?;
164 }
165 in_reasoning = false;
166 in_message = false;
167 }
168
169 StreamEvent::ToolResult {
170 tool_call_id: _,
171 result,
172 is_error,
173 duration_ms,
174 } => {
175 if is_error {
176 print!(
177 "\n\x1b[1;31mTool error ({}ms): {}\x1b[0m",
178 duration_ms, result
179 );
180 } else {
181 // Truncate long results
182 let display_result = if result.len() > 100 {
183 format!("{}...", &result[..100])
184 } else {
185 result
186 };
187 print!(
188 "\n\x1b[1;32mTool result ({}ms): {}\x1b[0m",
189 duration_ms, display_result
190 );
191 }
192 io::stdout().flush()?;
193 in_reasoning = false;
194 in_message = false;
195 }
196
197 StreamEvent::Done { finish_reason: _ } => {
198 // LLM stream done, continue to next node
199 }
200
201 StreamEvent::Error { message, .. } => {
202 print!("\n\n\x1b[1;31mError: {}\x1b[0m", message);
203 io::stdout().flush()?;
204 break;
205 }
206
207 StreamEvent::EndStream {
208 status: _,
209 total_duration_ms,
210 } => {
211 print!("\n\n\x1b[2m[Completed in {}ms]\x1b[0m", total_duration_ms);
212 io::stdout().flush()?;
213 break;
214 }
215 }
216 }
217
218 println!(); // Final newline
219 }
220
221 Ok(())
222}
Trait Implementations§
Source§impl<'de> Deserialize<'de> for LLMConfig
impl<'de> Deserialize<'de> for LLMConfig
Source§fn deserialize<__D>(
    __deserializer: __D,
) -> Result<LLMConfig, <__D as Deserializer<'de>>::Error>
where
    __D: Deserializer<'de>,
fn deserialize<__D>(
    __deserializer: __D,
) -> Result<LLMConfig, <__D as Deserializer<'de>>::Error>
where
    __D: Deserializer<'de>,
Deserialize this value from the given Serde deserializer. Read more
Source§impl Serialize for LLMConfig
impl Serialize for LLMConfig
Source§fn serialize<__S>(
    &self,
    __serializer: __S,
) -> Result<<__S as Serializer>::Ok, <__S as Serializer>::Error>
where
    __S: Serializer,
fn serialize<__S>(
    &self,
    __serializer: __S,
) -> Result<<__S as Serializer>::Ok, <__S as Serializer>::Error>
where
    __S: Serializer,
Serialize this value into the given Serde serializer. Read more
Auto Trait Implementations§
impl Freeze for LLMConfig
impl RefUnwindSafe for LLMConfig
impl Send for LLMConfig
impl Sync for LLMConfig
impl Unpin for LLMConfig
impl UnwindSafe for LLMConfig
Blanket Implementations§
Source§impl<T> BorrowMut<T> for T
where
    T: ?Sized,
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value. Read more