pub struct Graph { /* private fields */ }

Implementations§
impl Graph
pub fn new(
llm_client: Arc<dyn LLMClient>,
mcp_executor: Arc<MCPToolExecutor>,
config: GraphConfig,
) -> Self
pub fn new( llm_client: Arc<dyn LLMClient>, mcp_executor: Arc<MCPToolExecutor>, config: GraphConfig, ) -> Self
Examples found in repository:
examples/react_loop.rs (line 72)
10async fn main() -> Result<()> {
11 println!("╔════════════════════════════════════════════════════════════╗");
12 println!("║ Praxis React Agent - Interactive Demo ║");
13 println!("╚════════════════════════════════════════════════════════════╝");
14 println!();
15 println!("This demo shows a React agent that can:");
16 println!(" • Think through problems (reasoning)");
17 println!(" • Use tools from MCP servers");
18 println!(" • Respond to your questions");
19 println!();
20 println!("Prerequisites:");
21 println!(" 1. Set OPENAI_API_KEY: export OPENAI_API_KEY=your_key");
22 println!(" 2. Set MCP_SERVERS: export MCP_SERVERS=\"http://localhost:8000/mcp,http://localhost:8001/mcp\"");
23 println!(" 3. Start MCP servers: cd mcp_servers/weather && uv run python weather.py");
24 println!();
25 println!("Type 'exit' to quit");
26 println!();
27
28 // Get API key from environment
29 let api_key = std::env::var("OPENAI_API_KEY").expect(
30 "OPENAI_API_KEY must be set in environment. Run: export OPENAI_API_KEY=your_key_here"
31 );
32
33 // Parse MCP servers from environment
34 let mcp_servers = std::env::var("MCP_SERVERS")
35 .unwrap_or_else(|_| "http://localhost:8005/mcp".to_string());
36
37 // Create MCP tool executor (aggregates multiple servers)
38 let mcp_executor = Arc::new(MCPToolExecutor::new());
39
40 // Connect to each MCP server
41 println!("Connecting to MCP servers...");
42 for url in mcp_servers.split(',') {
43 let url = url.trim();
44 if !url.is_empty() {
45 print!(" Connecting to {}... ", url);
46 io::stdout().flush()?;
47 match MCPClient::new_http(
48 &format!("mcp-{}", uuid::Uuid::new_v4()),
49 url
50 ).await {
51 Ok(client) => {
52 mcp_executor.add_server(client).await?;
53 println!("✓");
54 }
55 Err(e) => {
56 println!("✗ Failed: {}", e);
57 println!("Make sure the MCP server is running at {}", url);
58 return Err(e);
59 }
60 }
61 }
62 }
63 println!();
64
65 // Create LLM client
66 let llm_client = Arc::new(OpenAIClient::new(api_key)?);
67
68 // Create graph config
69 let config = GraphConfig::default();
70
71 // Create graph
72 let graph = Graph::new(llm_client, mcp_executor, config);
73
74 // Conversation loop
75 let conversation_id = uuid::Uuid::new_v4().to_string();
76 let mut conversation_history: Vec<Message> = Vec::new();
77
78 loop {
79 // Get user input
80 print!("\n\x1b[1;36m You: \x1b[0m");
81 io::stdout().flush()?;
82
83 let mut input = String::new();
84 io::stdin().read_line(&mut input)?;
85 let input = input.trim();
86
87 if input.is_empty() {
88 continue;
89 }
90
91 if input.eq_ignore_ascii_case("exit") {
92 println!("\nGoodbye!");
93 break;
94 }
95
96 // Create user message
97 let user_message = Message::Human {
98 content: Content::text(input),
99 name: None,
100 };
101
102 // Add to conversation history
103 conversation_history.push(user_message);
104
105 // Create graph input with full conversation history
106 let llm_config = LLMConfig::new("gpt-4")
107 .with_temperature(0.7)
108 .with_max_tokens(4096);
109
110 let graph_input = GraphInput::new(
111 conversation_id.clone(),
112 conversation_history.clone(),
113 llm_config
114 );
115
116 // Spawn execution (no persistence for this example)
117 let mut event_rx = graph.spawn_run(graph_input, None);
118
119 // Print assistant label
120 print!("\n\x1b[1;32mAssistant:\x1b[0m ");
121 io::stdout().flush()?;
122
123 let mut in_reasoning = false;
124 let mut in_message = false;
125 let mut assistant_response = String::new();
126
127 // Process events
128 while let Some(event) = event_rx.recv().await {
129 match event {
130 StreamEvent::InitStream { .. } => {
131 // Silent - just track
132 }
133
134 StreamEvent::Reasoning { content } => {
135 if !in_reasoning {
136 print!("\n\x1b[2;3mReasoning: ");
137 in_reasoning = true;
138 in_message = false;
139 }
140 print!("{}", content);
141 io::stdout().flush()?;
142 }
143
144 StreamEvent::Message { content } => {
145 if !in_message {
146 if in_reasoning {
147 print!("\x1b[0m\n\n");
148 }
149 print!("\x1b[0m");
150 in_message = true;
151 in_reasoning = false;
152 }
153 print!("{}", content);
154 assistant_response.push_str(&content);
155 io::stdout().flush()?;
156 }
157
158 StreamEvent::ToolCall {
159 index: _,
160 id: _,
161 name,
162 arguments,
163 } => {
164 if in_reasoning {
165 print!("\x1b[0m\n");
166 }
167 if let Some(name) = name {
168 if let Some(args) = arguments {
169 print!("\n\x1b[1;33mCalling tool: {} ({})\x1b[0m", name, args);
170 } else {
171 print!("\n\x1b[1;33mCalling tool: {}\x1b[0m", name);
172 }
173 io::stdout().flush()?;
174 }
175 in_reasoning = false;
176 in_message = false;
177 }
178
179 StreamEvent::ToolResult {
180 tool_call_id: _,
181 result,
182 is_error,
183 duration_ms,
184 } => {
185 if is_error {
186 print!(
187 "\n\x1b[1;31mTool error ({}ms): {}\x1b[0m",
188 duration_ms, result
189 );
190 } else {
191 // Truncate long results
192 let display_result = if result.len() > 100 {
193 format!("{}...", &result[..100])
194 } else {
195 result
196 };
197 print!(
198 "\n\x1b[1;32mTool result ({}ms): {}\x1b[0m",
199 duration_ms, display_result
200 );
201 }
202 io::stdout().flush()?;
203 in_reasoning = false;
204 in_message = false;
205 }
206
207 StreamEvent::Done { finish_reason: _ } => {
208 // LLM stream done, continue to next node
209 }
210
211 StreamEvent::Error { message, .. } => {
212 print!("\n\n\x1b[1;31mError: {}\x1b[0m", message);
213 io::stdout().flush()?;
214 break;
215 }
216
217 StreamEvent::EndStream {
218 status: _,
219 total_duration_ms,
220 } => {
221 print!("\n\n\x1b[2m[Completed in {}ms]\x1b[0m", total_duration_ms);
222 io::stdout().flush()?;
223 break;
224 }
225 }
226 }
227
228 // Add assistant response to conversation history
229 if !assistant_response.is_empty() {
230 conversation_history.push(Message::AI {
231 content: Some(Content::text(assistant_response)),
232 tool_calls: None,
233 name: None,
234 });
235 }
236
237 println!(); // Final newline
238 }
239
240 Ok(())
241}Sourcepub fn builder() -> GraphBuilder
pub fn builder() -> GraphBuilder
Create a builder for fluent construction
pub fn spawn_run(
&self,
input: GraphInput,
persistence_ctx: Option<PersistenceContext>,
) -> Receiver<StreamEvent>
pub fn spawn_run( &self, input: GraphInput, persistence_ctx: Option<PersistenceContext>, ) -> Receiver<StreamEvent>
Spawn execution in background, return event receiver
Examples found in repository:
examples/react_loop.rs (line 117)
10async fn main() -> Result<()> {
11 println!("╔════════════════════════════════════════════════════════════╗");
12 println!("║ Praxis React Agent - Interactive Demo ║");
13 println!("╚════════════════════════════════════════════════════════════╝");
14 println!();
15 println!("This demo shows a React agent that can:");
16 println!(" • Think through problems (reasoning)");
17 println!(" • Use tools from MCP servers");
18 println!(" • Respond to your questions");
19 println!();
20 println!("Prerequisites:");
21 println!(" 1. Set OPENAI_API_KEY: export OPENAI_API_KEY=your_key");
22 println!(" 2. Set MCP_SERVERS: export MCP_SERVERS=\"http://localhost:8000/mcp,http://localhost:8001/mcp\"");
23 println!(" 3. Start MCP servers: cd mcp_servers/weather && uv run python weather.py");
24 println!();
25 println!("Type 'exit' to quit");
26 println!();
27
28 // Get API key from environment
29 let api_key = std::env::var("OPENAI_API_KEY").expect(
30 "OPENAI_API_KEY must be set in environment. Run: export OPENAI_API_KEY=your_key_here"
31 );
32
33 // Parse MCP servers from environment
34 let mcp_servers = std::env::var("MCP_SERVERS")
35 .unwrap_or_else(|_| "http://localhost:8005/mcp".to_string());
36
37 // Create MCP tool executor (aggregates multiple servers)
38 let mcp_executor = Arc::new(MCPToolExecutor::new());
39
40 // Connect to each MCP server
41 println!("Connecting to MCP servers...");
42 for url in mcp_servers.split(',') {
43 let url = url.trim();
44 if !url.is_empty() {
45 print!(" Connecting to {}... ", url);
46 io::stdout().flush()?;
47 match MCPClient::new_http(
48 &format!("mcp-{}", uuid::Uuid::new_v4()),
49 url
50 ).await {
51 Ok(client) => {
52 mcp_executor.add_server(client).await?;
53 println!("✓");
54 }
55 Err(e) => {
56 println!("✗ Failed: {}", e);
57 println!("Make sure the MCP server is running at {}", url);
58 return Err(e);
59 }
60 }
61 }
62 }
63 println!();
64
65 // Create LLM client
66 let llm_client = Arc::new(OpenAIClient::new(api_key)?);
67
68 // Create graph config
69 let config = GraphConfig::default();
70
71 // Create graph
72 let graph = Graph::new(llm_client, mcp_executor, config);
73
74 // Conversation loop
75 let conversation_id = uuid::Uuid::new_v4().to_string();
76 let mut conversation_history: Vec<Message> = Vec::new();
77
78 loop {
79 // Get user input
80 print!("\n\x1b[1;36m You: \x1b[0m");
81 io::stdout().flush()?;
82
83 let mut input = String::new();
84 io::stdin().read_line(&mut input)?;
85 let input = input.trim();
86
87 if input.is_empty() {
88 continue;
89 }
90
91 if input.eq_ignore_ascii_case("exit") {
92 println!("\nGoodbye!");
93 break;
94 }
95
96 // Create user message
97 let user_message = Message::Human {
98 content: Content::text(input),
99 name: None,
100 };
101
102 // Add to conversation history
103 conversation_history.push(user_message);
104
105 // Create graph input with full conversation history
106 let llm_config = LLMConfig::new("gpt-4")
107 .with_temperature(0.7)
108 .with_max_tokens(4096);
109
110 let graph_input = GraphInput::new(
111 conversation_id.clone(),
112 conversation_history.clone(),
113 llm_config
114 );
115
116 // Spawn execution (no persistence for this example)
117 let mut event_rx = graph.spawn_run(graph_input, None);
118
119 // Print assistant label
120 print!("\n\x1b[1;32mAssistant:\x1b[0m ");
121 io::stdout().flush()?;
122
123 let mut in_reasoning = false;
124 let mut in_message = false;
125 let mut assistant_response = String::new();
126
127 // Process events
128 while let Some(event) = event_rx.recv().await {
129 match event {
130 StreamEvent::InitStream { .. } => {
131 // Silent - just track
132 }
133
134 StreamEvent::Reasoning { content } => {
135 if !in_reasoning {
136 print!("\n\x1b[2;3mReasoning: ");
137 in_reasoning = true;
138 in_message = false;
139 }
140 print!("{}", content);
141 io::stdout().flush()?;
142 }
143
144 StreamEvent::Message { content } => {
145 if !in_message {
146 if in_reasoning {
147 print!("\x1b[0m\n\n");
148 }
149 print!("\x1b[0m");
150 in_message = true;
151 in_reasoning = false;
152 }
153 print!("{}", content);
154 assistant_response.push_str(&content);
155 io::stdout().flush()?;
156 }
157
158 StreamEvent::ToolCall {
159 index: _,
160 id: _,
161 name,
162 arguments,
163 } => {
164 if in_reasoning {
165 print!("\x1b[0m\n");
166 }
167 if let Some(name) = name {
168 if let Some(args) = arguments {
169 print!("\n\x1b[1;33mCalling tool: {} ({})\x1b[0m", name, args);
170 } else {
171 print!("\n\x1b[1;33mCalling tool: {}\x1b[0m", name);
172 }
173 io::stdout().flush()?;
174 }
175 in_reasoning = false;
176 in_message = false;
177 }
178
179 StreamEvent::ToolResult {
180 tool_call_id: _,
181 result,
182 is_error,
183 duration_ms,
184 } => {
185 if is_error {
186 print!(
187 "\n\x1b[1;31mTool error ({}ms): {}\x1b[0m",
188 duration_ms, result
189 );
190 } else {
191 // Truncate long results
192 let display_result = if result.len() > 100 {
193 format!("{}...", &result[..100])
194 } else {
195 result
196 };
197 print!(
198 "\n\x1b[1;32mTool result ({}ms): {}\x1b[0m",
199 duration_ms, display_result
200 );
201 }
202 io::stdout().flush()?;
203 in_reasoning = false;
204 in_message = false;
205 }
206
207 StreamEvent::Done { finish_reason: _ } => {
208 // LLM stream done, continue to next node
209 }
210
211 StreamEvent::Error { message, .. } => {
212 print!("\n\n\x1b[1;31mError: {}\x1b[0m", message);
213 io::stdout().flush()?;
214 break;
215 }
216
217 StreamEvent::EndStream {
218 status: _,
219 total_duration_ms,
220 } => {
221 print!("\n\n\x1b[2m[Completed in {}ms]\x1b[0m", total_duration_ms);
222 io::stdout().flush()?;
223 break;
224 }
225 }
226 }
227
228 // Add assistant response to conversation history
229 if !assistant_response.is_empty() {
230 conversation_history.push(Message::AI {
231 content: Some(Content::text(assistant_response)),
232 tool_calls: None,
233 name: None,
234 });
235 }
236
237 println!(); // Final newline
238 }
239
240 Ok(())
241}

Auto Trait Implementations§
impl Freeze for Graph
impl !RefUnwindSafe for Graph
impl Send for Graph
impl Sync for Graph
impl Unpin for Graph
impl !UnwindSafe for Graph
Blanket Implementations§
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value. Read more