1use std::sync::Arc;
4use uuid::Uuid;
5
6use serde::Deserialize;
7use vex_adversarial::{
8 Consensus, ConsensusProtocol, Debate, DebateRound, ShadowAgent, ShadowConfig, Vote,
9};
10use vex_core::{Agent, ContextPacket, Hash};
11
/// Structured verdict emitted by the Red (shadow) agent when asked to
/// challenge a Blue response. Deserialized from the JSON that the challenge
/// prompt instructs the model to produce.
#[derive(Debug, Deserialize)]
struct ChallengeResponse {
    /// True when the Red agent disputes the Blue response.
    is_challenge: bool,
    /// Red agent's self-reported confidence in its verdict (prompt asks for 0.0-1.0).
    confidence: f64,
    /// Free-text justification for the verdict.
    reasoning: String,
    /// Optional replacement text proposed by the Red agent; currently not
    /// consumed by the executor.
    suggested_revision: Option<String>,
}
19
/// Tunable settings for [`AgentExecutor`].
#[derive(Debug, Clone)]
pub struct ExecutorConfig {
    /// Maximum number of Red/Blue debate rounds before voting is forced.
    pub max_debate_rounds: u32,
    /// Consensus protocol intended for evaluating the debate votes.
    pub consensus_protocol: ConsensusProtocol,
    /// When false, responses are returned unverified at neutral confidence.
    pub enable_adversarial: bool,
}
30
31impl Default for ExecutorConfig {
32 fn default() -> Self {
33 Self {
34 max_debate_rounds: 3,
35 consensus_protocol: ConsensusProtocol::Majority,
36 enable_adversarial: true,
37 }
38 }
39}
40
/// Outcome of a single [`AgentExecutor::execute`] call.
#[derive(Debug, Clone)]
pub struct ExecutionResult {
    /// Id of the agent that produced the response.
    pub agent_id: Uuid,
    /// Final response text (may be a rebuttal produced during the debate).
    pub response: String,
    /// Whether adversarial consensus was reached.
    pub verified: bool,
    /// Consensus confidence (0.5 when adversarial verification was disabled).
    pub confidence: f64,
    /// Context packet built from the final response and stored on the agent.
    pub context: ContextPacket,
    /// Trace root copied from the context packet, if any.
    pub trace_root: Option<Hash>,
    /// Full debate transcript when adversarial verification ran.
    pub debate: Option<Debate>,
}
59
60use vex_llm::{LlmProvider, LlmRequest};
61
/// Drives a Blue agent through an LLM and, optionally, an adversarial
/// Red-team verification pass.
pub struct AgentExecutor<L: LlmProvider> {
    /// Executor settings (debate rounds, consensus protocol, adversarial toggle).
    pub config: ExecutorConfig,
    /// Shared handle to the LLM backend used for all completions.
    llm: Arc<L>,
}
69
70impl<L: LlmProvider> Clone for AgentExecutor<L> {
71 fn clone(&self) -> Self {
72 Self {
73 config: self.config.clone(),
74 llm: self.llm.clone(),
75 }
76 }
77}
78
79impl<L: LlmProvider> AgentExecutor<L> {
80 pub fn new(llm: Arc<L>, config: ExecutorConfig) -> Self {
82 Self { config, llm }
83 }
84
85 pub async fn execute(
87 &self,
88 agent: &mut Agent,
89 prompt: &str,
90 ) -> Result<ExecutionResult, String> {
91 let full_prompt = if !agent.context.content.is_empty() {
93 format!(
94 "Previous Context (Time: {}):\n\"{}\"\n\nActive Prompt:\n\"{}\"",
95 agent.context.created_at, agent.context.content, prompt
96 )
97 } else {
98 prompt.to_string()
99 };
100
101 let blue_response = self
102 .llm
103 .complete(LlmRequest::with_role(&agent.config.role, &full_prompt))
104 .await
105 .map_err(|e| e.to_string())?
106 .content;
107
108 let (final_response, verified, confidence, debate) = if self.config.enable_adversarial {
110 self.run_adversarial_verification(agent, prompt, &blue_response)
111 .await?
112 } else {
113 (blue_response, false, 0.5, None)
114 };
115
116 let mut context = ContextPacket::new(&final_response);
118 context.source_agent = Some(agent.id);
119 context.importance = confidence;
120
121 agent.context = context.clone();
123 agent.fitness = confidence;
124
125 Ok(ExecutionResult {
126 agent_id: agent.id,
127 response: final_response,
128 verified,
129 confidence,
130 trace_root: context.trace_root.clone(),
131 context,
132 debate,
133 })
134 }
135
136 async fn run_adversarial_verification(
138 &self,
139 blue_agent: &Agent,
140 _original_prompt: &str,
141 blue_response: &str,
142 ) -> Result<(String, bool, f64, Option<Debate>), String> {
143 let shadow = ShadowAgent::new(blue_agent, ShadowConfig::default());
145
146 let mut debate = Debate::new(blue_agent.id, shadow.agent.id, blue_response);
148
149 let mut consensus = Consensus::new(ConsensusProtocol::WeightedConfidence);
151
152 for round_num in 1..=self.config.max_debate_rounds {
154 let mut challenge_prompt = shadow.challenge_prompt(blue_response);
156 challenge_prompt.push_str("\n\nIMPORTANT: Respond in valid JSON format: {\"is_challenge\": boolean, \"confidence\": float (0.0-1.0), \"reasoning\": \"string\", \"suggested_revision\": \"string\" | null}. If you agree with the statement, set is_challenge to false.");
157
158 let red_output = self
159 .llm
160 .complete(LlmRequest::with_role(
161 &shadow.agent.config.role,
162 &challenge_prompt,
163 ))
164 .await
165 .map_err(|e| e.to_string())?
166 .content;
167
168 let (is_challenge, red_confidence, red_reasoning, _suggested_revision) =
170 if let Some(start) = red_output.find('{') {
171 if let Some(end) = red_output.rfind('}') {
172 if let Ok(res) =
173 serde_json::from_str::<ChallengeResponse>(&red_output[start..=end])
174 {
175 (
176 res.is_challenge,
177 res.confidence,
178 res.reasoning,
179 res.suggested_revision,
180 )
181 } else {
182 (
183 red_output.to_lowercase().contains("disagree"),
184 0.5,
185 red_output.clone(),
186 None,
187 )
188 }
189 } else {
190 (false, 0.0, "Parsing failed".to_string(), None)
191 }
192 } else {
193 (false, 0.0, "No JSON found".to_string(), None)
194 };
195
196 let rebuttal = if is_challenge {
197 let rebuttal_prompt = format!(
198 "Your previous response was challenged by a Red agent:\n\n\
199 Original: \"{}\"\n\n\
200 Challenge: \"{}\"\n\n\
201 Please address these concerns or provide a revised response.",
202 blue_response, red_reasoning
203 );
204 Some(
205 self.llm
206 .complete(LlmRequest::with_role(
207 &blue_agent.config.role,
208 &rebuttal_prompt,
209 ))
210 .await
211 .map_err(|e| e.to_string())?
212 .content,
213 )
214 } else {
215 None
216 };
217
218 debate.add_round(DebateRound {
219 round: round_num,
220 blue_claim: blue_response.to_string(),
221 red_challenge: red_reasoning.clone(),
222 blue_rebuttal: rebuttal,
223 });
224
225 consensus.add_vote(Vote {
227 agent_id: shadow.agent.id,
228 agrees: !is_challenge,
229 confidence: red_confidence,
230 reasoning: Some(red_reasoning),
231 });
232
233 if !is_challenge {
234 break;
235 }
236 }
237
238 consensus.add_vote(Vote {
240 agent_id: blue_agent.id,
241 agrees: true,
242 confidence: blue_agent.fitness.max(0.5),
243 reasoning: Some(format!(
244 "Blue agent fitness: {:.0}%",
245 blue_agent.fitness * 100.0
246 )),
247 });
248
249 consensus.evaluate();
250
251 let final_response = if consensus.reached && consensus.decision == Some(true) {
253 blue_response.to_string()
254 } else if let Some(last_round) = debate.rounds.last() {
255 last_round
257 .blue_rebuttal
258 .clone()
259 .unwrap_or_else(|| blue_response.to_string())
260 } else {
261 blue_response.to_string()
262 };
263
264 let verified = consensus.reached;
265 let confidence = consensus.confidence;
266
267 debate.conclude(consensus.decision.unwrap_or(true), confidence);
268
269 Ok((final_response, verified, confidence, Some(debate)))
270 }
271}
272
#[cfg(test)]
mod tests {
    use super::*;
    use vex_core::AgentConfig;

    /// Smoke test: a default executor backed by the smart mock provider
    /// should yield a non-empty, verified response.
    #[tokio::test]
    async fn test_executor() {
        use vex_llm::MockProvider;

        let provider = Arc::new(MockProvider::smart());
        let executor = AgentExecutor::new(provider, ExecutorConfig::default());
        let mut agent = Agent::new(AgentConfig::default());

        let outcome = executor
            .execute(&mut agent, "Test prompt")
            .await
            .expect("mock-backed execution should succeed");
        assert!(!outcome.response.is_empty());
        assert!(outcome.verified);
    }
}