// punch_runtime/fighter_loop.rs
1//! The core agent execution loop.
2//!
3//! `run_fighter_loop` is the heart of the Punch runtime. It orchestrates the
4//! conversation between the user, the LLM, and the tools (moves), persisting
5//! messages to the memory substrate and enforcing loop guards.
6//!
7//! ## Production features
8//!
9//! - **Context window management**: Tracks estimated token count and trims
10//!   messages when approaching the context limit.
11//! - **Session repair**: Fixes orphaned tool results, empty messages,
12//!   duplicate results, and missing results on startup and after errors.
13//! - **Error recovery**: Handles empty responses, MaxTokens continuation,
14//!   and per-tool timeouts.
15//! - **Loop guard**: Graduated response (Allow → Warn → Block → CircuitBreak)
16//!   with ping-pong detection and poll-tool relaxation.
17
18use std::sync::Arc;
19
20use serde::Deserialize as SerdeDeserialize;
21use tracing::{debug, error, info, instrument, warn};
22
23use dashmap::DashMap;
24use punch_memory::{BoutId, MemorySubstrate};
25use punch_types::{
26    AgentCoordinator, FighterId, FighterManifest, Message, PolicyEngine, PunchError, PunchResult,
27    Role, SandboxEnforcer, ShellBleedDetector, ToolCallResult, ToolDefinition,
28};
29
30use crate::mcp::McpClient;
31
32use crate::context_budget::ContextBudget;
33use crate::driver::{CompletionRequest, LlmDriver, StopReason, TokenUsage};
34use crate::guard::{GuardConfig, LoopGuard, LoopGuardVerdict};
35use crate::session_repair;
36use crate::tool_executor::{self, ToolExecutionContext};
37
/// Maximum number of MaxTokens continuations before giving up.
///
/// Caps the "please continue" retry chain so a model that always stops at
/// its output-token limit cannot extend the loop forever; after this many
/// continuations the partial response is returned as-is.
const MAX_CONTINUATION_LOOPS: usize = 5;

/// Default per-tool timeout in seconds.
/// Used when `FighterLoopParams::tool_timeout_secs` is `None`.
const DEFAULT_TOOL_TIMEOUT_SECS: u64 = 120;
43
/// Parameters for the fighter loop.
///
/// Required fields identify the fighter, its session, and its runtime
/// services; the `Option` fields are production knobs that fall back to
/// the documented defaults (or disable the feature) when `None`.
pub struct FighterLoopParams {
    /// The fighter's manifest (identity, model config, system prompt, capabilities).
    pub manifest: FighterManifest,
    /// The user's message to process.
    pub user_message: String,
    /// The bout (session) ID.
    pub bout_id: BoutId,
    /// The fighter's unique ID.
    pub fighter_id: FighterId,
    /// Shared memory substrate for persistence.
    pub memory: Arc<MemorySubstrate>,
    /// The LLM driver to use for completions.
    pub driver: Arc<dyn LlmDriver>,
    /// Tools available for this fighter to use.
    pub available_tools: Vec<ToolDefinition>,
    /// Maximum loop iterations before forced termination (default: 50).
    pub max_iterations: Option<usize>,
    /// Context window size in tokens (default: 200K).
    pub context_window: Option<usize>,
    /// Per-tool timeout in seconds (default: 120).
    pub tool_timeout_secs: Option<u64>,
    /// Optional agent coordinator for inter-agent tools.
    pub coordinator: Option<Arc<dyn AgentCoordinator>>,
    /// Optional policy engine for approval-gated tool execution.
    /// When present, the referee checks every move before the fighter can throw it.
    pub approval_engine: Option<Arc<PolicyEngine>>,
    /// Optional subprocess sandbox (containment ring) for shell and filesystem tools.
    /// When present, commands are validated and environments are sanitized before execution.
    pub sandbox: Option<Arc<SandboxEnforcer>>,
    /// Active MCP server clients shared across fighters.
    /// When present, MCP tools are available for dispatch.
    pub mcp_clients: Option<Arc<DashMap<String, Arc<McpClient>>>>,
}
78
/// Result of a completed fighter loop run.
///
/// Returned on every non-error exit path, including guard-terminated and
/// max-continuation runs (where `response` may carry a partial or
/// fallback message).
#[derive(Debug, Clone)]
pub struct FighterLoopResult {
    /// The final text response from the fighter.
    pub response: String,
    /// Cumulative token usage across all LLM calls in this run.
    pub usage: TokenUsage,
    /// Number of loop iterations performed.
    pub iterations: usize,
    /// Number of individual tool calls executed.
    pub tool_calls_made: usize,
}
91
/// Run the fighter loop: the core agent execution engine.
///
/// This function:
/// 1. Loads message history from the bout and repairs it
/// 2. Recalls relevant memories
/// 3. Builds the system prompt with context
/// 4. Applies context budget management before each LLM call
/// 5. Calls the LLM with available tools
/// 6. If the LLM requests tool use, executes tools and loops
/// 7. Handles empty responses, MaxTokens continuation, and errors
/// 8. Enforces loop guards against runaway iterations
///
/// # Errors
///
/// Returns an error when the LLM driver fails, when persisting a message
/// to the memory substrate fails, or when the model reports
/// `StopReason::Error`. Guard trips and continuation exhaustion are NOT
/// errors — they return a normal `FighterLoopResult` with an annotated
/// or partial response.
#[instrument(
    skip(params),
    fields(
        fighter = %params.fighter_id,
        bout = %params.bout_id,
        fighter_name = %params.manifest.name,
    )
)]
pub async fn run_fighter_loop(params: FighterLoopParams) -> PunchResult<FighterLoopResult> {
    // Resolve the optional knobs to their documented defaults.
    let max_iterations = params.max_iterations.unwrap_or(50);
    let context_window = params.context_window.unwrap_or(200_000);
    let tool_timeout = params
        .tool_timeout_secs
        .unwrap_or(DEFAULT_TOOL_TIMEOUT_SECS);

    let budget = ContextBudget::new(context_window);
    let mut guard = LoopGuard::with_config(GuardConfig {
        max_iterations,
        ..Default::default()
    });
    let mut total_usage = TokenUsage::default();
    let mut tool_calls_made: usize = 0;
    // Counts consecutive MaxTokens continuations; reset on real tool use.
    let mut continuation_count: usize = 0;

    // 1. Load message history and repair.
    let mut messages = params.memory.load_messages(&params.bout_id).await?;
    debug!(history_len = messages.len(), "loaded bout message history");

    // Run session repair on loaded history.
    let repair_stats = session_repair::repair_session(&mut messages);
    if repair_stats.any_repairs() {
        info!(repairs = %repair_stats, "repaired loaded message history");
    }

    // 2. Append the user's new message and persist it.
    // Persist-then-push keeps the in-memory transcript in sync with storage.
    let user_msg = Message::new(Role::User, &params.user_message);
    params
        .memory
        .save_message(&params.bout_id, &user_msg)
        .await?;
    messages.push(user_msg);

    // 3. Recall relevant memories and build an enriched system prompt.
    let system_prompt =
        build_system_prompt(&params.manifest, &params.fighter_id, &params.memory).await;

    // Build the tool execution context.
    let tool_context = ToolExecutionContext {
        // Falls back to an empty path if the CWD is unavailable.
        working_dir: std::env::current_dir().unwrap_or_default(),
        fighter_id: params.fighter_id,
        memory: Arc::clone(&params.memory),
        coordinator: params.coordinator.clone(),
        approval_engine: params.approval_engine.clone(),
        sandbox: params.sandbox.clone(),
        bleed_detector: Some(Arc::new(ShellBleedDetector::new())),
        browser_pool: None,
        plugin_registry: None,
        mcp_clients: params.mcp_clients.clone(),
    };

    // 4. Main loop.
    // Invariant: every persisted message is also pushed onto `messages`
    // before the next LLM call, so the request transcript mirrors storage.
    loop {
        // --- Context Budget: check and trim before LLM call ---
        if let Some(trim_action) = budget.check_trim_needed(&messages, &params.available_tools) {
            budget.apply_trim(&mut messages, trim_action);

            // Re-run session repair after trimming (may create orphans).
            let post_trim_repair = session_repair::repair_session(&mut messages);
            if post_trim_repair.any_repairs() {
                debug!(repairs = %post_trim_repair, "repaired after context trim");
            }
        }

        // Apply context guard (truncate oversized tool results).
        budget.apply_context_guard(&mut messages);

        // Build the completion request.
        let request = CompletionRequest {
            model: params.manifest.model.model.clone(),
            messages: messages.clone(),
            tools: params.available_tools.clone(),
            max_tokens: params.manifest.model.max_tokens.unwrap_or(
                // Reasoning models (Qwen, DeepSeek) use thinking tokens internally,
                // so they need a much higher default to leave room for visible output.
                // The thinking budget can easily consume 2000-4000 tokens alone.
                match params.manifest.model.provider {
                    punch_types::Provider::Ollama => 16384,
                    _ => 4096,
                },
            ),
            temperature: params.manifest.model.temperature,
            system_prompt: Some(system_prompt.clone()),
        };

        // Call the LLM. Driver errors are fatal for the run.
        let completion = match params.driver.complete(request).await {
            Ok(c) => c,
            Err(e) => {
                error!(error = %e, "LLM completion failed");
                return Err(e);
            }
        };
        total_usage.accumulate(&completion.usage);

        debug!(
            stop_reason = ?completion.stop_reason,
            input_tokens = completion.usage.input_tokens,
            output_tokens = completion.usage.output_tokens,
            tool_calls = completion.message.tool_calls.len(),
            "LLM completion received"
        );

        match completion.stop_reason {
            StopReason::EndTurn => {
                // --- Empty response handling ---
                // An empty message with no tool calls is recoverable in two
                // ways: a one-shot retry on the very first iteration, or a
                // canned fallback when tools already produced output.
                if completion.message.content.is_empty() && completion.message.tool_calls.is_empty()
                {
                    if guard.iterations() == 0 {
                        // Empty response on iteration 0: one-shot retry.
                        warn!("empty response on first iteration, retrying once");
                        guard.record_iteration();
                        continue;
                    }

                    // Empty response after tool use: insert fallback.
                    let has_prior_tools = messages.iter().any(|m| m.role == Role::Tool);

                    if has_prior_tools {
                        warn!("empty response after tool use, inserting fallback");
                        let fallback_msg = Message::new(
                            Role::Assistant,
                            "I completed the requested operations. The tool results above \
                             contain the output.",
                        );
                        params
                            .memory
                            .save_message(&params.bout_id, &fallback_msg)
                            .await?;
                        messages.push(fallback_msg.clone());

                        return Ok(FighterLoopResult {
                            response: fallback_msg.content,
                            usage: total_usage,
                            iterations: guard.iterations(),
                            tool_calls_made,
                        });
                    }
                    // NOTE(review): an empty response with no prior tool use
                    // past iteration 0 falls through and is returned as an
                    // empty string — confirm this is the intended behavior.
                }

                // The fighter is done. Save and return the response.
                params
                    .memory
                    .save_message(&params.bout_id, &completion.message)
                    .await?;
                messages.push(completion.message.clone());

                let response = completion.message.content.clone();

                info!(
                    iterations = guard.iterations(),
                    tool_calls = tool_calls_made,
                    total_tokens = total_usage.total(),
                    "fighter loop complete"
                );

                // --- CREED EVOLUTION ---
                // Update the creed with bout statistics after completion.
                // Creed lookup is by fighter NAME so stats persist across respawns.
                if let Ok(Some(mut creed)) = params
                    .memory
                    .load_creed_by_name(&params.manifest.name)
                    .await
                {
                    creed.record_bout();
                    creed.record_messages(guard.iterations() as u64 + 1); // +1 for user msg
                    // Bind to current fighter instance
                    creed.fighter_id = Some(params.fighter_id);

                    // --- HEARTBEAT MARK ---
                    // Mark due heartbeat tasks as checked now that the bout is complete.
                    // Collect indices first to avoid mutating while iterating.
                    let due_indices: Vec<usize> = creed
                        .heartbeat
                        .iter()
                        .enumerate()
                        .filter(|(_, h)| {
                            if !h.active {
                                return false;
                            }
                            let now = chrono::Utc::now();
                            match h.cadence.as_str() {
                                "every_bout" => true,
                                "on_wake" => h.last_checked.is_none(),
                                "hourly" => h
                                    .last_checked
                                    .is_none_or(|t| (now - t) > chrono::Duration::hours(1)),
                                "daily" => h
                                    .last_checked
                                    .is_none_or(|t| (now - t) > chrono::Duration::hours(24)),
                                // Unknown cadence strings are treated as never due.
                                _ => false,
                            }
                        })
                        .map(|(i, _)| i)
                        .collect();
                    for idx in due_indices {
                        creed.mark_heartbeat_checked(idx);
                    }

                    // Creed persistence is best-effort; never fail the bout over it.
                    if let Err(e) = params.memory.save_creed(&creed).await {
                        warn!(error = %e, "failed to update creed after bout");
                    } else {
                        debug!(fighter = %params.manifest.name, bout_count = creed.bout_count, "creed evolved");
                    }
                }

                // Spawn async reflection to extract learned behaviors from the bout.
                // This runs in the background and does not block the response.
                {
                    let driver = Arc::clone(&params.driver);
                    let memory = Arc::clone(&params.memory);
                    let model = params.manifest.model.model.clone();
                    let fighter_name = params.manifest.name.clone();
                    let reflection_messages = messages.clone();
                    // Detached task: the handle is intentionally dropped.
                    tokio::spawn(async move {
                        reflect_on_bout(driver, memory, model, fighter_name, reflection_messages)
                            .await;
                    });
                }

                return Ok(FighterLoopResult {
                    response,
                    usage: total_usage,
                    iterations: guard.iterations(),
                    tool_calls_made,
                });
            }

            StopReason::MaxTokens => {
                // --- MaxTokens continuation ---
                // Persist the partial message, then ask the model to continue.
                params
                    .memory
                    .save_message(&params.bout_id, &completion.message)
                    .await?;
                messages.push(completion.message.clone());

                continuation_count += 1;

                if continuation_count > MAX_CONTINUATION_LOOPS {
                    warn!(
                        continuation_count = continuation_count,
                        "max continuation loops exceeded, returning partial response"
                    );
                    return Ok(FighterLoopResult {
                        response: completion.message.content,
                        usage: total_usage,
                        iterations: guard.iterations(),
                        tool_calls_made,
                    });
                }

                info!(
                    continuation = continuation_count,
                    max = MAX_CONTINUATION_LOOPS,
                    "MaxTokens hit, appending continuation prompt"
                );

                // Append a user message asking to continue.
                let continue_msg =
                    Message::new(Role::User, "Please continue from where you left off.");
                params
                    .memory
                    .save_message(&params.bout_id, &continue_msg)
                    .await?;
                messages.push(continue_msg);

                guard.record_iteration();
                continue;
            }

            StopReason::ToolUse => {
                // Reset continuation count since we got a real tool use.
                continuation_count = 0;

                // Check the loop guard before executing tools.
                let verdict = guard.record_tool_calls(&completion.message.tool_calls);
                match verdict {
                    LoopGuardVerdict::Break(reason) => {
                        warn!(reason = %reason, "loop guard triggered");

                        // Save the assistant message, then return with a guard message.
                        params
                            .memory
                            .save_message(&params.bout_id, &completion.message)
                            .await?;
                        messages.push(completion.message.clone());

                        let guard_response = format!(
                            "{}\n\n[Loop terminated: {}]",
                            completion.message.content, reason
                        );

                        return Ok(FighterLoopResult {
                            response: guard_response,
                            usage: total_usage,
                            iterations: guard.iterations(),
                            tool_calls_made,
                        });
                    }
                    LoopGuardVerdict::Continue => {}
                }

                // Save the assistant message (with tool calls).
                params
                    .memory
                    .save_message(&params.bout_id, &completion.message)
                    .await?;
                messages.push(completion.message.clone());

                // Execute each tool call with per-tool timeout.
                let mut tool_results = Vec::new();

                for tc in &completion.message.tool_calls {
                    debug!(tool = %tc.name, id = %tc.id, "executing tool call");

                    // Check per-call guard verdict.
                    // Blocked calls still produce an (error) result so the
                    // model sees a reply for every call it issued.
                    let call_verdict = guard.evaluate_call(tc);
                    if let crate::guard::GuardVerdict::Block(reason) = &call_verdict {
                        warn!(tool = %tc.name, reason = %reason, "tool call blocked by guard");
                        tool_results.push(ToolCallResult {
                            id: tc.id.clone(),
                            content: format!("Error: {}", reason),
                            is_error: true,
                        });
                        tool_calls_made += 1;
                        continue;
                    }

                    // Outer Err = timeout; inner Err = tool execution failure.
                    let result = tokio::time::timeout(
                        std::time::Duration::from_secs(tool_timeout),
                        tool_executor::execute_tool(
                            &tc.name,
                            &tc.input,
                            &params.manifest.capabilities,
                            &tool_context,
                        ),
                    )
                    .await;

                    let tool_call_result = match result {
                        Ok(Ok(tool_result)) => {
                            let content = if tool_result.success {
                                tool_result.output.to_string()
                            } else {
                                tool_result
                                    .error
                                    .unwrap_or_else(|| "tool execution failed".to_string())
                            };

                            // Record outcome for future blocking.
                            guard.record_outcome(tc, &content);

                            // Truncate result if it exceeds the per-result cap.
                            let cap = budget.per_result_cap().min(budget.single_result_max());
                            let content = if content.len() > cap {
                                debug!(
                                    tool = %tc.name,
                                    original_len = content.len(),
                                    cap = cap,
                                    "truncating tool result"
                                );
                                ContextBudget::truncate_result(&content, cap)
                            } else {
                                content
                            };

                            ToolCallResult {
                                id: tc.id.clone(),
                                content,
                                is_error: !tool_result.success,
                            }
                        }
                        Ok(Err(e)) => {
                            error!(tool = %tc.name, error = %e, "tool execution error");
                            ToolCallResult {
                                id: tc.id.clone(),
                                content: format!("Error: {}", e),
                                is_error: true,
                            }
                        }
                        Err(_) => {
                            error!(
                                tool = %tc.name,
                                timeout_secs = tool_timeout,
                                "tool execution timed out"
                            );
                            ToolCallResult {
                                id: tc.id.clone(),
                                content: format!(
                                    "Error: tool '{}' timed out after {}s",
                                    tc.name, tool_timeout
                                ),
                                is_error: true,
                            }
                        }
                    };

                    tool_results.push(tool_call_result);
                    tool_calls_made += 1;
                }

                // Create and save the tool results message.
                let tool_msg = Message {
                    role: Role::Tool,
                    content: String::new(),
                    tool_calls: Vec::new(),
                    tool_results,
                    timestamp: chrono::Utc::now(),
                };

                params
                    .memory
                    .save_message(&params.bout_id, &tool_msg)
                    .await?;
                messages.push(tool_msg);

                // Continue the loop -- call the LLM again with tool results.
            }

            StopReason::Error => {
                // Model-reported error: surface it as a provider error.
                error!("LLM returned error stop reason");
                return Err(PunchError::Provider {
                    provider: params.manifest.model.provider.to_string(),
                    message: "model returned an error".to_string(),
                });
            }
        }
    }
}
539
540/// Build an enriched system prompt by combining the fighter's base system
541/// prompt with recalled memories.
542async fn build_system_prompt(
543    manifest: &FighterManifest,
544    fighter_id: &FighterId,
545    memory: &MemorySubstrate,
546) -> String {
547    let mut prompt = manifest.system_prompt.clone();
548
549    // --- CREED INJECTION ---
550    // Load the fighter's creed (consciousness layer) if one exists.
551    // The creed is tied to fighter NAME so it persists across respawns.
552    match memory.load_creed_by_name(&manifest.name).await {
553        Ok(Some(creed)) => {
554            prompt.push_str("\n\n");
555            prompt.push_str(&creed.render());
556
557            // --- HEARTBEAT INJECTION ---
558            // Check for due heartbeat tasks and inject them into the prompt.
559            let due_tasks = creed.due_heartbeat_tasks();
560            if !due_tasks.is_empty() {
561                prompt.push_str("\n\n## HEARTBEAT — Due Tasks\n");
562                prompt.push_str(
563                    "The following proactive tasks are due. Address them briefly before responding to the user:\n",
564                );
565                for task in &due_tasks {
566                    prompt.push_str(&format!("- {}\n", task.task));
567                }
568            }
569        }
570        Ok(None) => {
571            // No creed defined — fighter runs without consciousness layer.
572        }
573        Err(e) => {
574            warn!(error = %e, "failed to load creed for fighter");
575        }
576    }
577
578    // --- SKILL INJECTION ---
579    // Load markdown-based skills from workspace, user, and bundled directories.
580    {
581        let workspace_skills = std::path::Path::new("./skills");
582        let user_skills = std::env::var("HOME")
583            .ok()
584            .map(|h| std::path::PathBuf::from(h).join(".punch").join("skills"));
585        // Bundled skills ship in the binary's directory
586        let bundled_skills = std::env::current_exe()
587            .ok()
588            .and_then(|p| p.parent().map(|d| d.join("skills")));
589
590        let skills = punch_skills::load_all_skills(
591            Some(workspace_skills),
592            user_skills.as_deref(),
593            bundled_skills.as_deref(),
594        );
595
596        if !skills.is_empty() {
597            prompt.push_str("\n\n");
598            prompt.push_str(&punch_skills::render_skills_prompt(&skills));
599        }
600    }
601
602    // Try to recall recent/relevant memories.
603    match memory.recall_memories(fighter_id, "", 10).await {
604        Ok(memories) if !memories.is_empty() => {
605            prompt.push_str("\n\n## Recalled Memories\n");
606            for mem in &memories {
607                prompt.push_str(&format!(
608                    "- **{}**: {} (confidence: {:.0}%)\n",
609                    mem.key,
610                    mem.value,
611                    mem.confidence * 100.0
612                ));
613            }
614        }
615        Ok(_) => {
616            // No memories to inject.
617        }
618        Err(e) => {
619            warn!(error = %e, "failed to recall memories for system prompt");
620        }
621    }
622
623    prompt
624}
625
/// A single learned behavior extracted from post-bout reflection.
#[derive(Debug, SerdeDeserialize)]
struct ReflectionItem {
    /// The insight itself, in the model's own words.
    observation: String,
    /// Model-reported confidence (expected 0.0–1.0; clamped on use).
    confidence: f64,
}
632
/// Post-bout reflection output from the LLM.
#[derive(Debug, SerdeDeserialize)]
struct ReflectionOutput {
    /// Newly learned behaviors (the prompt asks for 0-3).
    behaviors: Vec<ReflectionItem>,
    /// How productive/positive the interaction was (0.0–1.0);
    /// `None` when the model omits the field.
    #[serde(default)]
    interaction_quality: Option<f64>,
}
640
641/// Reflect on a completed bout to extract learned behaviors.
642///
643/// Makes a lightweight LLM call asking the model to extract insights from
644/// the conversation. Updates the creed with new learned behaviors and
645/// adjusts the user relationship trust based on interaction quality.
646async fn reflect_on_bout(
647    driver: Arc<dyn LlmDriver>,
648    memory: Arc<MemorySubstrate>,
649    model: String,
650    fighter_name: String,
651    messages: Vec<Message>,
652) {
653    // Only use the last 20 messages to keep the reflection call small
654    let recent: Vec<Message> = messages.into_iter().rev().take(20).rev().collect();
655
656    let reflection_prompt = r#"You just completed a conversation. Reflect on it and extract learned behaviors.
657
658Respond ONLY with valid JSON (no markdown fences, no commentary):
659{
660  "behaviors": [
661    {"observation": "what you learned", "confidence": 0.0-1.0}
662  ],
663  "interaction_quality": 0.0-1.0
664}
665
666Rules:
667- Extract 0-3 genuinely new insights about the user, effective patterns, or self-improvement notes
668- confidence: 0.5 = uncertain, 0.9 = very confident
669- interaction_quality: how productive/positive was this interaction (0.5 = neutral, 0.9 = great)
670- If nothing notable was learned, return: {"behaviors": [], "interaction_quality": 0.7}
671- DO NOT restate your directives or identity as learned behaviors"#;
672
673    let request = CompletionRequest {
674        model,
675        messages: recent,
676        tools: vec![],
677        max_tokens: 512,
678        temperature: Some(0.3),
679        system_prompt: Some(reflection_prompt.to_string()),
680    };
681
682    let response = match driver.complete(request).await {
683        Ok(resp) => resp,
684        Err(e) => {
685            debug!(error = %e, fighter = %fighter_name, "reflection LLM call failed (non-critical)");
686            return;
687        }
688    };
689
690    let content = response.message.content.trim().to_string();
691
692    // Try to parse JSON, stripping markdown fences if present
693    let json_str = if let Some(start) = content.find('{') {
694        if let Some(end) = content.rfind('}') {
695            &content[start..=end]
696        } else {
697            &content
698        }
699    } else {
700        &content
701    };
702
703    let output: ReflectionOutput = match serde_json::from_str(json_str) {
704        Ok(o) => o,
705        Err(e) => {
706            debug!(error = %e, fighter = %fighter_name, "failed to parse reflection JSON (non-critical)");
707            return;
708        }
709    };
710
711    // Load creed, apply changes, save
712    let mut creed = match memory.load_creed_by_name(&fighter_name).await {
713        Ok(Some(c)) => c,
714        _ => return,
715    };
716
717    // Apply confidence decay to existing behaviors
718    creed.decay_learned_behaviors(0.01, 0.3);
719
720    // Learn new behaviors
721    for item in &output.behaviors {
722        if !item.observation.is_empty() {
723            creed.learn(&item.observation, item.confidence.clamp(0.0, 1.0));
724        }
725    }
726
727    // Prune to max 20 behaviors
728    creed.prune_learned_behaviors(20);
729
730    // Update user relationship trust based on interaction quality
731    if let Some(quality) = output.interaction_quality {
732        let quality = quality.clamp(0.0, 1.0);
733        if let Some(rel) = creed
734            .relationships
735            .iter_mut()
736            .find(|r| r.entity_type == "user")
737        {
738            rel.trust = (rel.trust * 0.9 + quality * 0.1).clamp(0.0, 1.0);
739            rel.interaction_count += 1;
740        } else {
741            creed.relationships.push(punch_types::Relationship {
742                entity: "user".to_string(),
743                entity_type: "user".to_string(),
744                nature: "operator".to_string(),
745                trust: quality,
746                interaction_count: 1,
747                notes: format!(
748                    "First interaction: {}",
749                    chrono::Utc::now().format("%Y-%m-%d %H:%M UTC")
750                ),
751            });
752        }
753    }
754
755    if let Err(e) = memory.save_creed(&creed).await {
756        warn!(error = %e, fighter = %fighter_name, "failed to save creed after reflection");
757    } else {
758        info!(
759            fighter = %fighter_name,
760            new_behaviors = output.behaviors.len(),
761            total_behaviors = creed.learned_behaviors.len(),
762            "creed evolved via post-bout reflection"
763        );
764    }
765}