// punch_runtime/fighter_loop.rs
//! The core agent execution loop.
//!
//! `run_fighter_loop` is the heart of the Punch runtime. It orchestrates the
//! conversation between the user, the LLM, and the tools (moves), persisting
//! messages to the memory substrate and enforcing loop guards.
//!
//! ## Production features
//!
//! - **Context window management**: Tracks estimated token count and trims
//!   messages when approaching the context limit.
//! - **Session repair**: Fixes orphaned tool results, empty messages,
//!   duplicate results, and missing results on startup and after errors.
//! - **Error recovery**: Handles empty responses, MaxTokens continuation,
//!   and per-tool timeouts.
//! - **Loop guard**: Graduated response (Allow → Warn → Block → CircuitBreak)
//!   with ping-pong detection and poll-tool relaxation.

18use std::sync::Arc;
19
20use serde::Deserialize as SerdeDeserialize;
21use tracing::{debug, error, info, instrument, warn};
22
23use punch_memory::{BoutId, MemorySubstrate};
24use punch_types::{
25    AgentCoordinator, FighterId, FighterManifest, Message, PolicyEngine, PunchError, PunchResult,
26    Role, SandboxEnforcer, ShellBleedDetector, ToolCallResult, ToolDefinition,
27};
28
29use crate::context_budget::ContextBudget;
30use crate::driver::{CompletionRequest, LlmDriver, StopReason, TokenUsage};
31use crate::guard::{GuardConfig, LoopGuard, LoopGuardVerdict};
32use crate::session_repair;
33use crate::tool_executor::{self, ToolExecutionContext};
34
/// Maximum number of MaxTokens continuations before giving up.
///
/// Guards against a model that repeatedly hits its output-token ceiling:
/// after this many "please continue" rounds the partial response is returned.
const MAX_CONTINUATION_LOOPS: usize = 5;

/// Default per-tool timeout in seconds.
///
/// Applied to each individual tool call via `tokio::time::timeout` when
/// `FighterLoopParams::tool_timeout_secs` is not provided.
const DEFAULT_TOOL_TIMEOUT_SECS: u64 = 120;

/// Parameters for the fighter loop.
///
/// The required fields configure identity, persistence, and the LLM driver;
/// the `Option` fields are tunables and integrations that fall back to
/// defaults (or are skipped entirely) when absent.
pub struct FighterLoopParams {
    /// The fighter's manifest (identity, model config, system prompt, capabilities).
    pub manifest: FighterManifest,
    /// The user's message to process.
    pub user_message: String,
    /// The bout (session) ID.
    pub bout_id: BoutId,
    /// The fighter's unique ID.
    pub fighter_id: FighterId,
    /// Shared memory substrate for persistence.
    pub memory: Arc<MemorySubstrate>,
    /// The LLM driver to use for completions.
    pub driver: Arc<dyn LlmDriver>,
    /// Tools available for this fighter to use.
    pub available_tools: Vec<ToolDefinition>,
    /// Maximum loop iterations before forced termination (default: 50).
    pub max_iterations: Option<usize>,
    /// Context window size in tokens (default: 200K).
    pub context_window: Option<usize>,
    /// Per-tool timeout in seconds (default: 120).
    pub tool_timeout_secs: Option<u64>,
    /// Optional agent coordinator for inter-agent tools.
    /// `None` disables inter-agent tooling.
    pub coordinator: Option<Arc<dyn AgentCoordinator>>,
    /// Optional policy engine for approval-gated tool execution.
    /// When present, the referee checks every move before the fighter can throw it.
    pub approval_engine: Option<Arc<PolicyEngine>>,
    /// Optional subprocess sandbox (containment ring) for shell and filesystem tools.
    /// When present, commands are validated and environments are sanitized before execution.
    pub sandbox: Option<Arc<SandboxEnforcer>>,
}

/// Result of a completed fighter loop run.
#[derive(Debug, Clone)]
pub struct FighterLoopResult {
    /// The final text response from the fighter.
    ///
    /// May carry a `[Loop terminated: …]` suffix when the loop guard broke
    /// the run, or fallback text when the model returned an empty message
    /// after tool use.
    pub response: String,
    /// Cumulative token usage across all LLM calls in this run.
    pub usage: TokenUsage,
    /// Number of loop iterations performed.
    pub iterations: usize,
    /// Number of individual tool calls executed.
    pub tool_calls_made: usize,
}

/// Run the fighter loop: the core agent execution engine.
///
/// This function:
/// 1. Loads message history from the bout and repairs it
/// 2. Recalls relevant memories
/// 3. Builds the system prompt with context
/// 4. Applies context budget management before each LLM call
/// 5. Calls the LLM with available tools
/// 6. If the LLM requests tool use, executes tools and loops
/// 7. Handles empty responses, MaxTokens continuation, and errors
/// 8. Enforces loop guards against runaway iterations
///
/// # Errors
///
/// Propagates memory-substrate persistence errors and LLM driver errors;
/// returns `PunchError::Provider` when the model reports an error stop
/// reason. Individual tool failures do NOT abort the loop — they are fed
/// back to the model as error-flagged tool results.
#[instrument(
    skip(params),
    fields(
        fighter = %params.fighter_id,
        bout = %params.bout_id,
        fighter_name = %params.manifest.name,
    )
)]
pub async fn run_fighter_loop(params: FighterLoopParams) -> PunchResult<FighterLoopResult> {
    // Resolve optional knobs to their documented defaults.
    let max_iterations = params.max_iterations.unwrap_or(50);
    let context_window = params.context_window.unwrap_or(200_000);
    let tool_timeout = params
        .tool_timeout_secs
        .unwrap_or(DEFAULT_TOOL_TIMEOUT_SECS);

    let budget = ContextBudget::new(context_window);
    let mut guard = LoopGuard::with_config(GuardConfig {
        max_iterations,
        ..Default::default()
    });
    // Accumulated across every LLM call made during this run.
    let mut total_usage = TokenUsage::default();
    let mut tool_calls_made: usize = 0;
    // Counts consecutive MaxTokens continuations; reset on real tool use.
    let mut continuation_count: usize = 0;

    // 1. Load message history and repair.
    let mut messages = params.memory.load_messages(&params.bout_id).await?;
    debug!(history_len = messages.len(), "loaded bout message history");

    // Run session repair on loaded history.
    let repair_stats = session_repair::repair_session(&mut messages);
    if repair_stats.any_repairs() {
        info!(repairs = %repair_stats, "repaired loaded message history");
    }

    // 2. Append the user's new message and persist it.
    // Persist BEFORE pushing so the stored history never lags the in-memory one.
    let user_msg = Message::new(Role::User, &params.user_message);
    params
        .memory
        .save_message(&params.bout_id, &user_msg)
        .await?;
    messages.push(user_msg);

    // 3. Recall relevant memories and build an enriched system prompt.
    // Built once per run; reused (cloned) for every LLM call in the loop.
    let system_prompt =
        build_system_prompt(&params.manifest, &params.fighter_id, &params.memory).await;

    // Build the tool execution context.
    let tool_context = ToolExecutionContext {
        working_dir: std::env::current_dir().unwrap_or_default(),
        fighter_id: params.fighter_id,
        memory: Arc::clone(&params.memory),
        coordinator: params.coordinator.clone(),
        approval_engine: params.approval_engine.clone(),
        sandbox: params.sandbox.clone(),
        bleed_detector: Some(Arc::new(ShellBleedDetector::new())),
        browser_pool: None,
        plugin_registry: None,
    };

    // 4. Main loop.
    loop {
        // --- Context Budget: check and trim before LLM call ---
        if let Some(trim_action) = budget.check_trim_needed(&messages, &params.available_tools) {
            budget.apply_trim(&mut messages, trim_action);

            // Re-run session repair after trimming (may create orphans).
            let post_trim_repair = session_repair::repair_session(&mut messages);
            if post_trim_repair.any_repairs() {
                debug!(repairs = %post_trim_repair, "repaired after context trim");
            }
        }

        // Apply context guard (truncate oversized tool results).
        budget.apply_context_guard(&mut messages);

        // Build the completion request.
        let request = CompletionRequest {
            model: params.manifest.model.model.clone(),
            messages: messages.clone(),
            tools: params.available_tools.clone(),
            max_tokens: params.manifest.model.max_tokens.unwrap_or(
                // Reasoning models (Qwen, DeepSeek) use thinking tokens internally,
                // so they need a much higher default to leave room for visible output.
                // The thinking budget can easily consume 2000-4000 tokens alone.
                match params.manifest.model.provider {
                    punch_types::Provider::Ollama => 16384,
                    _ => 4096,
                },
            ),
            temperature: params.manifest.model.temperature,
            system_prompt: Some(system_prompt.clone()),
        };

        // Call the LLM. Driver errors abort the whole run.
        let completion = match params.driver.complete(request).await {
            Ok(c) => c,
            Err(e) => {
                error!(error = %e, "LLM completion failed");
                return Err(e);
            }
        };
        total_usage.accumulate(&completion.usage);

        debug!(
            stop_reason = ?completion.stop_reason,
            input_tokens = completion.usage.input_tokens,
            output_tokens = completion.usage.output_tokens,
            tool_calls = completion.message.tool_calls.len(),
            "LLM completion received"
        );

        match completion.stop_reason {
            StopReason::EndTurn => {
                // --- Empty response handling ---
                if completion.message.content.is_empty() && completion.message.tool_calls.is_empty()
                {
                    if guard.iterations() == 0 {
                        // Empty response on iteration 0: one-shot retry.
                        // Recording the iteration makes this branch unreachable
                        // next time, so the retry cannot loop forever.
                        warn!("empty response on first iteration, retrying once");
                        guard.record_iteration();
                        continue;
                    }

                    // Empty response after tool use: insert fallback.
                    let has_prior_tools = messages.iter().any(|m| m.role == Role::Tool);

                    if has_prior_tools {
                        warn!("empty response after tool use, inserting fallback");
                        let fallback_msg = Message::new(
                            Role::Assistant,
                            "I completed the requested operations. The tool results above \
                             contain the output.",
                        );
                        params
                            .memory
                            .save_message(&params.bout_id, &fallback_msg)
                            .await?;
                        messages.push(fallback_msg.clone());

                        // NOTE(review): this early return skips the creed update
                        // and reflection spawn below — confirm that is intended.
                        return Ok(FighterLoopResult {
                            response: fallback_msg.content,
                            usage: total_usage,
                            iterations: guard.iterations(),
                            tool_calls_made,
                        });
                    }
                }

                // The fighter is done. Save and return the response.
                params
                    .memory
                    .save_message(&params.bout_id, &completion.message)
                    .await?;
                messages.push(completion.message.clone());

                let response = completion.message.content.clone();

                info!(
                    iterations = guard.iterations(),
                    tool_calls = tool_calls_made,
                    total_tokens = total_usage.total(),
                    "fighter loop complete"
                );

                // --- CREED EVOLUTION ---
                // Update the creed with bout statistics after completion.
                // Best-effort: a missing creed or load error is silently skipped.
                if let Ok(Some(mut creed)) = params
                    .memory
                    .load_creed_by_name(&params.manifest.name)
                    .await
                {
                    creed.record_bout();
                    creed.record_messages(guard.iterations() as u64 + 1); // +1 for user msg
                    // Bind to current fighter instance
                    creed.fighter_id = Some(params.fighter_id);

                    // --- HEARTBEAT MARK ---
                    // Mark due heartbeat tasks as checked now that the bout is complete.
                    // Indices are collected first because marking mutates `creed`.
                    let due_indices: Vec<usize> = creed
                        .heartbeat
                        .iter()
                        .enumerate()
                        .filter(|(_, h)| {
                            if !h.active {
                                return false;
                            }
                            let now = chrono::Utc::now();
                            match h.cadence.as_str() {
                                "every_bout" => true,
                                "on_wake" => h.last_checked.is_none(),
                                "hourly" => h
                                    .last_checked
                                    .is_none_or(|t| (now - t) > chrono::Duration::hours(1)),
                                "daily" => h
                                    .last_checked
                                    .is_none_or(|t| (now - t) > chrono::Duration::hours(24)),
                                // Unknown cadence strings are never due.
                                _ => false,
                            }
                        })
                        .map(|(i, _)| i)
                        .collect();
                    for idx in due_indices {
                        creed.mark_heartbeat_checked(idx);
                    }

                    if let Err(e) = params.memory.save_creed(&creed).await {
                        warn!(error = %e, "failed to update creed after bout");
                    } else {
                        debug!(fighter = %params.manifest.name, bout_count = creed.bout_count, "creed evolved");
                    }
                }

                // Spawn async reflection to extract learned behaviors from the bout.
                // This runs in the background and does not block the response.
                // The task handle is intentionally dropped (fire-and-forget).
                {
                    let driver = Arc::clone(&params.driver);
                    let memory = Arc::clone(&params.memory);
                    let model = params.manifest.model.model.clone();
                    let fighter_name = params.manifest.name.clone();
                    let reflection_messages = messages.clone();
                    tokio::spawn(async move {
                        reflect_on_bout(driver, memory, model, fighter_name, reflection_messages)
                            .await;
                    });
                }

                return Ok(FighterLoopResult {
                    response,
                    usage: total_usage,
                    iterations: guard.iterations(),
                    tool_calls_made,
                });
            }

            StopReason::MaxTokens => {
                // --- MaxTokens continuation ---
                // Keep the truncated partial output so the model can resume from it.
                params
                    .memory
                    .save_message(&params.bout_id, &completion.message)
                    .await?;
                messages.push(completion.message.clone());

                continuation_count += 1;

                if continuation_count > MAX_CONTINUATION_LOOPS {
                    warn!(
                        continuation_count = continuation_count,
                        "max continuation loops exceeded, returning partial response"
                    );
                    return Ok(FighterLoopResult {
                        response: completion.message.content,
                        usage: total_usage,
                        iterations: guard.iterations(),
                        tool_calls_made,
                    });
                }

                info!(
                    continuation = continuation_count,
                    max = MAX_CONTINUATION_LOOPS,
                    "MaxTokens hit, appending continuation prompt"
                );

                // Append a user message asking to continue.
                let continue_msg =
                    Message::new(Role::User, "Please continue from where you left off.");
                params
                    .memory
                    .save_message(&params.bout_id, &continue_msg)
                    .await?;
                messages.push(continue_msg);

                // Continuations still count against the loop guard budget.
                guard.record_iteration();
                continue;
            }

            StopReason::ToolUse => {
                // Reset continuation count since we got a real tool use.
                continuation_count = 0;

                // Check the loop guard before executing tools.
                let verdict = guard.record_tool_calls(&completion.message.tool_calls);
                match verdict {
                    LoopGuardVerdict::Break(reason) => {
                        warn!(reason = %reason, "loop guard triggered");

                        // Save the assistant message, then return with a guard message.
                        params
                            .memory
                            .save_message(&params.bout_id, &completion.message)
                            .await?;
                        messages.push(completion.message.clone());

                        let guard_response = format!(
                            "{}\n\n[Loop terminated: {}]",
                            completion.message.content, reason
                        );

                        return Ok(FighterLoopResult {
                            response: guard_response,
                            usage: total_usage,
                            iterations: guard.iterations(),
                            tool_calls_made,
                        });
                    }
                    LoopGuardVerdict::Continue => {}
                }

                // Save the assistant message (with tool calls).
                params
                    .memory
                    .save_message(&params.bout_id, &completion.message)
                    .await?;
                messages.push(completion.message.clone());

                // Execute each tool call with per-tool timeout.
                // Every call produces exactly one ToolCallResult (success,
                // error, blocked, or timeout) so results stay paired with ids.
                let mut tool_results = Vec::new();

                for tc in &completion.message.tool_calls {
                    debug!(tool = %tc.name, id = %tc.id, "executing tool call");

                    // Check per-call guard verdict.
                    let call_verdict = guard.evaluate_call(tc);
                    if let crate::guard::GuardVerdict::Block(reason) = &call_verdict {
                        warn!(tool = %tc.name, reason = %reason, "tool call blocked by guard");
                        tool_results.push(ToolCallResult {
                            id: tc.id.clone(),
                            content: format!("Error: {}", reason),
                            is_error: true,
                        });
                        tool_calls_made += 1;
                        continue;
                    }

                    let result = tokio::time::timeout(
                        std::time::Duration::from_secs(tool_timeout),
                        tool_executor::execute_tool(
                            &tc.name,
                            &tc.input,
                            &params.manifest.capabilities,
                            &tool_context,
                        ),
                    )
                    .await;

                    // Outer Result is the timeout; inner is the tool itself.
                    let tool_call_result = match result {
                        Ok(Ok(tool_result)) => {
                            let content = if tool_result.success {
                                tool_result.output.to_string()
                            } else {
                                tool_result
                                    .error
                                    .unwrap_or_else(|| "tool execution failed".to_string())
                            };

                            // Record outcome for future blocking.
                            guard.record_outcome(tc, &content);

                            // Truncate result if it exceeds the per-result cap.
                            let cap = budget.per_result_cap().min(budget.single_result_max());
                            let content = if content.len() > cap {
                                debug!(
                                    tool = %tc.name,
                                    original_len = content.len(),
                                    cap = cap,
                                    "truncating tool result"
                                );
                                ContextBudget::truncate_result(&content, cap)
                            } else {
                                content
                            };

                            ToolCallResult {
                                id: tc.id.clone(),
                                content,
                                is_error: !tool_result.success,
                            }
                        }
                        Ok(Err(e)) => {
                            error!(tool = %tc.name, error = %e, "tool execution error");
                            ToolCallResult {
                                id: tc.id.clone(),
                                content: format!("Error: {}", e),
                                is_error: true,
                            }
                        }
                        Err(_) => {
                            error!(
                                tool = %tc.name,
                                timeout_secs = tool_timeout,
                                "tool execution timed out"
                            );
                            ToolCallResult {
                                id: tc.id.clone(),
                                content: format!(
                                    "Error: tool '{}' timed out after {}s",
                                    tc.name, tool_timeout
                                ),
                                is_error: true,
                            }
                        }
                    };

                    tool_results.push(tool_call_result);
                    tool_calls_made += 1;
                }

                // Create and save the tool results message.
                let tool_msg = Message {
                    role: Role::Tool,
                    content: String::new(),
                    tool_calls: Vec::new(),
                    tool_results,
                    timestamp: chrono::Utc::now(),
                };

                params
                    .memory
                    .save_message(&params.bout_id, &tool_msg)
                    .await?;
                messages.push(tool_msg);

                // Continue the loop -- call the LLM again with tool results.
            }

            StopReason::Error => {
                error!("LLM returned error stop reason");
                return Err(PunchError::Provider {
                    provider: params.manifest.model.provider.to_string(),
                    message: "model returned an error".to_string(),
                });
            }
        }
    }
}

533/// Build an enriched system prompt by combining the fighter's base system
534/// prompt with recalled memories.
535async fn build_system_prompt(
536    manifest: &FighterManifest,
537    fighter_id: &FighterId,
538    memory: &MemorySubstrate,
539) -> String {
540    let mut prompt = manifest.system_prompt.clone();
541
542    // --- CREED INJECTION ---
543    // Load the fighter's creed (consciousness layer) if one exists.
544    // The creed is tied to fighter NAME so it persists across respawns.
545    match memory.load_creed_by_name(&manifest.name).await {
546        Ok(Some(creed)) => {
547            prompt.push_str("\n\n");
548            prompt.push_str(&creed.render());
549
550            // --- HEARTBEAT INJECTION ---
551            // Check for due heartbeat tasks and inject them into the prompt.
552            let due_tasks = creed.due_heartbeat_tasks();
553            if !due_tasks.is_empty() {
554                prompt.push_str("\n\n## HEARTBEAT — Due Tasks\n");
555                prompt.push_str(
556                    "The following proactive tasks are due. Address them briefly before responding to the user:\n",
557                );
558                for task in &due_tasks {
559                    prompt.push_str(&format!("- {}\n", task.task));
560                }
561            }
562        }
563        Ok(None) => {
564            // No creed defined — fighter runs without consciousness layer.
565        }
566        Err(e) => {
567            warn!(error = %e, "failed to load creed for fighter");
568        }
569    }
570
571    // --- SKILL INJECTION ---
572    // Load markdown-based skills from workspace, user, and bundled directories.
573    {
574        let workspace_skills = std::path::Path::new("./skills");
575        let user_skills = std::env::var("HOME")
576            .ok()
577            .map(|h| std::path::PathBuf::from(h).join(".punch").join("skills"));
578        // Bundled skills ship in the binary's directory
579        let bundled_skills = std::env::current_exe()
580            .ok()
581            .and_then(|p| p.parent().map(|d| d.join("skills")));
582
583        let skills = punch_skills::load_all_skills(
584            Some(workspace_skills),
585            user_skills.as_deref(),
586            bundled_skills.as_deref(),
587        );
588
589        if !skills.is_empty() {
590            prompt.push_str("\n\n");
591            prompt.push_str(&punch_skills::render_skills_prompt(&skills));
592        }
593    }
594
595    // Try to recall recent/relevant memories.
596    match memory.recall_memories(fighter_id, "", 10).await {
597        Ok(memories) if !memories.is_empty() => {
598            prompt.push_str("\n\n## Recalled Memories\n");
599            for mem in &memories {
600                prompt.push_str(&format!(
601                    "- **{}**: {} (confidence: {:.0}%)\n",
602                    mem.key,
603                    mem.value,
604                    mem.confidence * 100.0
605                ));
606            }
607        }
608        Ok(_) => {
609            // No memories to inject.
610        }
611        Err(e) => {
612            warn!(error = %e, "failed to recall memories for system prompt");
613        }
614    }
615
616    prompt
617}
618
/// A single learned behavior extracted from post-bout reflection.
#[derive(Debug, SerdeDeserialize)]
struct ReflectionItem {
    /// Free-text insight the model claims to have learned.
    observation: String,
    /// Model-reported confidence; clamped to [0, 1] before being applied.
    confidence: f64,
}

/// Post-bout reflection output from the LLM.
#[derive(Debug, SerdeDeserialize)]
struct ReflectionOutput {
    /// Newly learned behaviors (may be empty).
    behaviors: Vec<ReflectionItem>,
    /// Optional 0–1 rating of the interaction; defaults to `None` when the
    /// model omits the field, in which case trust is left untouched.
    #[serde(default)]
    interaction_quality: Option<f64>,
}

634/// Reflect on a completed bout to extract learned behaviors.
635///
636/// Makes a lightweight LLM call asking the model to extract insights from
637/// the conversation. Updates the creed with new learned behaviors and
638/// adjusts the user relationship trust based on interaction quality.
639async fn reflect_on_bout(
640    driver: Arc<dyn LlmDriver>,
641    memory: Arc<MemorySubstrate>,
642    model: String,
643    fighter_name: String,
644    messages: Vec<Message>,
645) {
646    // Only use the last 20 messages to keep the reflection call small
647    let recent: Vec<Message> = messages.into_iter().rev().take(20).rev().collect();
648
649    let reflection_prompt = r#"You just completed a conversation. Reflect on it and extract learned behaviors.
650
651Respond ONLY with valid JSON (no markdown fences, no commentary):
652{
653  "behaviors": [
654    {"observation": "what you learned", "confidence": 0.0-1.0}
655  ],
656  "interaction_quality": 0.0-1.0
657}
658
659Rules:
660- Extract 0-3 genuinely new insights about the user, effective patterns, or self-improvement notes
661- confidence: 0.5 = uncertain, 0.9 = very confident
662- interaction_quality: how productive/positive was this interaction (0.5 = neutral, 0.9 = great)
663- If nothing notable was learned, return: {"behaviors": [], "interaction_quality": 0.7}
664- DO NOT restate your directives or identity as learned behaviors"#;
665
666    let request = CompletionRequest {
667        model,
668        messages: recent,
669        tools: vec![],
670        max_tokens: 512,
671        temperature: Some(0.3),
672        system_prompt: Some(reflection_prompt.to_string()),
673    };
674
675    let response = match driver.complete(request).await {
676        Ok(resp) => resp,
677        Err(e) => {
678            debug!(error = %e, fighter = %fighter_name, "reflection LLM call failed (non-critical)");
679            return;
680        }
681    };
682
683    let content = response.message.content.trim().to_string();
684
685    // Try to parse JSON, stripping markdown fences if present
686    let json_str = if let Some(start) = content.find('{') {
687        if let Some(end) = content.rfind('}') {
688            &content[start..=end]
689        } else {
690            &content
691        }
692    } else {
693        &content
694    };
695
696    let output: ReflectionOutput = match serde_json::from_str(json_str) {
697        Ok(o) => o,
698        Err(e) => {
699            debug!(error = %e, fighter = %fighter_name, "failed to parse reflection JSON (non-critical)");
700            return;
701        }
702    };
703
704    // Load creed, apply changes, save
705    let mut creed = match memory.load_creed_by_name(&fighter_name).await {
706        Ok(Some(c)) => c,
707        _ => return,
708    };
709
710    // Apply confidence decay to existing behaviors
711    creed.decay_learned_behaviors(0.01, 0.3);
712
713    // Learn new behaviors
714    for item in &output.behaviors {
715        if !item.observation.is_empty() {
716            creed.learn(&item.observation, item.confidence.clamp(0.0, 1.0));
717        }
718    }
719
720    // Prune to max 20 behaviors
721    creed.prune_learned_behaviors(20);
722
723    // Update user relationship trust based on interaction quality
724    if let Some(quality) = output.interaction_quality {
725        let quality = quality.clamp(0.0, 1.0);
726        if let Some(rel) = creed
727            .relationships
728            .iter_mut()
729            .find(|r| r.entity_type == "user")
730        {
731            rel.trust = (rel.trust * 0.9 + quality * 0.1).clamp(0.0, 1.0);
732            rel.interaction_count += 1;
733        } else {
734            creed.relationships.push(punch_types::Relationship {
735                entity: "user".to_string(),
736                entity_type: "user".to_string(),
737                nature: "operator".to_string(),
738                trust: quality,
739                interaction_count: 1,
740                notes: format!(
741                    "First interaction: {}",
742                    chrono::Utc::now().format("%Y-%m-%d %H:%M UTC")
743                ),
744            });
745        }
746    }
747
748    if let Err(e) = memory.save_creed(&creed).await {
749        warn!(error = %e, fighter = %fighter_name, "failed to save creed after reflection");
750    } else {
751        info!(
752            fighter = %fighter_name,
753            new_behaviors = output.behaviors.len(),
754            total_behaviors = creed.learned_behaviors.len(),
755            "creed evolved via post-bout reflection"
756        );
757    }
758}