use std::collections::BTreeSet;
use std::path::{Path, PathBuf};
use std::process::ExitCode;
use anyhow::{bail, Context, Result};
use serde::{Deserialize, Serialize};
use sha2::{Digest, Sha256};
use ulid::Ulid;
use crate::db;
use crate::handoff::{self, BranchMode};
use crate::memory::entries::StructuredMemoryEntry;
use crate::memory::file::{append_block_to_contents, render_entry_block};
use crate::memory::{governance, queue};
use crate::output::CommandReport;
use crate::paths::git as git_paths;
use crate::paths::state::StateLayout;
use crate::profile;
use crate::repo::marker as repo_marker;
use crate::repo::registry as repo_registry;
use crate::state::protected_write::{
self, AppendWriteAuthority, AppendWriteOutcome, ExclusiveWriteOptions,
};
use crate::state::{runtime as runtime_state, session};
use crate::timestamps;
/// Maximum accepted length (in chars) for an evidence summary.
/// NOTE(review): not referenced in this chunk — presumably enforced by
/// `normalize_envelope` (defined elsewhere in this file); confirm.
const MAX_SUMMARY_CHARS: usize = 280;
/// Maximum accepted length for a `source_ref` value (see note above re: enforcement site).
const MAX_REF_CHARS: usize = 240;
/// Maximum accepted length for a provider name.
const MAX_PROVIDER_NAME_CHARS: usize = 64;
/// Host-provided runtime identifiers that can accompany a submission.
/// Passed into `submit` and consumed during envelope normalization.
/// NOTE(review): the merge happens in `normalize_envelope`, which is outside
/// this view — confirm how hints interact with an explicit host_reference.
#[derive(Debug, Clone, Default)]
pub(crate) struct SubmitRuntimeHints {
    pub(crate) host_session_id: Option<String>,
    pub(crate) host_run_id: Option<String>,
    pub(crate) host_task_id: Option<String>,
}
/// Caller-supplied evidence payload as submitted to `submit`.
/// The wire field `type` maps to `entry_type`; optional fields are omitted
/// from serialized output when absent.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub(crate) struct MemoryEvidenceEnvelope {
    // Target memory scope (e.g. "workspace", "work_stream", "project", "profile").
    pub(crate) scope: String,
    #[serde(rename = "type")]
    pub(crate) entry_type: String,
    pub(crate) source_kind: String,
    pub(crate) summary: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(crate) source_ref: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(crate) host_reference: Option<MemoryEvidenceHostReference>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(crate) provider_reference: Option<MemoryEvidenceProviderReference>,
}
/// Identifies the host environment (and optionally the hook/session/run/task)
/// that produced a piece of evidence.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub(crate) struct MemoryEvidenceHostReference {
    pub(crate) host: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(crate) hook: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(crate) session_id: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(crate) run_id: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(crate) task_id: Option<String>,
}
/// Identifies an upstream provider (and its native record id, when known)
/// associated with a piece of evidence.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub(crate) struct MemoryEvidenceProviderReference {
    pub(crate) provider: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(crate) native_id: Option<String>,
}
/// Report-facing view of a stored evidence record (see `view_from_record`
/// usage in `submit`). The wire field `type` maps to `entry_type`.
#[derive(Debug, Clone, Serialize)]
pub(crate) struct MemoryEvidenceView {
    pub(crate) id: String,
    pub(crate) scope: String,
    #[serde(rename = "type")]
    pub(crate) entry_type: String,
    pub(crate) source_kind: String,
    pub(crate) summary: String,
    // Hex digest of the summary bytes (via `hex_digest` in `submit`);
    // presumably SHA-256 given the `sha2` import — confirm in `hex_digest`.
    pub(crate) summary_digest: String,
    pub(crate) observed_at_epoch_s: u64,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(crate) source_ref: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(crate) host_reference: Option<MemoryEvidenceHostReference>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(crate) provider_reference: Option<MemoryEvidenceProviderReference>,
    // Whether a candidate has already been extracted from this evidence.
    pub(crate) extracted: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(crate) extracted_candidate_id: Option<String>,
}
/// JSON/text report emitted by `submit`.
#[derive(Serialize)]
pub(crate) struct MemoryEvidenceSubmitReport {
    command: &'static str,
    // False when the protected-write gate rejected the submission.
    ok: bool,
    outcome: AppendWriteOutcome,
    profile: String,
    path: String,
    // NOTE(review): `submit` fills this with the same value as `locality_id` —
    // presumably a backwards-compatible alias; confirm.
    #[serde(skip_serializing_if = "Option::is_none")]
    project_id: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    locality_id: Option<String>,
    native_state_path: String,
    // Populated only on successful submissions.
    #[serde(skip_serializing_if = "Option::is_none")]
    evidence: Option<MemoryEvidenceView>,
    // Rejection message when `ok` is false.
    #[serde(skip_serializing_if = "Option::is_none")]
    message: Option<String>,
}
/// JSON/text report emitted by `extract_candidates`.
#[derive(Serialize)]
pub(crate) struct MemoryCandidateExtractReport {
    command: &'static str,
    ok: bool,
    path: String,
    profile: String,
    // NOTE(review): mirrors `locality_id` (see `extract_candidates`) —
    // presumably a legacy alias; confirm.
    #[serde(skip_serializing_if = "Option::is_none")]
    project_id: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    locality_id: Option<String>,
    // Either "write" or "preview".
    mode: &'static str,
    native_state_path: String,
    candidates: Vec<ExtractedMemoryCandidateView>,
}
/// One evidence record's extraction outcome, as produced by `extract_one`.
/// The `#[serde(skip)]` fields are internal plumbing consumed by
/// `extract_candidates` and never serialized.
#[derive(Debug, Clone, Serialize)]
pub(crate) struct ExtractedMemoryCandidateView {
    pub(crate) evidence_id: String,
    pub(crate) source_scope: String,
    pub(crate) target_scope: String,
    pub(crate) summary: String,
    // Absent when the evidence had no direct extraction target.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(crate) generated_entry: Option<StructuredMemoryEntry>,
    pub(crate) governance: governance::GovernanceDecisionView,
    // Present only when a write was actually staged or applied.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(crate) staged_write: Option<queue::StagedMemoryOpView>,
    // Human-readable status line for the text renderer.
    pub(crate) note: String,
    // Deferred automation-ledger write; persisted by `extract_candidates`.
    #[serde(skip)]
    pending_automation_update: Option<PendingAutomationUpdate>,
    // Set when extraction was confirmed; used to mark the evidence extracted.
    #[serde(skip)]
    extracted_candidate_id: Option<String>,
}
/// Deferred confirmation-ledger write computed by `extract_one` and persisted
/// by `extract_candidates` once a candidate is confirmed.
#[derive(Debug, Clone)]
enum PendingAutomationUpdate {
    /// Update the per-repo (locality) automation ledger.
    Repo {
        locality_id: String,
        dedupe_key: String,
        record: runtime_state::RuntimeMemoryAutomationRecord,
    },
    /// Update the profile-wide automation ledger.
    Profile {
        dedupe_key: String,
        record: runtime_state::RuntimeMemoryAutomationRecord,
    },
}
/// Result of observing project-scoped evidence against the repo confirmation
/// ledger: the dedupe key plus the updated (not yet persisted) record.
#[derive(Debug, Clone)]
struct ProjectAutomationAssessment {
    dedupe_key: String,
    record: runtime_state::RuntimeMemoryAutomationRecord,
}
/// An active narrower-scope (repo/branch) memory entry that contradicts a
/// profile-scope candidate; detected in `profile_automation_assessment`.
#[derive(Debug, Clone)]
struct NarrowerScopeContradiction {
    // e.g. which narrower surface the entry lives on; used in descriptions.
    scope_label: &'static str,
    entry_id: String,
    entry_type: String,
    locality_id: String,
    // Set when the contradicting entry is branch-scoped.
    branch_name: Option<String>,
}
impl NarrowerScopeContradiction {
    /// Human-readable location of the contradicting entry: scope label plus
    /// locality, with the branch name appended when one is known.
    fn described_scope(&self) -> String {
        let mut described = format!("{} in locality `{}`", self.scope_label, self.locality_id);
        if let Some(branch_name) = self.branch_name.as_deref() {
            described.push_str(&format!(" on branch `{branch_name}`"));
        }
        described
    }
}
/// Result of observing profile-scoped evidence against the profile
/// confirmation ledger, including any detected narrower-scope contradiction.
#[derive(Debug, Clone)]
struct ProfileAutomationAssessment {
    dedupe_key: String,
    // Updated (not yet persisted) ledger record.
    record: runtime_state::RuntimeMemoryAutomationRecord,
    narrower_scope_contradiction: Option<NarrowerScopeContradiction>,
}
impl CommandReport for MemoryEvidenceSubmitReport {
    /// Non-zero exit when the submission was rejected.
    fn exit_code(&self) -> ExitCode {
        match self.ok {
            true => ExitCode::SUCCESS,
            false => ExitCode::from(1),
        }
    }

    /// Plain-text rendering mirroring the serialized report.
    fn render_text(&self) {
        if !self.ok {
            let message = self
                .message
                .as_deref()
                .unwrap_or("Memory evidence submission was rejected.");
            println!("{message}");
        } else if self.outcome == AppendWriteOutcome::IdempotentNoop {
            println!("Memory evidence submission was treated as an idempotent noop.");
        } else {
            // Successful, applied submissions always carry an evidence view.
            let evidence = self.evidence.as_ref().expect("submit report evidence");
            println!(
                "Recorded {} memory evidence {} at {}.",
                evidence.scope, evidence.id, self.native_state_path
            );
            println!("Entry type: {}", evidence.entry_type);
            println!("Summary: {}", evidence.summary);
        }
    }
}
impl CommandReport for MemoryCandidateExtractReport {
    /// Extraction reports always exit successfully at the process level.
    fn exit_code(&self) -> ExitCode {
        ExitCode::SUCCESS
    }

    /// Plain-text rendering: a headline count plus one line per candidate.
    fn render_text(&self) {
        if self.candidates.is_empty() {
            println!("No pending memory evidence was eligible for extraction.");
            return;
        }
        let total = self.candidates.len();
        let suffix = if total == 1 { "y" } else { "ies" };
        println!("Processed {total} pending memory evidence entr{suffix}.");
        for candidate in self.candidates.iter() {
            println!(
                "- {} -> {} ({})",
                candidate.evidence_id,
                candidate.target_scope,
                candidate.governance.action.as_str()
            );
        }
    }
}
/// Records a single piece of memory evidence in the profile's state database.
///
/// Flow: resolve the project root and profile, ensure the profile exists,
/// then run the protected-write authorization gate *before* any mutation.
/// An authorization conflict returns a rejected (`ok: false`) report rather
/// than an `Err`, so callers still get structured output. Otherwise the
/// envelope is normalized (merging `runtime_hints`), an evidence row is
/// inserted, and a success report containing the stored view is returned.
pub(crate) fn submit(
    path: &Path,
    explicit_profile: Option<&str>,
    envelope: MemoryEvidenceEnvelope,
    protected_write: ExclusiveWriteOptions,
    runtime_hints: &SubmitRuntimeHints,
) -> Result<MemoryEvidenceSubmitReport> {
    let repo_root = resolve_project_root(path)?;
    let profile = profile::resolve(explicit_profile)?;
    let layout = StateLayout::resolve(&repo_root, profile.clone())?;
    ensure_profile_exists(&layout, &repo_root)?;
    // Owner-only append authority on the "memory_evidence" surface; a
    // conflict short-circuits into a structured rejection.
    if let Some(conflict) = protected_write::authorize_append_surface_write(
        &layout,
        "memory_evidence",
        &protected_write,
        AppendWriteAuthority::OwnerOnly,
    )? {
        return Ok(MemoryEvidenceSubmitReport {
            command: "memory-evidence-submit",
            ok: false,
            outcome: conflict.outcome,
            profile: profile.to_string(),
            path: repo_root.display().to_string(),
            project_id: None,
            locality_id: None,
            native_state_path: layout.state_db_path().display().to_string(),
            evidence: None,
            message: Some(conflict.message),
        });
    }
    let marker = repo_marker::load(&repo_root)?;
    // normalize_envelope is defined elsewhere in this file; presumably it
    // validates the envelope (MAX_*_CHARS limits) and merges the hints.
    let normalized = normalize_envelope(envelope, runtime_hints)?;
    let observed_at_epoch_s = session::now_epoch_s()?;
    let id = format!("mem_evidence_{}", Ulid::new());
    let pods_root = layout.pods_root();
    let db = db::StateDb::open(&layout.state_db_path(), Some(&pods_root))?;
    // Flatten the optional host/provider references into flat DB columns.
    let record = db::memory_evidence::MemoryEvidenceRecord {
        id: id.clone(),
        scope: normalized.scope.clone(),
        entry_type: normalized.entry_type.clone(),
        source_kind: normalized.source_kind.clone(),
        summary: normalized.summary.clone(),
        summary_digest: hex_digest(normalized.summary.as_bytes()),
        observed_at_epoch_s,
        actor_id: protected_write.actor_id.clone(),
        session_id: protected_write.session_id.clone(),
        source_ref: normalized.source_ref.clone(),
        host: normalized
            .host_reference
            .as_ref()
            .map(|reference| reference.host.clone()),
        host_hook: normalized
            .host_reference
            .as_ref()
            .and_then(|reference| reference.hook.clone()),
        host_session_id: normalized
            .host_reference
            .as_ref()
            .and_then(|reference| reference.session_id.clone()),
        host_run_id: normalized
            .host_reference
            .as_ref()
            .and_then(|reference| reference.run_id.clone()),
        host_task_id: normalized
            .host_reference
            .as_ref()
            .and_then(|reference| reference.task_id.clone()),
        provider_name: normalized
            .provider_reference
            .as_ref()
            .map(|reference| reference.provider.clone()),
        provider_ref: normalized
            .provider_reference
            .as_ref()
            .and_then(|reference| reference.native_id.clone()),
        // New evidence always starts un-extracted.
        extracted: false,
        extracted_candidate_id: None,
        extracted_at_epoch_s: None,
    };
    db::memory_evidence::insert(db.conn(), &record)?;
    Ok(MemoryEvidenceSubmitReport {
        command: "memory-evidence-submit",
        ok: true,
        outcome: AppendWriteOutcome::Applied,
        profile: profile.to_string(),
        path: repo_root.display().to_string(),
        // NOTE(review): project_id intentionally mirrors locality_id here —
        // looks like a compatibility alias; confirm.
        project_id: marker.as_ref().map(|marker| marker.locality_id.clone()),
        locality_id: marker.as_ref().map(|marker| marker.locality_id.clone()),
        native_state_path: layout.state_db_path().display().to_string(),
        evidence: Some(view_from_record(&record)),
        message: None,
    })
}
/// Extracts memory candidates from pending evidence records.
///
/// With `evidence_id` set, only that record is considered (an
/// already-extracted record yields an empty candidate list); otherwise up to
/// `limit` pending records are processed. In `write` mode, each candidate's
/// pending automation-ledger update is persisted first, and confirmed
/// candidates are then marked extracted in the database. Without `write`
/// this is a pure preview.
pub(crate) fn extract_candidates(
    path: &Path,
    explicit_profile: Option<&str>,
    evidence_id: Option<&str>,
    limit: usize,
    write: bool,
    protected_write: ExclusiveWriteOptions,
) -> Result<MemoryCandidateExtractReport> {
    let repo_root = resolve_project_root(path)?;
    let profile = profile::resolve(explicit_profile)?;
    let layout = StateLayout::resolve(&repo_root, profile.clone())?;
    ensure_profile_exists(&layout, &repo_root)?;
    let marker = repo_marker::load(&repo_root)?;
    let pods_root = layout.pods_root();
    let db = db::StateDb::open(&layout.state_db_path(), Some(&pods_root))?;
    let records = if let Some(evidence_id) = evidence_id {
        let Some(record) = db::memory_evidence::get_by_id(db.conn(), evidence_id)? else {
            bail!("memory evidence `{evidence_id}` was not found");
        };
        // An already-extracted record is skipped silently, not an error.
        if record.extracted {
            Vec::new()
        } else {
            vec![record]
        }
    } else {
        db::memory_evidence::list_pending(db.conn(), limit)?
    };
    // Autonomous mode forces the queue+reconcile path inside extract_one.
    let autonomous = queue::queue_required_for_autonomous(&layout)?;
    let session_id = session::load_for_layout(&layout)?.and_then(|state| state.session_id.clone());
    let mut candidates = Vec::new();
    for record in records {
        let candidate = extract_one(
            &repo_root,
            &layout,
            marker.as_ref(),
            &record,
            autonomous,
            session_id.as_deref(),
            write,
            &protected_write,
        )?;
        // Persist the confirmation-ledger update before marking the
        // evidence extracted.
        if write {
            if let Some(update) = candidate.pending_automation_update.as_ref() {
                match update {
                    PendingAutomationUpdate::Repo {
                        locality_id,
                        dedupe_key,
                        record,
                    } => runtime_state::persist_repo_memory_automation_record(
                        &layout,
                        locality_id,
                        dedupe_key,
                        record.clone(),
                    )?,
                    PendingAutomationUpdate::Profile { dedupe_key, record } => {
                        runtime_state::persist_profile_memory_automation_record(
                            &layout,
                            dedupe_key,
                            record.clone(),
                        )?
                    }
                }
            }
        }
        if write {
            // Unconfirmed candidates carry no extracted id and stay pending.
            let Some(entry_id) = candidate.extracted_candidate_id.as_deref() else {
                candidates.push(candidate);
                continue;
            };
            db::memory_evidence::mark_extracted(
                db.conn(),
                &candidate.evidence_id,
                entry_id,
                session::now_epoch_s()?,
            )?;
        }
        candidates.push(candidate);
    }
    Ok(MemoryCandidateExtractReport {
        command: "memory-candidate-extract",
        ok: true,
        path: repo_root.display().to_string(),
        profile: profile.to_string(),
        // NOTE(review): project_id mirrors locality_id — presumably a
        // compatibility alias; confirm.
        project_id: marker.as_ref().map(|marker| marker.locality_id.clone()),
        locality_id: marker.as_ref().map(|marker| marker.locality_id.clone()),
        mode: if write { "write" } else { "preview" },
        native_state_path: layout.state_db_path().display().to_string(),
        candidates,
    })
}
/// Builds a single extraction candidate from one evidence record.
///
/// Steps:
/// 1. Resolve the destination surface for the evidence scope; scopes with no
///    direct target (broader/unknown scopes, work-stream without an active
///    branch) short-circuit into a staged/escalated candidate.
/// 2. Materialize a `StructuredMemoryEntry` from the evidence summary.
/// 3. Run the project or profile automation assessment when targeting those
///    scopes (at most one applies).
/// 4. Ask governance for a decision; when allowed and `write` is set, stage
///    the mutation via the queue (autonomous) or apply it directly.
/// 5. Decide whether the extraction counts as confirmed and hand any
///    automation-ledger update back via the `#[serde(skip)]` fields.
#[allow(clippy::too_many_arguments)]
fn extract_one(
    repo_root: &Path,
    layout: &StateLayout,
    marker: Option<&repo_marker::RepoMarker>,
    record: &db::memory_evidence::MemoryEvidenceRecord,
    autonomous: bool,
    session_id: Option<&str>,
    write: bool,
    protected_write: &ExclusiveWriteOptions,
) -> Result<ExtractedMemoryCandidateView> {
    // No concrete target: report the evidence as staged at its own scope.
    let Some(extraction_target) =
        extraction_target(record.scope.as_str(), repo_root, layout, marker)?
    else {
        let governance = broader_scope_staged_decision(record);
        return Ok(ExtractedMemoryCandidateView {
            evidence_id: record.id.clone(),
            source_scope: record.scope.clone(),
            target_scope: record.scope.clone(),
            summary: record.summary.clone(),
            generated_entry: None,
            governance,
            staged_write: None,
            note: "Broader-scope evidence remains staged in this phase.".to_owned(),
            pending_automation_update: None,
            extracted_candidate_id: None,
        });
    };
    // The generated entry falls back to a synthetic memory-evidence URI when
    // the evidence carried no source_ref of its own.
    let generated_entry = StructuredMemoryEntry {
        id: format!("mem_{}", Ulid::new()),
        entry_type: record.entry_type.clone(),
        state: "active".to_owned(),
        created_at: timestamps::current_utc_rfc3339()?,
        last_touched_session: current_session_start_count(layout)?,
        origin: "agent".to_owned(),
        superseded_at: None,
        decay_class: None,
        expires_at: None,
        tags: Vec::new(),
        source_ref: Some(
            record
                .source_ref
                .clone()
                .unwrap_or_else(|| format!("memory-evidence://{}", record.id)),
        ),
        supersedes: Vec::new(),
        content: record.summary.clone(),
    };
    // Scope-specific confirmation ledgers: at most one of these is Some.
    let project_automation = if extraction_target.queue_scope == "project_memory" {
        Some(project_automation_assessment(
            layout,
            marker,
            record,
            &generated_entry,
            session_id,
        )?)
    } else {
        None
    };
    let profile_automation = if extraction_target.queue_scope == "profile_memory" {
        Some(profile_automation_assessment(
            repo_root,
            layout,
            marker,
            record,
            &generated_entry,
            session_id,
        )?)
    } else {
        None
    };
    let governance = evidence_governance_decision(
        layout,
        record.scope.as_str(),
        &generated_entry,
        &extraction_target,
        autonomous,
        session_id,
        project_automation.as_ref(),
        profile_automation.as_ref(),
    )?;
    // Only allowed decisions in write mode touch the memory file.
    let staged_write = if write && governance.status == governance::GovernanceStatus::Allowed {
        let entry_block = render_entry_block(&generated_entry);
        let next_contents = append_block_to_contents(&extraction_target.contents, &entry_block);
        let plan = queue::MemoryOpPlan {
            target: queue::MemoryFileMutation {
                scope: extraction_target.queue_scope.to_owned(),
                path: extraction_target.path.display().to_string(),
                next_contents,
            },
            source: None,
            refresh_locality_id: marker.map(|marker| marker.locality_id.clone()),
            authored_entry_ids: vec![generated_entry.id.clone()],
            governance: Some(governance.clone()),
        };
        // Stable fingerprint + op id keep retries of the same extraction
        // idempotent.
        let request_fingerprint = queue::fingerprint(&serde_json::json!({
            "command": "memory-candidate-extract",
            "evidence_id": record.id,
            "target_scope": extraction_target.queue_scope,
        }))?;
        let op_id = queue::stable_op_id("candidate_extract", &request_fingerprint, None);
        let staged_write = if autonomous {
            // Autonomous sessions must go through the queue + reconcile path.
            queue::stage_and_reconcile(
                repo_root,
                layout,
                "memory-candidate-extract",
                &op_id,
                &request_fingerprint,
                protected_write,
                &plan,
            )?
        } else {
            // Interactive sessions apply the plan directly and synthesize the
            // staged-op view themselves.
            let outcome = queue::apply_direct_plan(repo_root, layout, &plan)?;
            queue::StagedMemoryOpView {
                op_id,
                outcome,
                native_state_path: layout.state_db_path().display().to_string(),
                target_scope: plan.target.scope,
                target_path: plan.target.path,
                source_path: None,
                authored_entry_ids: plan.authored_entry_ids,
                message: None,
            }
        };
        Some(staged_write)
    } else {
        None
    };
    // Confirmed unless rate-limited or the staged write was rejected.
    // NOTE(review): when governance escalates (no write staged) this is still
    // true in write mode, so the evidence is marked extracted with an entry id
    // that was never written — presumably intentional (the observation is
    // consumed into the automation ledger); confirm.
    let finalize_confirmation = write
        && governance.status != governance::GovernanceStatus::RateLimited
        && staged_write.as_ref().is_none_or(|view| {
            !matches!(
                view.outcome,
                AppendWriteOutcome::OwnershipConflict
                    | AppendWriteOutcome::StaleSession
                    | AppendWriteOutcome::UnsupportedMultiwriter
                    | AppendWriteOutcome::DuplicateIdConflict
            )
        });
    let pending_automation_update = if finalize_confirmation {
        if let Some(assessment) = project_automation.as_ref() {
            Some(PendingAutomationUpdate::Repo {
                locality_id: marker
                    .expect("project automation requires linked project")
                    .locality_id
                    .clone(),
                dedupe_key: assessment.dedupe_key.clone(),
                record: assessment.record.clone(),
            })
        } else {
            profile_automation
                .as_ref()
                .map(|assessment| PendingAutomationUpdate::Profile {
                    dedupe_key: assessment.dedupe_key.clone(),
                    record: assessment.record.clone(),
                })
        }
    } else {
        None
    };
    let extracted_candidate_id = finalize_confirmation.then(|| generated_entry.id.clone());
    // Human-readable status line for the text renderer.
    let note = if !write {
        "Preview only. No files were changed.".to_owned()
    } else if let Some(staged_write) = staged_write.as_ref() {
        match staged_write.outcome {
            AppendWriteOutcome::Applied => {
                if extraction_target.queue_scope == "project_memory" {
                    "Applied project-scoped evidence capture through the governed memory queue path."
                        .to_owned()
                } else if extraction_target.queue_scope == "profile_memory" {
                    "Applied profile-scoped evidence capture through the governed memory queue path."
                        .to_owned()
                } else {
                    "Applied bounded evidence capture through the governed memory queue path."
                        .to_owned()
                }
            }
            AppendWriteOutcome::IdempotentNoop => {
                "Evidence extraction was already applied; treated as an idempotent noop.".to_owned()
            }
            AppendWriteOutcome::OwnershipConflict
            | AppendWriteOutcome::StaleSession
            | AppendWriteOutcome::UnsupportedMultiwriter
            | AppendWriteOutcome::DuplicateIdConflict => {
                staged_write.message.clone().unwrap_or_else(|| {
                    "Write was rejected before evidence could be admitted.".to_owned()
                })
            }
        }
    } else if let Some(assessment) = project_automation.as_ref() {
        project_scope_note(&governance, assessment)
    } else if let Some(assessment) = profile_automation.as_ref() {
        profile_scope_note(&governance, assessment)
    } else if governance.status != governance::GovernanceStatus::Allowed {
        format!(
            "Write was not applied. Governance outcome: {} ({})",
            governance.action.as_str(),
            governance.blocked_write_message()
        )
    } else {
        "Write was not applied.".to_owned()
    };
    Ok(ExtractedMemoryCandidateView {
        evidence_id: record.id.clone(),
        source_scope: record.scope.clone(),
        target_scope: extraction_target.report_scope.to_owned(),
        summary: record.summary.clone(),
        generated_entry: Some(generated_entry),
        governance,
        staged_write,
        note,
        pending_automation_update,
        extracted_candidate_id,
    })
}
/// Resolved destination for an extraction: the memory file to mutate plus the
/// entries already present there (for duplicate suppression).
struct ExtractionTarget {
    path: PathBuf,
    // Current file contents; the generated entry block is appended to these.
    contents: String,
    // Scope label used for queue plans and rate limiting.
    queue_scope: &'static str,
    // Scope label surfaced in reports.
    report_scope: &'static str,
    // Entries at the destination, checked for normalized duplicates.
    destination_entries: Vec<runtime_state::RuntimeMemoryEntry>,
    // For work-stream targets only: project-memory entries checked for overlap.
    repo_overlap_entries: Option<Vec<runtime_state::RuntimeMemoryEntry>>,
}
/// Resolves where evidence of `evidence_scope` should be written.
///
/// Returns `Ok(None)` for scopes without a direct destination in this phase
/// (unknown scopes, and `work_stream` when no active branch memory path
/// exists); the caller reports those as staged. Every recognized scope
/// requires a linked project marker.
fn extraction_target(
    evidence_scope: &str,
    repo_root: &Path,
    layout: &StateLayout,
    marker: Option<&repo_marker::RepoMarker>,
) -> Result<Option<ExtractionTarget>> {
    match evidence_scope {
        "workspace" => {
            let marker = marker.ok_or_else(|| {
                anyhow::anyhow!(
                    "workspace memory evidence requires a linked project; run `ccd attach --path {}` first",
                    repo_root.display()
                )
            })?;
            ensure_repo_registry_exists(layout, repo_root, &marker.locality_id)?;
            let surface = runtime_state::load_clone_memory_surface(layout)?;
            Ok(Some(ExtractionTarget {
                path: surface.source.path,
                contents: surface.source.content,
                queue_scope: "workspace_memory",
                report_scope: "workspace_memory",
                destination_entries: surface.entries,
                repo_overlap_entries: None,
            }))
        }
        "work_stream" => {
            let marker = marker.ok_or_else(|| {
                anyhow::anyhow!(
                    "work-stream memory evidence requires a linked project; run `ccd attach --path {}` first",
                    repo_root.display()
                )
            })?;
            ensure_repo_registry_exists(layout, repo_root, &marker.locality_id)?;
            ensure_repo_overlay_exists(layout, repo_root, &marker.locality_id)?;
            // No active branch memory path means there is nowhere durable to
            // write; the caller treats None as staged/escalated.
            if runtime_state::resolve_active_branch_memory_path(
                repo_root,
                layout,
                &marker.locality_id,
            )?
            .is_none()
            {
                return Ok(None);
            }
            let branch_memory =
                runtime_state::load_branch_memory_surface(repo_root, layout, &marker.locality_id)?;
            // Project memory is loaded as well so lessons it already carries
            // can be discarded instead of duplicated at branch scope.
            let repo_memory =
                runtime_state::load_locality_memory_surface(layout, &marker.locality_id)?;
            Ok(Some(ExtractionTarget {
                path: branch_memory.source.path,
                contents: branch_memory.source.content,
                queue_scope: "work_stream_memory",
                report_scope: "work_stream_memory",
                destination_entries: branch_memory.entries,
                repo_overlap_entries: Some(repo_memory.entries),
            }))
        }
        "project" => {
            let marker = marker.ok_or_else(|| {
                anyhow::anyhow!(
                    "project memory evidence requires a linked project; run `ccd attach --path {}` first",
                    repo_root.display()
                )
            })?;
            ensure_repo_registry_exists(layout, repo_root, &marker.locality_id)?;
            ensure_repo_overlay_exists(layout, repo_root, &marker.locality_id)?;
            let memory_path = layout.locality_memory_path(&marker.locality_id)?;
            // A load error for a file that simply doesn't exist yet is
            // tolerated: synthesize an empty surface so first-time extraction
            // can create the file. Other errors still propagate.
            let surface =
                match runtime_state::load_locality_memory_surface(layout, &marker.locality_id) {
                    Ok(surface) => surface,
                    Err(_error) if !memory_path.exists() => {
                        runtime_state::LoadedRuntimeMemorySurface {
                            source: runtime_state::RuntimeTextSurface::missing(
                                "locality_memory",
                                &memory_path,
                            ),
                            entries: Vec::new(),
                        }
                    }
                    Err(error) => return Err(error),
                };
            Ok(Some(ExtractionTarget {
                path: memory_path,
                contents: surface.source.content,
                queue_scope: "project_memory",
                report_scope: "project_memory",
                destination_entries: surface.entries,
                repo_overlap_entries: None,
            }))
        }
        "profile" => {
            let marker = marker.ok_or_else(|| {
                anyhow::anyhow!(
                    "profile memory evidence requires a linked project; run `ccd attach --path {}` first",
                    repo_root.display()
                )
            })?;
            ensure_repo_registry_exists(layout, repo_root, &marker.locality_id)?;
            ensure_repo_overlay_exists(layout, repo_root, &marker.locality_id)?;
            let memory_path = layout.profile_memory_path();
            // Same missing-file tolerance as the "project" arm above.
            let surface = match runtime_state::load_profile_memory_surface(layout) {
                Ok(surface) => surface,
                Err(_error) if !memory_path.exists() => runtime_state::LoadedRuntimeMemorySurface {
                    source: runtime_state::RuntimeTextSurface::missing(
                        "profile_memory",
                        &memory_path,
                    ),
                    entries: Vec::new(),
                },
                Err(error) => return Err(error),
            };
            Ok(Some(ExtractionTarget {
                path: memory_path,
                contents: surface.source.content,
                queue_scope: "profile_memory",
                report_scope: "profile_memory",
                destination_entries: surface.entries,
                repo_overlap_entries: None,
            }))
        }
        // Unknown/broader scopes have no direct destination in this phase.
        _ => Ok(None),
    }
}
/// Escalated governance decision for evidence whose scope has no direct
/// extraction target; the record stays staged rather than being applied.
fn broader_scope_staged_decision(
    record: &db::memory_evidence::MemoryEvidenceRecord,
) -> governance::GovernanceDecisionView {
    let scope = record.scope.as_str();
    // Map the evidence scope onto its staging action; anything unrecognized
    // (including "work_stream") escalates.
    let action = match scope {
        "project" => governance::GovernanceAction::StageProjectPromotion,
        "profile" => governance::GovernanceAction::StageProfilePromotion,
        "project_truth" => governance::GovernanceAction::DraftProjectTruth,
        _ => governance::GovernanceAction::Escalate,
    };
    let rationale = if scope == "work_stream" {
        "work-stream durable apply requires an active non-trunk work stream before evidence can be admitted".to_owned()
    } else {
        "broader scopes remain staged during bounded evidence capture".to_owned()
    };
    governance::GovernanceDecisionView::new(governance::GovernanceDecisionInit {
        action,
        status: governance::GovernanceStatus::Escalated,
        target_scope: record.scope.clone(),
        entry_type: record.entry_type.clone(),
        source_scope: Some("memory_evidence".to_owned()),
        rationale,
        evidence_summary: vec![
            format!(
                "evidence summary digest `{}` was recorded",
                record.summary_digest
            ),
            format!("scope `{}` is not auto-applied in this phase", record.scope),
        ],
        pressure_signals: vec![governance::signal(
            "broader_scope_staged",
            format!("{} evidence remains staged", record.scope),
        )],
        duplicate_digest: None,
        rate_limit: None,
    })
}
/// Decides whether `generated_entry` may be admitted to `target`.
///
/// Check order:
/// 1. Duplicate suppression against the destination surface (any scope).
/// 2. Project/profile targets delegate to their automation-gated decision
///    functions (the corresponding assessment must be present).
/// 3. Work-stream targets are additionally discarded when project memory
///    already carries a normalized duplicate.
/// 4. Otherwise admit at workspace or work-stream scope, applying a
///    per-session rate limit only in autonomous mode.
#[allow(clippy::too_many_arguments)]
fn evidence_governance_decision(
    layout: &StateLayout,
    source_scope: &str,
    generated_entry: &StructuredMemoryEntry,
    target: &ExtractionTarget,
    autonomous: bool,
    session_id: Option<&str>,
    project_automation: Option<&ProjectAutomationAssessment>,
    profile_automation: Option<&ProfileAutomationAssessment>,
) -> Result<governance::GovernanceDecisionView> {
    // 1. Normalized duplicate already at the destination: discard.
    if let Some(duplicate) =
        governance::find_runtime_duplicate(&target.destination_entries, generated_entry)
    {
        return Ok(governance::GovernanceDecisionView::new(
            governance::GovernanceDecisionInit {
                action: governance::GovernanceAction::Discard,
                status: governance::GovernanceStatus::Discarded,
                target_scope: target.report_scope.to_owned(),
                entry_type: generated_entry.entry_type.clone(),
                source_scope: Some(source_scope.to_owned()),
                rationale: format!(
                    "a normalized duplicate already exists in {} as `{}`",
                    target.report_scope, duplicate.entry_id
                ),
                evidence_summary: vec![
                    "duplicate suppression applies before bounded admission".to_owned()
                ],
                pressure_signals: vec![governance::signal(
                    "duplicate_match",
                    format!(
                        "{} already contains normalized duplicate `{}`",
                        target.report_scope, duplicate.entry_id
                    ),
                )],
                duplicate_digest: Some(duplicate.digest),
                rate_limit: None,
            },
        ));
    }
    // 2. Automation-gated scopes delegate; extract_one guarantees the
    // matching assessment exists, hence the expect.
    if target.queue_scope == "project_memory" {
        let assessment = project_automation.expect("project memory requires automation state");
        return project_evidence_governance_decision(
            layout,
            source_scope,
            generated_entry,
            autonomous,
            session_id,
            assessment,
        );
    }
    if target.queue_scope == "profile_memory" {
        let assessment = profile_automation.expect("profile memory requires automation state");
        return profile_evidence_governance_decision(
            layout,
            source_scope,
            generated_entry,
            autonomous,
            session_id,
            assessment,
        );
    }
    // 3. Work-stream capture stays local: discard when project memory already
    // carries the same normalized lesson.
    if target.queue_scope == "work_stream_memory" {
        if let Some(repo_entries) = target.repo_overlap_entries.as_ref() {
            if let Some(duplicate) =
                governance::find_runtime_duplicate(repo_entries, generated_entry)
            {
                return Ok(governance::GovernanceDecisionView::new(
                    governance::GovernanceDecisionInit {
                        action: governance::GovernanceAction::Discard,
                        status: governance::GovernanceStatus::Discarded,
                        target_scope: target.report_scope.to_owned(),
                        entry_type: generated_entry.entry_type.clone(),
                        source_scope: Some(source_scope.to_owned()),
                        rationale: format!(
                            "work-stream capture overlaps existing project memory entry `{}`",
                            duplicate.entry_id
                        ),
                        evidence_summary: vec![
                            "work-stream capture stays local when project memory already contains the lesson"
                                .to_owned(),
                        ],
                        pressure_signals: vec![governance::signal(
                            "broader_scope_overlap",
                            format!(
                                "project_memory already carries normalized duplicate `{}`",
                                duplicate.entry_id
                            ),
                        )],
                        duplicate_digest: Some(duplicate.digest),
                        rate_limit: None,
                    },
                ));
            }
        }
    }
    // 4. Plain admission: workspace or work-stream.
    let action = if target.queue_scope == "workspace_memory" {
        governance::GovernanceAction::AdmitWorkspace
    } else {
        governance::GovernanceAction::AdmitWorkStream
    };
    // Rate limiting only constrains autonomous sessions.
    let rate_limit = if autonomous {
        governance::session_rate_limit(layout, session_id, target.queue_scope, None)?
    } else {
        None
    };
    let mut pressure_signals = Vec::new();
    if let Some(rate_limit) = &rate_limit {
        if rate_limit.remaining == 0 {
            pressure_signals.push(governance::signal(
                "session_cap_reached",
                format!(
                    "{} already has {} queued admissions or promotions in this session",
                    rate_limit.scope, rate_limit.count
                ),
            ));
        }
    }
    let status = if rate_limit
        .as_ref()
        .is_some_and(|limit| limit.status == "reached")
    {
        governance::GovernanceStatus::RateLimited
    } else {
        governance::GovernanceStatus::Allowed
    };
    Ok(governance::GovernanceDecisionView::new(
        governance::GovernanceDecisionInit {
            action,
            status,
            target_scope: target.report_scope.to_owned(),
            entry_type: generated_entry.entry_type.clone(),
            source_scope: Some(source_scope.to_owned()),
            rationale: if action == governance::GovernanceAction::AdmitWorkspace {
                "workspace admission is allowed for bounded evidence summaries".to_owned()
            } else {
                "work-stream admission is allowed for bounded evidence summaries on an active non-trunk work stream".to_owned()
            },
            evidence_summary: vec![
                format!("entry type `{}` is admit-ready", generated_entry.entry_type),
                format!("evidence source scope `{source_scope}` stays bounded"),
                format!("destination scope `{}` remains narrow", target.report_scope),
            ],
            pressure_signals,
            duplicate_digest: Some(governance::normalized_duplicate_digest(generated_entry)),
            rate_limit,
        },
    ))
}
/// Observes project-scoped evidence in the repo confirmation ledger and
/// returns the dedupe key plus the updated (not yet persisted) record.
///
/// # Errors
/// Fails when no project marker is linked or the ledger cannot be loaded.
fn project_automation_assessment(
    layout: &StateLayout,
    marker: Option<&repo_marker::RepoMarker>,
    record: &db::memory_evidence::MemoryEvidenceRecord,
    generated_entry: &StructuredMemoryEntry,
    session_id: Option<&str>,
) -> Result<ProjectAutomationAssessment> {
    let marker = marker
        .ok_or_else(|| anyhow::anyhow!("project memory evidence requires a linked project"))?;
    // The dedupe key is the normalized digest of the generated entry, so
    // repeated observations of the same lesson share one ledger record.
    let dedupe_key = governance::normalized_duplicate_digest(generated_entry);
    let previous = runtime_state::load_repo_memory_automation_record(
        layout,
        &marker.locality_id,
        &dedupe_key,
    )?
    .unwrap_or_default();
    let structural = is_deterministic_structural_signal(record.source_kind.as_str());
    let updated = previous.observe(
        &generated_entry.entry_type,
        record.observed_at_epoch_s,
        session_id,
        Some(&marker.locality_id),
        structural,
    );
    Ok(ProjectAutomationAssessment {
        dedupe_key,
        record: updated,
    })
}
/// Observes profile-scoped evidence in the profile confirmation ledger and
/// detects contradictions with active narrower-scope (repo/branch) entries.
///
/// A detected contradiction bumps `contradiction_count` on the returned
/// record; persisting the record is the caller's responsibility.
fn profile_automation_assessment(
    repo_root: &Path,
    layout: &StateLayout,
    marker: Option<&repo_marker::RepoMarker>,
    record: &db::memory_evidence::MemoryEvidenceRecord,
    generated_entry: &StructuredMemoryEntry,
    session_id: Option<&str>,
) -> Result<ProfileAutomationAssessment> {
    let marker = marker
        .ok_or_else(|| anyhow::anyhow!("profile memory evidence requires a linked project"))?;
    // Normalized digest keys the ledger so repeated observations of the same
    // lesson land on one record.
    let dedupe_key = governance::normalized_duplicate_digest(generated_entry);
    let current = runtime_state::load_profile_memory_automation_record(layout, &dedupe_key)?
        .unwrap_or_default();
    let mut next_record = current.observe(
        &generated_entry.entry_type,
        record.observed_at_epoch_s,
        session_id,
        Some(&marker.locality_id),
        is_deterministic_structural_signal(record.source_kind.as_str()),
    );
    let narrower_scope_contradiction = find_active_narrower_scope_contradiction(
        repo_root,
        layout,
        marker,
        &next_record,
        generated_entry,
    )?;
    if narrower_scope_contradiction.is_some() {
        // saturating_add: the counter must never wrap.
        next_record.contradiction_count = next_record.contradiction_count.saturating_add(1);
    }
    Ok(ProfileAutomationAssessment {
        dedupe_key,
        record: next_record,
        narrower_scope_contradiction,
    })
}
/// Governance decision for evidence targeting project memory.
///
/// Auto-apply requires an eligible entry type (`rule`/`constraint`, see
/// `project_auto_apply_eligible`) plus the readiness gate in
/// `project_auto_apply_ready` (two distinct confirming sessions, or one
/// session with a deterministic structural signal and two observations).
/// Autonomous sessions are additionally rate-limited per session. The action
/// is always `StageProjectPromotion`; only the status varies.
fn project_evidence_governance_decision(
    layout: &StateLayout,
    source_scope: &str,
    generated_entry: &StructuredMemoryEntry,
    autonomous: bool,
    session_id: Option<&str>,
    assessment: &ProjectAutomationAssessment,
) -> Result<governance::GovernanceDecisionView> {
    let eligible = project_auto_apply_eligible(generated_entry.entry_type.as_str());
    let ready = project_auto_apply_ready(assessment, eligible);
    let session_count = assessment.record.observed_session_count();
    let observation_count = assessment.record.observation_count;
    let has_structural_signal = assessment.record.has_structural_signal;
    // The rate limit is only consulted when the write could actually happen.
    let rate_limit = if ready && autonomous {
        governance::session_rate_limit(layout, session_id, "project_memory", None)?
    } else {
        None
    };
    let mut pressure_signals = Vec::new();
    if !eligible {
        pressure_signals.push(governance::signal(
            "entry_type_staged",
            format!(
                "project automation keeps `{}` staged above work_stream",
                generated_entry.entry_type
            ),
        ));
    }
    // At most one confirmation/observation-pending signal is emitted; the
    // branch order below determines which explanation the user sees.
    if eligible && !ready && session_count == 0 {
        pressure_signals.push(governance::signal(
            "project_confirmation_pending",
            "project automation needs a confirming same-project session before automatic apply is eligible",
        ));
    } else if eligible && !ready && session_count < 2 && !has_structural_signal {
        pressure_signals.push(governance::signal(
            "project_confirmation_pending",
            format!(
                "project automation has {} confirming session{}; automatic apply stays staged until either a second distinct same-project session is recorded or the single-session structural gate is satisfied",
                session_count,
                if session_count == 1 { "" } else { "s" }
            ),
        ));
    } else if eligible && !ready && observation_count < 2 {
        pressure_signals.push(governance::signal(
            "project_observation_pending",
            format!(
                "project automation has a deterministic structural signal but only {} observation{}; one more confirming observation is needed",
                observation_count,
                if observation_count == 1 { "" } else { "s" }
            ),
        ));
    }
    if let Some(rate_limit) = &rate_limit {
        if rate_limit.remaining == 0 {
            pressure_signals.push(governance::signal(
                "session_cap_reached",
                format!(
                    "{} already has {} queued admissions or promotions in this session",
                    rate_limit.scope, rate_limit.count
                ),
            ));
        }
    }
    // Not ready => escalate; ready but capped => rate-limited; else allowed.
    let status = if !ready {
        governance::GovernanceStatus::Escalated
    } else if rate_limit
        .as_ref()
        .is_some_and(|limit| limit.status == "reached")
    {
        governance::GovernanceStatus::RateLimited
    } else {
        governance::GovernanceStatus::Allowed
    };
    Ok(governance::GovernanceDecisionView::new(
        governance::GovernanceDecisionInit {
            action: governance::GovernanceAction::StageProjectPromotion,
            status,
            target_scope: "project_memory".to_owned(),
            entry_type: generated_entry.entry_type.clone(),
            source_scope: Some(source_scope.to_owned()),
            rationale: if !eligible {
                format!(
                    "project automation only auto-applies rule and constraint entries; `{}` remains staged",
                    generated_entry.entry_type
                )
            } else if !ready {
                "project automation needs either two distinct same-project sessions or one confirmation plus one deterministic structural signal".to_owned()
            } else {
                "project automation confirmation gate was satisfied; CCD can apply the lesson through the governed project-memory queue".to_owned()
            },
            evidence_summary: vec![
                format!("entry type `{}` was observed", generated_entry.entry_type),
                format!(
                    "repo-local confirmation ledger has {} observation{} across {} session{}",
                    assessment.record.observation_count,
                    if assessment.record.observation_count == 1 {
                        ""
                    } else {
                        "s"
                    },
                    assessment.record.observed_session_count(),
                    if assessment.record.observed_session_count() == 1 {
                        ""
                    } else {
                        "s"
                    }
                ),
                format!(
                    "deterministic structural signal observed: {}",
                    if assessment.record.has_structural_signal {
                        "yes"
                    } else {
                        "no"
                    }
                ),
            ],
            pressure_signals,
            duplicate_digest: Some(assessment.dedupe_key.clone()),
            rate_limit,
        },
    ))
}
/// Returns true when the entry type may be applied automatically at project
/// scope; only `rule` and `constraint` entries qualify.
fn project_auto_apply_eligible(entry_type: &str) -> bool {
    ["rule", "constraint"].contains(&entry_type)
}
/// Decides whether the project-scope confirmation gate is satisfied.
///
/// An eligible entry is ready once either two distinct same-project sessions
/// confirmed it, or a single session confirmed it together with a
/// deterministic structural signal and at least two observations.
fn project_auto_apply_ready(assessment: &ProjectAutomationAssessment, eligible: bool) -> bool {
    if !eligible {
        return false;
    }
    let sessions = assessment.record.observed_session_count();
    if sessions >= 2 {
        return true;
    }
    sessions >= 1
        && assessment.record.has_structural_signal
        && assessment.record.observation_count >= 2
}
/// Returns true for evidence source kinds that count as deterministic
/// structural signals (machine-produced artifacts rather than conversation).
fn is_deterministic_structural_signal(source_kind: &str) -> bool {
    const STRUCTURAL_KINDS: [&str; 4] = ["event_stream", "hook_output", "log", "document"];
    STRUCTURAL_KINDS.contains(&source_kind)
}
/// Renders the operator-facing note explaining a project-scope governance
/// outcome.
fn project_scope_note(
    governance: &governance::GovernanceDecisionView,
    assessment: &ProjectAutomationAssessment,
) -> String {
    match governance.status {
        governance::GovernanceStatus::Escalated => {
            // Ineligible entry types stay staged regardless of corroboration.
            if !project_auto_apply_eligible(governance.entry_type.as_str()) {
                return format!(
                    "Project confirmation was recorded in the repo-local automation ledger, but `{}` entries remain staged above work_stream.",
                    governance.entry_type
                );
            }
            let observations = assessment.record.observation_count;
            let sessions = assessment.record.observed_session_count();
            format!(
                "Project confirmation was recorded in the repo-local automation ledger; {} observation{} across {} session{} are available, so the write stays staged pending the project gate.",
                observations,
                if observations == 1 { "" } else { "s" },
                sessions,
                if sessions == 1 { "" } else { "s" }
            )
        }
        governance::GovernanceStatus::Discarded => {
            "Project memory already carries the normalized lesson; the evidence row was consumed without another write."
                .to_owned()
        }
        governance::GovernanceStatus::RateLimited => format!(
            "Write was not applied. Governance outcome: {} ({})",
            governance.action.as_str(),
            governance.blocked_write_message()
        ),
        governance::GovernanceStatus::Allowed => "Project confirmation gate passed.".to_owned(),
    }
}
/// Builds the governance decision view for promoting an evidence-derived
/// lesson to profile scope.
///
/// Three inputs shape the outcome: eligibility (only `rule`/`constraint`
/// entry types may auto-apply), readiness (no narrower-scope contradiction,
/// at least two distinct projects, and at least three total sessions), and —
/// only when the write could actually apply — the per-session rate limit.
/// The resulting view carries pressure signals and an evidence summary that
/// explain the decision to the operator.
fn profile_evidence_governance_decision(
    layout: &StateLayout,
    source_scope: &str,
    generated_entry: &StructuredMemoryEntry,
    autonomous: bool,
    session_id: Option<&str>,
    assessment: &ProfileAutomationAssessment,
) -> Result<governance::GovernanceDecisionView> {
    let eligible = profile_auto_apply_eligible(generated_entry.entry_type.as_str());
    let ready = profile_auto_apply_ready(assessment, eligible);
    let project_count = assessment.record.observed_locality_count();
    let session_count = assessment.record.observed_session_count();
    // Consult the rate limit only when an automatic apply is actually
    // possible (gate satisfied AND running autonomously); otherwise the
    // decision escalates before the cap matters.
    let rate_limit = if ready && autonomous {
        governance::session_rate_limit(layout, session_id, "profile_memory", None)?
    } else {
        None
    };
    let mut pressure_signals = Vec::new();
    if !eligible {
        pressure_signals.push(governance::signal(
            "entry_type_staged",
            format!(
                "profile automation only auto-applies rule and constraint entries; `{}` remains staged",
                generated_entry.entry_type
            ),
        ));
    }
    // For eligible entries, report only the first unmet gate, in precedence
    // order: active contradiction > cross-project count > session count.
    if eligible {
        if let Some(contradiction) = assessment.narrower_scope_contradiction.as_ref() {
            pressure_signals.push(governance::signal(
                "narrower_scope_contradiction",
                format!(
                    "{} still carries active `{}` entry `{}` for the same lesson",
                    contradiction.described_scope(),
                    contradiction.entry_type,
                    contradiction.entry_id
                ),
            ));
        } else if project_count < 2 {
            pressure_signals.push(governance::signal(
                "cross_project_confirmation_pending",
                format!(
                    "profile automation has confirmation from {} distinct project{}; automatic apply requires corroboration from another project",
                    project_count,
                    if project_count == 1 { "" } else { "s" }
                ),
            ));
        } else if session_count < 3 {
            pressure_signals.push(governance::signal(
                "profile_session_confirmation_pending",
                format!(
                    "profile automation has corroboration from {} distinct project{} but only {} total session{}; automatic apply requires at least three sessions overall",
                    project_count,
                    if project_count == 1 { "" } else { "s" },
                    session_count,
                    if session_count == 1 { "" } else { "s" }
                ),
            ));
        }
    }
    // Surface an exhausted session cap even before it flips the status below.
    if let Some(rate_limit) = &rate_limit {
        if rate_limit.remaining == 0 {
            pressure_signals.push(governance::signal(
                "session_cap_reached",
                format!(
                    "{} already has {} queued admissions or promotions in this session",
                    rate_limit.scope, rate_limit.count
                ),
            ));
        }
    }
    // Status precedence: not ready -> Escalated; cap reached -> RateLimited;
    // otherwise the promotion is allowed.
    let status = if !ready {
        governance::GovernanceStatus::Escalated
    } else if rate_limit
        .as_ref()
        .is_some_and(|limit| limit.status == "reached")
    {
        governance::GovernanceStatus::RateLimited
    } else {
        governance::GovernanceStatus::Allowed
    };
    Ok(governance::GovernanceDecisionView::new(
        governance::GovernanceDecisionInit {
            action: governance::GovernanceAction::StageProfilePromotion,
            status,
            target_scope: "profile_memory".to_owned(),
            entry_type: generated_entry.entry_type.clone(),
            source_scope: Some(source_scope.to_owned()),
            // Rationale mirrors the gate precedence used for the signals.
            rationale: if !eligible {
                format!(
                    "profile automation only auto-applies rule and constraint entries; `{}` remains staged",
                    generated_entry.entry_type
                )
            } else if let Some(contradiction) = assessment.narrower_scope_contradiction.as_ref() {
                format!(
                    "profile automation stays staged while {} contains active contradictory entry `{}`",
                    contradiction.described_scope(),
                    contradiction.entry_id
                )
            } else if !ready {
                "profile automation needs corroboration from at least two distinct projects and three total sessions before CCD will apply it at profile scope".to_owned()
            } else {
                "profile automation cross-project gate was satisfied; CCD can apply the lesson through the governed profile-memory queue".to_owned()
            },
            evidence_summary: vec![
                format!("entry type `{}` was observed", generated_entry.entry_type),
                format!(
                    "profile automation ledger has {} observation{} across {} session{}",
                    assessment.record.observation_count,
                    if assessment.record.observation_count == 1 {
                        ""
                    } else {
                        "s"
                    },
                    session_count,
                    if session_count == 1 { "" } else { "s" }
                ),
                format!(
                    "cross-project confirmation covers {} distinct project{}",
                    project_count,
                    if project_count == 1 { "" } else { "s" }
                ),
                format!(
                    "active narrower-scope contradiction present: {}",
                    if assessment.narrower_scope_contradiction.is_some() {
                        "yes"
                    } else {
                        "no"
                    }
                ),
            ],
            pressure_signals,
            duplicate_digest: Some(assessment.dedupe_key.clone()),
            rate_limit,
        },
    ))
}
/// Returns true when the entry type may be applied automatically at profile
/// scope; only `rule` and `constraint` entries qualify.
fn profile_auto_apply_eligible(entry_type: &str) -> bool {
    entry_type == "rule" || entry_type == "constraint"
}
/// Decides whether the profile-scope cross-project gate is satisfied: the
/// entry must be eligible, no narrower scope may hold a contradicting active
/// entry, and the ledger must show at least two distinct projects and three
/// total sessions.
fn profile_auto_apply_ready(assessment: &ProfileAutomationAssessment, eligible: bool) -> bool {
    if !eligible || assessment.narrower_scope_contradiction.is_some() {
        return false;
    }
    assessment.record.observed_locality_count() >= 2
        && assessment.record.observed_session_count() >= 3
}
/// Renders the operator-facing note explaining a profile-scope governance
/// outcome.
///
/// The `Escalated` status covers three distinct situations (ineligible entry
/// type, active narrower-scope contradiction, insufficient corroboration);
/// each gets its own message, checked in that precedence order.
fn profile_scope_note(
    governance: &governance::GovernanceDecisionView,
    assessment: &ProfileAutomationAssessment,
) -> String {
    match governance.status {
        governance::GovernanceStatus::Escalated => {
            // Ineligible entry types stay staged regardless of corroboration.
            if !profile_auto_apply_eligible(governance.entry_type.as_str()) {
                return format!(
                    "Profile confirmation was recorded in the profile-local automation ledger, but `{}` entries remain staged above project scope.",
                    governance.entry_type
                );
            }
            // Binding the contradiction with `if let` replaces the previous
            // match-guard + `.expect(...)` pattern, which looked the value up
            // twice and carried an avoidable panic path.
            if let Some(contradiction) = assessment.narrower_scope_contradiction.as_ref() {
                return format!(
                    "Profile confirmation is still blocked because {} contains active `{}` entry `{}` for the same lesson.",
                    contradiction.described_scope(),
                    contradiction.entry_type,
                    contradiction.entry_id
                );
            }
            // Eligible, no contradiction: the cross-project gate is simply
            // not satisfied yet.
            format!(
                "Profile confirmation was recorded in the profile-local automation ledger; {} distinct project{} and {} session{} have corroborated this lesson so far, so the write stays staged pending the cross-project profile gate.",
                assessment.record.observed_locality_count(),
                if assessment.record.observed_locality_count() == 1 {
                    ""
                } else {
                    "s"
                },
                assessment.record.observed_session_count(),
                if assessment.record.observed_session_count() == 1 {
                    ""
                } else {
                    "s"
                }
            )
        }
        governance::GovernanceStatus::Discarded => {
            "Profile memory already carries the normalized lesson; the evidence row was consumed without another write."
                .to_owned()
        }
        governance::GovernanceStatus::RateLimited => format!(
            "Write was not applied. Governance outcome: {} ({})",
            governance.action.as_str(),
            governance.blocked_write_message()
        ),
        governance::GovernanceStatus::Allowed => {
            "Profile cross-project confirmation gate passed.".to_owned()
        }
    }
}
/// Scans narrower-scope memory surfaces for an active entry that contradicts
/// `generated_entry` (same normalized content but a different entry type).
///
/// Every locality recorded in the automation ledger plus the current marker's
/// locality is checked. For each locality the project-memory surface is
/// scanned first, then the branch memory of every known clone root. Returns
/// the first contradiction found, or `Ok(None)` when none exists.
fn find_active_narrower_scope_contradiction(
    repo_root: &Path,
    layout: &StateLayout,
    marker: &repo_marker::RepoMarker,
    record: &runtime_state::RuntimeMemoryAutomationRecord,
    generated_entry: &StructuredMemoryEntry,
) -> Result<Option<NarrowerScopeContradiction>> {
    // BTreeSet deduplicates and yields a deterministic scan order.
    let mut locality_ids = BTreeSet::new();
    locality_ids.insert(marker.locality_id.clone());
    locality_ids.extend(record.locality_ids.iter().cloned());
    for locality_id in locality_ids {
        // Fail fast with a repair hint when the registry/overlay is missing.
        ensure_repo_registry_exists(layout, repo_root, &locality_id)?;
        ensure_repo_overlay_exists(layout, repo_root, &locality_id)?;
        let locality_entries = runtime_state::load_locality_memory_surface(layout, &locality_id)?;
        if let Some(contradiction) = contradiction_in_entries(
            "project memory",
            &locality_entries.entries,
            generated_entry,
            &locality_id,
            None,
        ) {
            return Ok(Some(contradiction));
        }
        for clone_root in contradiction_clone_roots(repo_root, layout, marker, &locality_id)? {
            // Registered clones can be deleted out-of-band; skip gone roots.
            if !clone_root.is_dir() {
                continue;
            }
            // Skip clones that have no active branch-memory file to scan.
            if runtime_state::resolve_active_branch_memory_path(&clone_root, layout, &locality_id)?
                .is_none()
            {
                continue;
            }
            // Branch name is best-effort: git-state read errors are tolerated
            // and detached HEADs are allowed.
            let branch_name = handoff::read_git_state(&clone_root, BranchMode::AllowDetachedHead)
                .ok()
                .map(|git| git.branch);
            let branch_entries =
                runtime_state::load_branch_memory_surface(&clone_root, layout, &locality_id)?
                    .entries;
            if let Some(contradiction) = contradiction_in_entries(
                "work-stream memory",
                &branch_entries,
                generated_entry,
                &locality_id,
                branch_name.as_deref(),
            ) {
                return Ok(Some(contradiction));
            }
        }
    }
    Ok(None)
}
/// Collects every clone root that should be scanned for contradicting entries
/// in the given locality: the current checkout (when it belongs to the
/// locality) plus all clone roots known to the repo registry.
fn contradiction_clone_roots(
    repo_root: &Path,
    layout: &StateLayout,
    marker: &repo_marker::RepoMarker,
    locality_id: &str,
) -> Result<Vec<PathBuf>> {
    // BTreeSet deduplicates and produces a stable, sorted ordering.
    let mut roots = BTreeSet::new();
    if locality_id == marker.locality_id {
        roots.insert(repo_registry::normalize_clone_root(repo_root)?);
    }
    let metadata_path = layout.repo_metadata_path(locality_id)?;
    if let Some(registry_entry) = repo_registry::load(&metadata_path)? {
        roots.extend(registry_entry.known_clone_roots);
    }
    Ok(roots.into_iter().map(PathBuf::from).collect())
}
/// Finds the first active entry whose normalized content matches the
/// generated entry but whose entry type differs — i.e. a contradiction at a
/// narrower scope. The generated entry itself and non-active entries are
/// ignored.
fn contradiction_in_entries(
    scope_label: &'static str,
    entries: &[runtime_state::RuntimeMemoryEntry],
    generated_entry: &StructuredMemoryEntry,
    locality_id: &str,
    branch_name: Option<&str>,
) -> Option<NarrowerScopeContradiction> {
    let target_digest = normalized_content_digest(generated_entry);
    for entry in entries {
        // Entries that cannot be parsed into structured form are skipped.
        let Some(candidate) = entry.as_structured_entry() else {
            continue;
        };
        // Never compare the entry against itself.
        if candidate.id == generated_entry.id {
            continue;
        }
        // Only active entries can contradict.
        if candidate.state != "active" {
            continue;
        }
        // Same entry type means agreement, not contradiction.
        if candidate.entry_type == generated_entry.entry_type {
            continue;
        }
        if normalized_content_digest(&candidate) != target_digest {
            continue;
        }
        return Some(NarrowerScopeContradiction {
            scope_label,
            entry_id: candidate.id,
            entry_type: candidate.entry_type,
            locality_id: locality_id.to_owned(),
            branch_name: branch_name.map(str::to_owned),
        });
    }
    None
}
/// Hashes the entry's normalized content key so entries can be compared for
/// semantic equivalence regardless of surface formatting.
fn normalized_content_digest(entry: &StructuredMemoryEntry) -> String {
    hex_digest(governance::normalized_content_key(&entry.content).as_bytes())
}
/// Validates and canonicalizes a submitted evidence envelope, merging in
/// runtime host hints. Each field fails individually with a descriptive
/// error; field evaluation order matches declaration order.
fn normalize_envelope(
    envelope: MemoryEvidenceEnvelope,
    runtime_hints: &SubmitRuntimeHints,
) -> Result<MemoryEvidenceEnvelope> {
    Ok(MemoryEvidenceEnvelope {
        scope: normalize_scope(&envelope.scope)?,
        entry_type: normalize_entry_type(&envelope.entry_type)?,
        source_kind: normalize_source_kind(&envelope.source_kind)?,
        summary: normalize_summary(&envelope.summary)?,
        source_ref: normalize_optional_text(envelope.source_ref, "source_ref", MAX_REF_CHARS)?,
        host_reference: normalize_host_reference(envelope.host_reference, runtime_hints)?,
        provider_reference: normalize_provider_reference(envelope.provider_reference)?,
    })
}
/// Maps accepted scope spellings (including aliases like `clone` and
/// `branch`) to their canonical names; anything else is rejected.
fn normalize_scope(raw: &str) -> Result<String> {
    let trimmed = raw.trim();
    let canonical = match trimmed {
        "workspace" | "clone" => "workspace",
        "branch" | "work-stream" | "work_stream" => "work_stream",
        "repo" | "project" => "project",
        "profile" => "profile",
        "project-truth" | "project_truth" => "project_truth",
        other => bail!(
            "invalid memory evidence scope `{other}`: expected workspace, work_stream, project, profile, or project_truth"
        ),
    };
    Ok(canonical.to_owned())
}
/// Accepts exactly the five known entry types (after trimming); rejects
/// anything else with a descriptive error.
fn normalize_entry_type(raw: &str) -> Result<String> {
    let trimmed = raw.trim();
    match trimmed {
        "rule" | "constraint" | "heuristic" | "observation" | "attempt" => Ok(trimmed.to_owned()),
        other => bail!(
            "invalid memory evidence type `{other}`: expected rule, constraint, heuristic, observation, or attempt"
        ),
    }
}
/// Accepts the known source kinds, canonicalizing hyphenated spellings
/// (`event-stream`, `hook-output`) to underscores.
fn normalize_source_kind(raw: &str) -> Result<String> {
    let trimmed = raw.trim();
    match trimmed {
        "transcript" | "session" | "event_stream" | "event-stream" | "hook_output"
        | "hook-output" | "log" | "document" => Ok(trimmed.replace('-', "_")),
        other => bail!(
            "invalid memory evidence source kind `{other}`: expected transcript, session, event_stream, hook_output, log, or document"
        ),
    }
}
/// Collapses all runs of whitespace in the summary to single spaces, then
/// enforces non-emptiness and the bounded-length cap.
fn normalize_summary(raw: &str) -> Result<String> {
    let mut summary = String::with_capacity(raw.len());
    for word in raw.split_whitespace() {
        if !summary.is_empty() {
            summary.push(' ');
        }
        summary.push_str(word);
    }
    if summary.is_empty() {
        bail!("memory evidence summary cannot be empty");
    }
    if summary.chars().count() > MAX_SUMMARY_CHARS {
        bail!(
            "memory evidence summary exceeds {MAX_SUMMARY_CHARS} characters; submit a bounded summary instead of raw transcript content"
        );
    }
    Ok(summary)
}
/// Trims an optional text field, mapping absent or whitespace-only values to
/// `None` and rejecting values longer than `max_chars` (counted in chars).
fn normalize_optional_text(
    value: Option<String>,
    field: &str,
    max_chars: usize,
) -> Result<Option<String>> {
    match value {
        None => Ok(None),
        Some(raw) => {
            let trimmed = raw.trim();
            if trimmed.is_empty() {
                return Ok(None);
            }
            if trimmed.chars().count() > max_chars {
                bail!("{field} exceeds {max_chars} characters");
            }
            Ok(Some(trimmed.to_owned()))
        }
    }
}
/// Normalizes the optional host reference, backfilling missing identifiers
/// from the runtime hints.
///
/// When no reference was submitted, one is synthesized (host `"unknown"`)
/// only if at least one runtime hint is present. When a reference was
/// submitted, each field is trimmed and length-checked; hyphenated hook names
/// are canonicalized to their underscore forms, and absent session/run/task
/// ids fall back to the corresponding hints.
fn normalize_host_reference(
    host_reference: Option<MemoryEvidenceHostReference>,
    runtime_hints: &SubmitRuntimeHints,
) -> Result<Option<MemoryEvidenceHostReference>> {
    let Some(mut host_reference) = host_reference else {
        // No explicit reference and no hints: nothing to record.
        if runtime_hints.host_session_id.is_none()
            && runtime_hints.host_run_id.is_none()
            && runtime_hints.host_task_id.is_none()
        {
            return Ok(None);
        }
        // Hints alone produce a reference with a placeholder host name.
        return Ok(Some(MemoryEvidenceHostReference {
            host: "unknown".to_owned(),
            hook: None,
            session_id: runtime_hints.host_session_id.clone(),
            run_id: runtime_hints.host_run_id.clone(),
            task_id: runtime_hints.host_task_id.clone(),
        }));
    };
    // An empty/whitespace host collapses to the "unknown" placeholder.
    host_reference.host = normalize_optional_text(
        Some(host_reference.host),
        "host_reference.host",
        MAX_PROVIDER_NAME_CHARS,
    )?
    .unwrap_or_else(|| "unknown".to_owned());
    // Known hyphenated hook names are mapped to canonical underscore forms;
    // unrecognized hook names pass through unchanged.
    host_reference.hook = normalize_optional_text(
        host_reference.hook,
        "host_reference.hook",
        MAX_PROVIDER_NAME_CHARS,
    )?
    .map(|hook| match hook.as_str() {
        "on-session-start" => "on_session_start".to_owned(),
        "before-prompt-build" => "before_prompt_build".to_owned(),
        "on-compaction-notice" => "on_compaction_notice".to_owned(),
        "on-agent-end" => "on_agent_end".to_owned(),
        "on-session-end" => "on_session_end".to_owned(),
        "supervisor-tick" => "supervisor_tick".to_owned(),
        _ => hook,
    });
    // Submitted ids win; runtime hints fill gaps only.
    host_reference.session_id = normalize_optional_text(
        host_reference
            .session_id
            .or_else(|| runtime_hints.host_session_id.clone()),
        "host_reference.session_id",
        MAX_REF_CHARS,
    )?;
    host_reference.run_id = normalize_optional_text(
        host_reference
            .run_id
            .or_else(|| runtime_hints.host_run_id.clone()),
        "host_reference.run_id",
        MAX_REF_CHARS,
    )?;
    host_reference.task_id = normalize_optional_text(
        host_reference
            .task_id
            .or_else(|| runtime_hints.host_task_id.clone()),
        "host_reference.task_id",
        MAX_REF_CHARS,
    )?;
    Ok(Some(host_reference))
}
fn normalize_provider_reference(
provider_reference: Option<MemoryEvidenceProviderReference>,
) -> Result<Option<MemoryEvidenceProviderReference>> {
let Some(mut provider_reference) = provider_reference else {
return Ok(None);
};
provider_reference.provider = normalize_optional_text(
Some(provider_reference.provider),
"provider_reference.provider",
MAX_PROVIDER_NAME_CHARS,
)?
.unwrap_or_else(|| "unknown".to_owned());
provider_reference.native_id = normalize_optional_text(
provider_reference.native_id,
"provider_reference.native_id",
MAX_REF_CHARS,
)?;
Ok(Some(provider_reference))
}
/// Converts a persisted evidence record into the presentation view, grouping
/// the flat host/provider columns back into their optional reference structs.
fn view_from_record(record: &db::memory_evidence::MemoryEvidenceRecord) -> MemoryEvidenceView {
    // A host reference exists only when at least one host column is set.
    let has_host_fields = record.host.is_some()
        || record.host_hook.is_some()
        || record.host_session_id.is_some()
        || record.host_run_id.is_some()
        || record.host_task_id.is_some();
    let host_reference = has_host_fields.then(|| MemoryEvidenceHostReference {
        host: record.host.clone().unwrap_or_else(|| "unknown".to_owned()),
        hook: record.host_hook.clone(),
        session_id: record.host_session_id.clone(),
        run_id: record.host_run_id.clone(),
        task_id: record.host_task_id.clone(),
    });
    // The provider name alone decides whether a provider reference exists.
    let provider_reference =
        record
            .provider_name
            .clone()
            .map(|provider| MemoryEvidenceProviderReference {
                provider,
                native_id: record.provider_ref.clone(),
            });
    MemoryEvidenceView {
        id: record.id.clone(),
        scope: record.scope.clone(),
        entry_type: record.entry_type.clone(),
        source_kind: record.source_kind.clone(),
        summary: record.summary.clone(),
        summary_digest: record.summary_digest.clone(),
        observed_at_epoch_s: record.observed_at_epoch_s,
        source_ref: record.source_ref.clone(),
        host_reference,
        provider_reference,
        extracted: record.extracted,
        extracted_candidate_id: record.extracted_candidate_id.clone(),
    }
}
/// Resolves the project root for `path`: the git toplevel when inside a git
/// work tree, otherwise the nearest ancestor carrying a directory-substrate
/// repo marker. Fails with an actionable hint when neither applies.
fn resolve_project_root(path: &Path) -> Result<PathBuf> {
    if git_paths::is_git_work_tree(path) {
        return resolve_git_repo_root(path);
    }
    for ancestor in path.ancestors() {
        if let Some(marker) = repo_marker::load(ancestor)? {
            if marker.substrate() == repo_marker::MarkerSubstrate::Directory {
                return Ok(ancestor.to_path_buf());
            }
        }
    }
    bail!(
        "`ccd memory evidence` requires --path to be inside a git checkout or an attached directory workspace; run `ccd attach --path {}` first for directory projects",
        path.display()
    );
}
/// Asks git for the work-tree toplevel of `path` and canonicalizes it.
/// Errors carry the failing path and, on git failure, the trimmed stderr.
fn resolve_git_repo_root(path: &Path) -> Result<PathBuf> {
    let output = std::process::Command::new("git")
        .args(["rev-parse", "--show-toplevel"])
        .current_dir(path)
        .output()
        .with_context(|| {
            format!(
                "failed to run `git rev-parse --show-toplevel` in {}",
                path.display()
            )
        })?;
    if !output.status.success() {
        let stderr = String::from_utf8_lossy(&output.stderr);
        bail!(
            "`git rev-parse --show-toplevel` failed in {}: {}",
            path.display(),
            stderr.trim()
        );
    }
    // git prints the toplevel followed by a trailing newline.
    let toplevel = String::from_utf8(output.stdout)?;
    PathBuf::from(toplevel.trim())
        .canonicalize()
        .with_context(|| format!("failed to resolve repo root from {}", path.display()))
}
/// Verifies the profile directory exists, failing with a bootstrap hint when
/// it does not.
fn ensure_profile_exists(layout: &StateLayout, repo_root: &Path) -> Result<()> {
    let profile_root = layout.profile_root();
    if !profile_root.is_dir() {
        bail!(
            "profile `{}` does not exist at {}; bootstrap it with `ccd attach --path {}` before using memory evidence capture",
            layout.profile(),
            profile_root.display(),
            repo_root.display()
        );
    }
    Ok(())
}
/// Verifies the registry metadata file for the locality exists, failing with
/// a repair hint when it is missing.
fn ensure_repo_registry_exists(
    layout: &StateLayout,
    repo_root: &Path,
    locality_id: &str,
) -> Result<()> {
    let metadata_path = layout.repo_metadata_path(locality_id)?;
    if !metadata_path.is_file() {
        bail!(
            "repo `{}` is linked from {} but the registry entry is missing at {}; run `ccd attach --path {}` or `ccd link --path {}` to repair the overlay before using memory evidence capture",
            locality_id,
            repo_root.display(),
            metadata_path.display(),
            repo_root.display(),
            repo_root.display()
        );
    }
    Ok(())
}
/// Verifies the repo overlay directory for the locality exists, failing with
/// a repair hint when it is missing.
fn ensure_repo_overlay_exists(
    layout: &StateLayout,
    repo_root: &Path,
    locality_id: &str,
) -> Result<()> {
    let overlay_root = layout.repo_overlay_root(locality_id)?;
    if !overlay_root.is_dir() {
        bail!(
            "repo overlay for `{}` is missing at {}; run `ccd attach --path {}` or `ccd link --path {}` to restore it before using memory evidence capture",
            locality_id,
            overlay_root.display(),
            repo_root.display(),
            repo_root.display()
        );
    }
    Ok(())
}
/// Reads the current session's start count, defaulting to 0 when no session
/// state has been persisted yet.
fn current_session_start_count(layout: &StateLayout) -> Result<u64> {
    let count = match session::load_for_layout(layout)? {
        Some(state) => u64::from(state.start_count),
        None => 0,
    };
    Ok(count)
}
/// Returns the lowercase hex SHA-256 digest of `bytes`.
fn hex_digest(bytes: &[u8]) -> String {
    format!("{:x}", Sha256::digest(bytes))
}
#[cfg(test)]
mod tests {
    use super::*;

    // Aliases, hyphenated source kinds, and surrounding/interior whitespace
    // must all collapse to canonical forms.
    #[test]
    fn normalize_envelope_canonicalizes_scope_and_summary() {
        let envelope = MemoryEvidenceEnvelope {
            scope: "branch".to_owned(),
            entry_type: "rule".to_owned(),
            source_kind: "hook-output".to_owned(),
            summary: " Keep generated manifests out of review. ".to_owned(),
            source_ref: None,
            host_reference: None,
            provider_reference: None,
        };
        let normalized = normalize_envelope(envelope, &SubmitRuntimeHints::default()).unwrap();
        assert_eq!(normalized.scope, "work_stream");
        assert_eq!(normalized.source_kind, "hook_output");
        assert_eq!(normalized.summary, "Keep generated manifests out of review.");
    }

    // A summary one character past the cap must be rejected with the
    // bounded-summary guidance in the error text.
    #[test]
    fn normalize_envelope_rejects_overlong_summary() {
        let envelope = MemoryEvidenceEnvelope {
            scope: "workspace".to_owned(),
            entry_type: "rule".to_owned(),
            source_kind: "session".to_owned(),
            summary: "x".repeat(MAX_SUMMARY_CHARS + 1),
            source_ref: None,
            host_reference: None,
            provider_reference: None,
        };
        let error = normalize_envelope(envelope, &SubmitRuntimeHints::default()).unwrap_err();
        assert!(error.to_string().contains("bounded summary"));
    }
}