use super::status::{dir_size, format_bytes, get_volume_usage};
use super::*;
/// POST handler: wipes the soul's historical records (thoughts, plans,
/// nudges) via `reset_history`, while active goals and beliefs are kept.
///
/// Returns 503 when no soul DB is attached, 500 when the reset itself
/// fails, and 200 with per-category deletion counts on success.
pub(super) async fn soul_reset(state: web::Data<NodeState>) -> HttpResponse {
    let soul_db = if let Some(db) = state.soul_db.as_ref() {
        db
    } else {
        return HttpResponse::ServiceUnavailable()
            .json(serde_json::json!({"error": "soul not active"}));
    };
    match soul_db.reset_history() {
        Err(e) => HttpResponse::InternalServerError().json(serde_json::json!({
            "error": format!("reset failed: {e}")
        })),
        Ok((thoughts, plans, nudges)) => HttpResponse::Ok().json(serde_json::json!({
            "status": "ok",
            "deleted": {
                "thoughts": thoughts,
                "plans": plans,
                "nudges": nudges,
            },
            "kept": "active goals, active beliefs"
        })),
    }
}
/// POST handler: best-effort disk and database housekeeping.
///
/// Frees the cargo build cache, garbage-collects the git workspace,
/// prunes/checkpoints the soul DB (when active), vacuums the gateway DB,
/// trims old checkpoint/benchmark files, and reports volume usage after.
/// Every step tolerates failure; the response always reports what ran.
pub(super) async fn soul_cleanup(state: web::Data<NodeState>) -> HttpResponse {
    let mut report = serde_json::Map::new();

    // Reclaim the cargo target directory, recording how much space it held.
    let target_dir = "/data/workspace/target";
    if std::path::Path::new(target_dir).exists() {
        let freed = dir_size(target_dir);
        let _ = std::fs::remove_dir_all(target_dir);
        report.insert(
            "cargo_target_freed".to_string(),
            serde_json::json!(format_bytes(freed)),
        );
    }

    // Compact the git object store; failures are deliberately ignored.
    let _ = std::process::Command::new("git")
        .args(["gc", "--aggressive", "--prune=now"])
        .current_dir("/data/workspace")
        .output();
    report.insert("git_gc".to_string(), serde_json::json!("done"));

    // Prune and WAL-checkpoint the soul database when one is attached.
    if let Some(db) = state.soul_db.as_ref() {
        let _ = db.prune_old_data();
        let _ = db.wal_checkpoint();
        report.insert("soul_db_pruned".to_string(), serde_json::json!(true));
    }

    // Truncate the gateway DB's WAL and vacuum it via the sqlite3 CLI.
    let _ = std::process::Command::new("sqlite3")
        .args([
            "/data/gateway.db",
            "PRAGMA wal_checkpoint(TRUNCATE); VACUUM;",
        ])
        .output();
    report.insert("gateway_db_vacuumed".to_string(), serde_json::json!(true));

    // Keep only the newest few entries in each history directory.
    cleanup_old_files("/data/brain_checkpoints", 3);
    report.insert(
        "brain_checkpoints_pruned".to_string(),
        serde_json::json!(true),
    );
    cleanup_old_files("/data/benchmark_history", 5);
    report.insert(
        "benchmark_history_pruned".to_string(),
        serde_json::json!(true),
    );

    report.insert("volume_after".to_string(), get_volume_usage());
    HttpResponse::Ok().json(serde_json::Value::Object(report))
}
/// Deletes everything inside `dir` except the `keep` most recently
/// modified entries (files or subdirectories).
///
/// Best-effort: a missing/unreadable directory, unreadable metadata, or a
/// failed removal is silently skipped.
fn cleanup_old_files(dir: &str, keep: usize) {
    let root = std::path::Path::new(dir);
    if !root.is_dir() {
        return;
    }
    let reader = match std::fs::read_dir(root) {
        Ok(rd) => rd,
        Err(_) => return,
    };
    // Collect (mtime, path); entries whose metadata can't be read are dropped.
    let mut items: Vec<(std::time::SystemTime, std::path::PathBuf)> = reader
        .flatten()
        .filter_map(|entry| {
            let modified = entry.metadata().ok()?.modified().ok()?;
            Some((modified, entry.path()))
        })
        .collect();
    // Newest first, so everything past index `keep - 1` is stale.
    items.sort_by(|a, b| b.0.cmp(&a.0));
    for (_modified, path) in items.into_iter().skip(keep) {
        if path.is_dir() {
            let _ = std::fs::remove_dir_all(&path);
        } else {
            let _ = std::fs::remove_file(&path);
        }
    }
}
/// Runs `gh pr list` against `repo` inside `workspace` and parses the JSON
/// output.
///
/// Best-effort: a spawn failure, a non-zero exit status, a 15-second
/// timeout, or unparseable output all yield an empty JSON array.
async fn list_open_prs(repo: &str, workspace: &str, gh_token: &str) -> serde_json::Value {
    let result = tokio::time::timeout(
        std::time::Duration::from_secs(15),
        tokio::process::Command::new("gh")
            .args([
                "pr",
                "list",
                "--repo",
                repo,
                "--state",
                "open",
                "--json",
                "number,title,headRefName,author,additions,deletions,createdAt,reviewDecision",
                "--limit",
                "20",
            ])
            .current_dir(workspace)
            .env("GH_TOKEN", gh_token)
            .output(),
    )
    .await;
    match result {
        Ok(Ok(output)) if output.status.success() => {
            let stdout = String::from_utf8_lossy(&output.stdout);
            serde_json::from_str(&stdout).unwrap_or(serde_json::json!([]))
        }
        _ => serde_json::json!([]),
    }
}

/// GET handler: lists open PRs on the configured fork repo (and upstream
/// repo, when set), plus a count of PRs still awaiting review.
///
/// Repos come from `SOUL_FORK_REPO` / `SOUL_UPSTREAM_REPO`; auth from
/// `GH_TOKEN` (falling back to `GITHUB_TOKEN`). Without a fork repo the
/// handler short-circuits with an empty list and an explanatory message.
/// The two `gh pr list` invocations previously duplicated here are shared
/// via `list_open_prs`.
pub(super) async fn open_prs(state: web::Data<NodeState>) -> HttpResponse {
    let fork_repo = std::env::var("SOUL_FORK_REPO").unwrap_or_default();
    let upstream_repo = std::env::var("SOUL_UPSTREAM_REPO").unwrap_or_default();
    let instance_id = std::env::var("INSTANCE_ID").unwrap_or_default();
    if fork_repo.is_empty() {
        return HttpResponse::Ok().json(serde_json::json!({
            "instance_id": instance_id,
            "prs": [],
            "message": "no fork repo configured"
        }));
    }
    let workspace =
        std::env::var("SOUL_WORKSPACE_ROOT").unwrap_or_else(|_| "/data/workspace".into());
    let gh_token = std::env::var("GH_TOKEN")
        .or_else(|_| std::env::var("GITHUB_TOKEN"))
        .unwrap_or_default();
    let prs = list_open_prs(&fork_repo, &workspace, &gh_token).await;
    let upstream_prs = if upstream_repo.is_empty() {
        serde_json::json!([])
    } else {
        list_open_prs(&upstream_repo, &workspace, &gh_token).await
    };
    // A PR "needs review" when gh reports no review decision yet: the
    // field is missing, empty, or explicitly REVIEW_REQUIRED.
    let empty_vec = vec![];
    let needs_review_count = prs
        .as_array()
        .unwrap_or(&empty_vec)
        .iter()
        .chain(upstream_prs.as_array().unwrap_or(&empty_vec).iter())
        .filter(|pr| {
            pr.get("reviewDecision")
                .and_then(|v| v.as_str())
                .map(|s| s.is_empty() || s == "REVIEW_REQUIRED")
                .unwrap_or(true)
        })
        .count();
    let _ = &state; // state is unused but required by the handler signature
    HttpResponse::Ok().json(serde_json::json!({
        "instance_id": instance_id,
        "fork_repo": fork_repo,
        "upstream_repo": upstream_repo,
        "fork_prs": prs,
        "upstream_prs": upstream_prs,
        "needs_review_count": needs_review_count,
    }))
}
/// POST handler: clears the persisted durable-rules list, and optionally
/// the failure chains when `?reset_failure_chains=true` is passed.
///
/// Returns 503 when no soul DB is attached; otherwise 200 reporting which
/// stores were cleared. `set_state` errors are deliberately ignored.
pub(super) async fn soul_rules_reset(
    state: web::Data<NodeState>,
    query: web::Query<std::collections::HashMap<String, String>>,
) -> HttpResponse {
    let soul_db = match state.soul_db.as_ref() {
        Some(db) => db,
        None => {
            return HttpResponse::ServiceUnavailable()
                .json(serde_json::json!({"error": "soul not active"}))
        }
    };
    // Durable rules are always wiped.
    let _ = soul_db.set_state("durable_rules", "[]");
    // Failure chains are only wiped on explicit request.
    let wants_chain_reset = matches!(query.get("reset_failure_chains"), Some(v) if v == "true");
    if wants_chain_reset {
        let _ = soul_db.set_state("failure_chains", "[]");
    }
    let chains_status = if wants_chain_reset { "cleared" } else { "unchanged" };
    HttpResponse::Ok().json(serde_json::json!({
        "durable_rules": "cleared",
        "failure_chains": chains_status,
    }))
}
/// POST handler: aggressively removes build artifacts (cargo target dirs,
/// `.cargo` cache) from the workspace and reports the resulting size of
/// the /data volume as measured by `du`.
pub(super) async fn disk_cleanup(_state: web::Data<NodeState>) -> HttpResponse {
    let ws = std::env::var("SOUL_WORKSPACE_ROOT").unwrap_or_else(|_| "/data/workspace".to_string());
    // rm failures are suppressed; the trailing echo emits post-cleanup usage.
    let script = format!(
        "rm -rf {ws}/target /tmp/x402_cargo_target {ws}/.cargo 2>/dev/null; \
         rm -rf /data/workspace/target 2>/dev/null; \
         echo \"$(du -sh /data 2>/dev/null | cut -f1)\""
    );
    let run = tokio::process::Command::new("bash")
        .arg("-c")
        .arg(&script)
        .output()
        .await;
    match run {
        Ok(out) => {
            let size = String::from_utf8_lossy(&out.stdout).trim().to_string();
            HttpResponse::Ok().json(serde_json::json!({
                "cleaned": true,
                "data_volume_size": size,
            }))
        }
        Err(e) => {
            HttpResponse::InternalServerError().json(serde_json::json!({"error": format!("{e}")}))
        }
    }
}
/// POST handler: rolls the cognitive architecture back to a fresh state.
///
/// Resets the architecture under a timestamped `manual-reset-*` version
/// tag, wipes thought/plan/nudge history, and zeroes the commit/cycle
/// bookkeeping keys. Benchmark history, ELO score and persistent memory
/// are reported as preserved.
pub(super) async fn cognitive_reset(state: web::Data<NodeState>) -> HttpResponse {
    let soul_db = if let Some(db) = state.soul_db.as_ref() {
        db
    } else {
        return HttpResponse::ServiceUnavailable()
            .json(serde_json::json!({"error": "soul not active"}));
    };
    // Tag this reset so later inspection can tell it was triggered manually.
    let version_tag = format!("manual-reset-{}", chrono::Utc::now().timestamp());
    let architecture_reset = soul_db.reset_cognitive_architecture(&version_tag);
    let history = soul_db.reset_history();
    // Zero out the bookkeeping state; persistence errors are tolerated.
    for (key, value) in [
        ("commit_awaiting_benchmark", "0"),
        ("last_commit_at", "0"),
        ("total_think_cycles", "0"),
        ("cycles_since_last_commit", "0"),
        ("recent_errors", "[]"),
    ] {
        let _ = soul_db.set_state(key, value);
    }
    let (thoughts, plans, nudges) = history.unwrap_or((0, 0, 0));
    HttpResponse::Ok().json(serde_json::json!({
        "status": "cognitive_reset_complete",
        "version_tag": version_tag,
        "architecture_reset": architecture_reset,
        "cleared": {
            "thoughts": thoughts,
            "plans": plans,
            "nudges": nudges,
            "brain_weights": true,
            "cortex": true,
            "genesis": true,
            "hivemind": true,
            "synthesis": true,
        },
        "preserved": ["benchmark_history", "elo_score", "persistent_memory"],
    }))
}
/// POST handler: reinforces the soul after an upstream acceptance.
///
/// Applies the 3x positive reward for the `commit_sha` given in the JSON
/// body (falling back to "unknown") and records the acceptance timestamp.
pub(super) async fn admin_reward(
    body: web::Json<serde_json::Value>,
    state: web::Data<NodeState>,
) -> HttpResponse {
    let soul_db = if let Some(db) = state.soul_db.as_ref() {
        db
    } else {
        return HttpResponse::ServiceUnavailable()
            .json(serde_json::json!({"error": "soul not active"}));
    };
    let commit_sha = match body.get("commit_sha").and_then(serde_json::Value::as_str) {
        Some(sha) => sha,
        None => "unknown",
    };
    x402_soul::code_quality::reward_upstream_acceptance(soul_db, commit_sha);
    // Best-effort timestamp of the most recent acceptance.
    let now = chrono::Utc::now().timestamp().to_string();
    let _ = soul_db.set_state("last_upstream_acceptance", &now);
    HttpResponse::Ok().json(serde_json::json!({
        "status": "rewarded",
        "commit_sha": commit_sha,
        "signal": "strong_positive (3x reinforcement)",
    }))
}
/// POST handler: penalizes the soul after an upstream revert.
///
/// Applies the 3x negative signal for the `commit_sha` given in the JSON
/// body (falling back to "unknown").
pub(super) async fn admin_penalty(
    body: web::Json<serde_json::Value>,
    state: web::Data<NodeState>,
) -> HttpResponse {
    let soul_db = if let Some(db) = state.soul_db.as_ref() {
        db
    } else {
        return HttpResponse::ServiceUnavailable()
            .json(serde_json::json!({"error": "soul not active"}));
    };
    let commit_sha = match body.get("commit_sha").and_then(serde_json::Value::as_str) {
        Some(sha) => sha,
        None => "unknown",
    };
    x402_soul::code_quality::penalty_upstream_revert(soul_db, commit_sha);
    HttpResponse::Ok().json(serde_json::json!({
        "status": "penalized",
        "commit_sha": commit_sha,
        "signal": "strong_negative (3x reinforcement)",
    }))
}