use std::path::{Path, PathBuf};
use std::process::ExitCode;
use anyhow::{anyhow, Result};
use clap::{ArgMatches, Args, Command, FromArgMatches, Subcommand, ValueEnum};
use serde::Serialize;
use serde_json::Value;
use crate::commands::describe::CommandDescriptor;
use crate::content_trust::ContentTrust;
use crate::extensions::backlog_commands;
use crate::extensions::backlog_state;
use crate::extensions::dispatch;
use crate::extensions::{Extension, StartupContext};
use crate::mcp::protocol::Tool;
use crate::mcp::tools::{build_tool, ToolDef};
use crate::output::{self, OutputFormat};
use crate::paths;
use crate::paths::state::StateLayout;
use crate::state::extension_dispatch as dispatch_state;
use crate::state::issue_refs;
// Subcommands exposed under `ccd backlog`. Each variant carries its own clap
// `Args` struct; execution is dispatched in `run_cli`.
#[derive(Subcommand)]
enum BacklogCommand {
    Pull(BacklogPullArgs),
    Scope(BacklogScopeArgs),
    Next(BacklogNextArgs),
    Claim(BacklogClaimArgs),
    SetStatus(BacklogSetStatusArgs),
    Complete(BacklogCompleteArgs),
    Adapters(BacklogAdaptersArgs),
    PromoteNext(BacklogPromoteNextArgs),
    BootstrapGithub(BacklogBootstrapGithubArgs),
    PullGithub(BacklogPullGithubArgs),
    Lint(BacklogLintArgs),
    Groom(BacklogGroomArgs),
}
// Arguments for `backlog bootstrap-github` (GitHub issues adapter setup).
#[derive(Args)]
struct BacklogBootstrapGithubArgs {
    // Repository path to operate on; defaults to the current directory.
    #[arg(long, default_value = ".")]
    path: PathBuf,
    // Optional named configuration profile.
    #[arg(long)]
    profile: Option<String>,
    // GitHub repository slug, exposed on the CLI as `--repo`.
    #[arg(long = "repo")]
    github_repo: String,
    // Forwarded to `backlog_commands::bootstrap_github`; presumably adopts
    // pre-existing issues instead of starting fresh — confirm in that module.
    #[arg(long)]
    adopt_existing: bool,
}
// Arguments for `backlog pull-github` (refresh queue from GitHub issues).
#[derive(Args)]
struct BacklogPullGithubArgs {
    // Repository path to operate on; defaults to the current directory.
    #[arg(long, default_value = ".")]
    path: PathBuf,
    // Optional named configuration profile.
    #[arg(long)]
    profile: Option<String>,
    // GitHub repository slug, exposed on the CLI as `--repo`.
    #[arg(long = "repo")]
    github_repo: String,
}
// Arguments for `backlog lint`.
#[derive(Args)]
struct BacklogLintArgs {
    // Repository path to operate on; defaults to the current directory.
    #[arg(long, default_value = ".")]
    path: PathBuf,
    // Optional named configuration profile.
    #[arg(long)]
    profile: Option<String>,
}
// Arguments for `backlog groom`.
#[derive(Args)]
struct BacklogGroomArgs {
    // Repository path to operate on; defaults to the current directory.
    #[arg(long, default_value = ".")]
    path: PathBuf,
    // Optional named configuration profile.
    #[arg(long)]
    profile: Option<String>,
}
// Arguments for `backlog pull` (provider-agnostic queue refresh).
#[derive(Args)]
struct BacklogPullArgs {
    // Repository path to operate on; defaults to the current directory.
    #[arg(long, default_value = ".")]
    path: PathBuf,
    // Optional named configuration profile.
    #[arg(long)]
    profile: Option<String>,
    // Optional repo override, exposed on the CLI as `--repo`.
    #[arg(long = "repo")]
    repo_override: Option<String>,
}
// Priority buckets accepted by `backlog scope --priority`; converted to the
// command layer's wire string via `as_str`.
#[derive(Clone, Copy, ValueEnum)]
enum BacklogScopePriorityArg {
    ActiveNow,
    Next,
    Later,
    Parked,
}
impl BacklogScopePriorityArg {
fn as_str(self) -> &'static str {
match self {
Self::ActiveNow => "active-now",
Self::Next => "next",
Self::Later => "later",
Self::Parked => "parked",
}
}
}
// Arguments for `backlog scope` (assign a priority bucket to issues).
#[derive(Args)]
struct BacklogScopeArgs {
    // Repository path to operate on; defaults to the current directory.
    #[arg(long, default_value = ".")]
    path: PathBuf,
    // Optional named configuration profile.
    #[arg(long)]
    profile: Option<String>,
    // Optional repo override, exposed on the CLI as `--repo`.
    #[arg(long = "repo")]
    repo_override: Option<String>,
    // Issue selection; the format is interpreted by `backlog_commands::scope`
    // (presumably a comma-separated id list — confirm there).
    #[arg(long)]
    issues: String,
    // Target priority bucket; see `BacklogScopePriorityArg`.
    #[arg(long, value_enum)]
    priority: BacklogScopePriorityArg,
}
// Arguments for `backlog next`.
#[derive(Args)]
struct BacklogNextArgs {
    // Repository path to operate on; defaults to the current directory.
    #[arg(long, default_value = ".")]
    path: PathBuf,
    // Optional named configuration profile.
    #[arg(long)]
    profile: Option<String>,
}
// Arguments for `backlog claim`.
#[derive(Args)]
struct BacklogClaimArgs {
    // Repository path to operate on; defaults to the current directory.
    #[arg(long, default_value = ".")]
    path: PathBuf,
    // Optional named configuration profile.
    #[arg(long)]
    profile: Option<String>,
    // Work item identifier, exposed on the CLI as `--ccd-id`.
    #[arg(long = "ccd-id")]
    ccd_id: u64,
    // When absent, `run_cli` forwards `!write` to the command layer
    // (likely a dry-run flag — confirm in `backlog_commands::claim`).
    #[arg(long)]
    write: bool,
}
// Statuses accepted by `backlog set-status`; converted to the command layer's
// wire string via `as_str`.
#[derive(Clone, Copy, ValueEnum)]
enum BacklogMutationStatusArg {
    Ready,
    // Explicit CLI value name, since the default derivation is also kebab-case
    // but this pins the user-visible spelling.
    #[value(name = "in-progress")]
    InProgress,
    Blocked,
    Parked,
    Done,
}
impl BacklogMutationStatusArg {
fn as_str(self) -> &'static str {
match self {
Self::Ready => "ready",
Self::InProgress => "in-progress",
Self::Blocked => "blocked",
Self::Parked => "parked",
Self::Done => "done",
}
}
}
// Arguments for `backlog set-status`.
#[derive(Args)]
struct BacklogSetStatusArgs {
    // Repository path to operate on; defaults to the current directory.
    #[arg(long, default_value = ".")]
    path: PathBuf,
    // Optional named configuration profile.
    #[arg(long)]
    profile: Option<String>,
    // Work item identifier, exposed on the CLI as `--ccd-id`.
    #[arg(long = "ccd-id")]
    ccd_id: u64,
    // New status; see `BacklogMutationStatusArg`.
    #[arg(long, value_enum)]
    status: BacklogMutationStatusArg,
    // When absent, `run_cli` forwards `!write` to the command layer
    // (likely a dry-run flag — confirm in `backlog_commands::set_status`).
    #[arg(long)]
    write: bool,
}
// Arguments for `backlog complete`.
#[derive(Args)]
struct BacklogCompleteArgs {
    // Repository path to operate on; defaults to the current directory.
    #[arg(long, default_value = ".")]
    path: PathBuf,
    // Optional named configuration profile.
    #[arg(long)]
    profile: Option<String>,
    // Work item identifier, exposed on the CLI as `--ccd-id`.
    #[arg(long = "ccd-id")]
    ccd_id: u64,
    // When absent, `run_cli` forwards `!write` to the command layer
    // (likely a dry-run flag — confirm in `backlog_commands::complete`).
    #[arg(long)]
    write: bool,
}
// Arguments for `backlog adapters`.
#[derive(Args)]
struct BacklogAdaptersArgs {
    // Repository path to operate on; defaults to the current directory.
    #[arg(long, default_value = ".")]
    path: PathBuf,
    // Optional named configuration profile.
    #[arg(long)]
    profile: Option<String>,
}
// Arguments for `backlog promote-next`.
#[derive(Args)]
struct BacklogPromoteNextArgs {
    // Repository path to operate on; defaults to the current directory.
    #[arg(long, default_value = ".")]
    path: PathBuf,
    // Optional named configuration profile.
    #[arg(long)]
    profile: Option<String>,
    // Number of items to promote per invocation; defaults to 3.
    #[arg(long, default_value_t = 3)]
    batch_size: usize,
    // When absent, `run_cli` forwards `!write` to the command layer
    // (likely a dry-run flag — confirm in `backlog_commands::promote_next`).
    #[arg(long)]
    write: bool,
}
// CLI command groups this extension registers (see `command_groups`).
const COMMAND_GROUPS: &[&str] = &["backlog"];

// Single MCP tool exposing a curated subset of backlog commands. Each tuple
// pairs the MCP-facing command name with the CLI command path it maps onto
// (presumably used by `build_tool` to derive the tool schema — confirm in
// `crate::mcp::tools`). Only the four commands listed here are reachable via
// `dispatch_mcp`.
const MCP_TOOLS: &[ToolDef] = &[ToolDef {
    name: "ccd_backlog",
    description: "Work queue: pull lint groom bootstrap",
    commands: &[
        ("backlog-bootstrap-github", &["backlog", "bootstrap-github"]),
        ("backlog-pull-github", &["backlog", "pull-github"]),
        ("backlog-lint", &["backlog", "lint"]),
        ("backlog-groom", &["backlog", "groom"]),
    ],
    renames: &[],
    exclude: &[],
    extra_props: &[],
}];
// Zero-sized extension type; all behavior lives in the `Extension` impl.
pub(crate) struct BacklogExtension;
// Shared static instance, usable wherever a `&dyn Extension` is needed.
pub(crate) static BACKLOG_EXTENSION: BacklogExtension = BacklogExtension;
/// Extension hooks for the backlog work queue: CLI and MCP dispatch,
/// work-queue cache access, local assignment (dispatch-state) bookkeeping,
/// and health/Radar integrations.
impl Extension for BacklogExtension {
    /// Stable extension identifier.
    fn name(&self) -> &'static str {
        "backlog"
    }

    /// CLI command groups contributed by this extension.
    fn command_groups(&self) -> &'static [&'static str] {
        COMMAND_GROUPS
    }

    /// The `backlog` clap command tree.
    fn cli_command(&self) -> Option<Command> {
        Some(backlog_cli_command())
    }

    /// Handles `ccd backlog …`; returns `None` for any other subcommand so
    /// the caller can try the next extension.
    fn dispatch_cli(
        &self,
        subcommand_name: &str,
        matches: &ArgMatches,
        output: OutputFormat,
    ) -> Option<Result<ExitCode>> {
        if subcommand_name != "backlog" {
            return None;
        }
        Some(run_cli_from_matches(matches, output))
    }

    /// Materializes the static `MCP_TOOLS` definitions against the live
    /// command descriptors.
    fn mcp_tools(&self, commands: &[CommandDescriptor]) -> Vec<Tool> {
        MCP_TOOLS
            .iter()
            .map(|tool| build_tool(tool, commands))
            .collect()
    }

    /// Executes the `ccd_backlog` MCP tool; the required `command` argument
    /// selects the operation. Returns `None` for unrelated tool names.
    fn dispatch_mcp(&self, tool_name: &str, args: &Value) -> Option<Result<Value>> {
        if tool_name != "ccd_backlog" {
            return None;
        }
        let command = match get_required_string(args, "command") {
            Ok(command) => command,
            Err(error) => return Some(Err(error)),
        };
        // Immediately-invoked closure so `?` can be used internally while the
        // overall outcome is still wrapped in `Some(..)` for the MCP layer.
        let report = (|| -> Result<Value> {
            match command.as_str() {
                "backlog-bootstrap-github" => {
                    let path = resolve_path(args)?;
                    let github_repo = get_required_string(args, "github_repo")?;
                    let report = backlog_commands::bootstrap_github(
                        &path,
                        get_opt_str(args, "profile").as_deref(),
                        &github_repo,
                        get_bool(args, "adopt_existing"),
                    )?;
                    to_value(&report)
                }
                "backlog-pull-github" => {
                    let path = resolve_path(args)?;
                    let github_repo = get_required_string(args, "github_repo")?;
                    let report = backlog_commands::pull_github(
                        &path,
                        get_opt_str(args, "profile").as_deref(),
                        &github_repo,
                    )?;
                    to_value(&report)
                }
                "backlog-lint" => {
                    let path = resolve_path(args)?;
                    let report =
                        backlog_commands::lint(&path, get_opt_str(args, "profile").as_deref())?;
                    to_value(&report)
                }
                "backlog-groom" => {
                    let path = resolve_path(args)?;
                    let report =
                        backlog_commands::groom(&path, get_opt_str(args, "profile").as_deref())?;
                    to_value(&report)
                }
                other => Err(anyhow!("unknown backlog MCP command: {other}")),
            }
        })();
        Some(report)
    }

    /// Human-facing hint for how to refresh the work queue from the CLI.
    fn work_queue_refresh_hint(
        &self,
        provider: Option<&str>,
        repo: Option<&str>,
    ) -> Option<String> {
        Some(pull_command_hint(provider, repo))
    }

    /// Refreshes the work-queue cache from whatever cache already exists on
    /// disk (delegates; no adapter pull is implied here).
    fn refresh_work_queue_cache(
        &self,
        repo_root: &Path,
        layout: &StateLayout,
    ) -> Option<Result<(String, usize)>> {
        Some(refresh_from_existing_cache(repo_root, layout))
    }

    /// Loads the persisted GitHub backlog cache, if one exists.
    fn load_work_queue_snapshot(
        &self,
        layout: &StateLayout,
    ) -> Result<Option<backlog_state::GitHubBacklogCache>> {
        backlog_state::load_cache(layout)
    }

    /// Emits a warning diagnostic when `super::adapter` detects that the
    /// GitHub backlog adapter is running in fallback mode; otherwise empty.
    fn health_diagnostics(
        &self,
        layout: &StateLayout,
        repo_root: &Path,
        locality_id: &str,
    ) -> Result<Vec<super::HealthDiagnostic>> {
        let mut diagnostics = Vec::new();
        if let Some(diagnostic) =
            super::adapter::diagnose_github_backlog_fallback(layout, repo_root, locality_id)?
        {
            diagnostics.push(super::HealthDiagnostic {
                check: "backlog_adapter",
                severity: "warning",
                file: diagnostic.repo_overlay_config_path.display().to_string(),
                message: diagnostic.message(),
                details: None,
            });
        }
        Ok(diagnostics)
    }

    /// Evaluates the work-queue cache for Radar and derives approval steps
    /// from that evaluation. The `12` bounds the cache view (presumably a cap
    /// on surfaced items — confirm against `load_cache_view_from_ref`).
    fn radar_workflow_guidance(
        &self,
        ctx: &StartupContext<'_>,
    ) -> Result<Option<super::RadarWorkflowGuidance>> {
        let backlog_cache =
            backlog_state::load_cache_view_from_ref(ctx.layout, ctx.raw_backlog_cache, 12)?;
        let evaluation = backlog_radar_evaluation(&backlog_cache);
        Ok(Some(super::RadarWorkflowGuidance {
            approval_steps: backlog_radar_approval_steps(&evaluation),
            evaluation,
        }))
    }

    /// Builds the closed-issue-reference drift signal. The relevant local
    /// assignment is looked up by session id when a session is active,
    /// otherwise by the current git branch (if any).
    fn radar_behavioral_drift_signals(
        &self,
        ctx: &super::RadarContext<'_>,
    ) -> Result<Vec<super::RadarBehavioralDriftSignal>> {
        let session_assignment = match ctx.active_session_id {
            Some(session_id) => self.load_session_assignment(&ctx.startup, session_id)?,
            None => match ctx.git {
                Some(git) => self.load_branch_assignment(&ctx.startup, &git.branch)?,
                None => None,
            },
        };
        let backlog_cache = backlog_state::load_cache_view_from_ref(
            ctx.startup.layout,
            ctx.startup.raw_backlog_cache,
            12,
        )?;
        Ok(vec![build_closed_issue_reference_signal(
            ctx,
            backlog_cache.status,
            session_assignment.as_ref(),
        )])
    }

    /// This extension owns assignment dispatch (the methods below).
    fn owns_dispatch(&self) -> bool {
        true
    }

    /// Resolves the assignment recorded for a session, enriched with cache
    /// data when the backlog cache is available.
    fn load_session_assignment(
        &self,
        ctx: &StartupContext<'_>,
        session_id: &str,
    ) -> Result<Option<dispatch::AssignmentView>> {
        let cache = backlog_state::load_cache(ctx.layout)?;
        let view = dispatch_state::resolve_session_assignment(
            ctx.layout,
            ctx.locality_id,
            session_id,
            cache.as_ref(),
        )?;
        Ok(view.map(|v| local_assignment_to_dispatch_view(&v)))
    }

    /// Resolves the assignment recorded for a branch (pre-session case),
    /// enriched with cache data when available.
    fn load_branch_assignment(
        &self,
        ctx: &StartupContext<'_>,
        branch: &str,
    ) -> Result<Option<dispatch::AssignmentView>> {
        let cache = backlog_state::load_cache(ctx.layout)?;
        let view = dispatch_state::resolve_branch_assignment(
            ctx.layout,
            ctx.locality_id,
            branch,
            cache.as_ref(),
        )?;
        Ok(view.map(|v| local_assignment_to_dispatch_view(&v)))
    }

    /// Ensures a local assignment exists for the given owner (session or
    /// pre-session branch) and reports the outcome.
    fn ensure_assignment(
        &self,
        ctx: &StartupContext<'_>,
        owner: dispatch::AssignmentOwner<'_>,
    ) -> Result<dispatch::AssignmentOutcome> {
        let cache = backlog_state::load_cache(ctx.layout)?;
        // The dispatch layer receives the cache status as a string: "loaded"
        // when cached work may be used, "stale" otherwise.
        let cache_status = if ctx.allow_cached_work {
            "loaded"
        } else {
            "stale"
        };
        // `worktree` is intentionally ignored for both owner kinds.
        let local_view = match owner {
            dispatch::AssignmentOwner::Session {
                session_id,
                worktree: _,
                branch,
            } => dispatch_state::ensure_local_assignment(
                ctx.repo_root,
                ctx.layout,
                ctx.locality_id,
                dispatch_state::LocalAssignmentRequest {
                    session_id,
                    branch,
                    cache: cache.as_ref(),
                    cache_status,
                },
            )?,
            dispatch::AssignmentOwner::PreSessionBranch {
                worktree: _,
                branch,
            } => dispatch_state::ensure_local_assignment_by_branch(
                ctx.repo_root,
                ctx.layout,
                ctx.locality_id,
                branch,
                cache.as_ref(),
                cache_status,
            )?,
        };
        Ok(local_dispatch_to_outcome(&local_view))
    }

    /// Reports the best-known "next step" without mutating state: first a
    /// cache item that is already claimed (matched by backlog ref key or ccd
    /// id), otherwise a bare claimed ccd id with a synthesized title.
    fn observe_next_step(
        &self,
        ctx: &StartupContext<'_>,
    ) -> Result<Option<dispatch::NextStepObservation>> {
        let cache = backlog_state::load_cache(ctx.layout)?;
        let dispatch_paths = ctx.layout.extension_dispatch_state_paths(ctx.locality_id)?;
        let claimed_refs = dispatch_state::load_claimed_refs(&dispatch_paths)?;
        let claimed_ids = dispatch_state::load_claimed_ids(&dispatch_paths)?;
        // Preferred path: a cache item that matches a claimed ref or id.
        if let Some(item) = cache.as_ref().and_then(|cache| {
            cache.items.iter().find(|item| {
                (!item.backlog_ref.is_empty() && claimed_refs.contains(&item.backlog_ref.key()))
                    || (item.has_ccd_id() && claimed_ids.contains(&item.ccd_id))
            })
        }) {
            return Ok(Some(dispatch::NextStepObservation {
                item: dispatch::NextStepItem {
                    backlog_ref: item.backlog_ref.clone(),
                    ccd_id: item.ccd_id,
                    github_issue_number: item.github_issue_number,
                    content_trust: ContentTrust::ExternalAdapterOutput,
                    title: backlog_state::display_title(&item.title),
                    branch: None,
                },
                // Cached confidence requires both a cache and permission to
                // rely on cached work.
                confidence: if cache.is_some() && ctx.allow_cached_work {
                    dispatch::NextStepConfidence::Cached
                } else {
                    dispatch::NextStepConfidence::Unverified
                },
            }));
        }
        // Fallback: no cache match, but a claim exists — surface it with a
        // placeholder title so the claim is not silently dropped.
        if let Some(&ccd_id) = claimed_ids.iter().next() {
            return Ok(Some(dispatch::NextStepObservation {
                item: dispatch::NextStepItem {
                    backlog_ref: backlog_state::BacklogRef::default(),
                    ccd_id,
                    github_issue_number: 0,
                    content_trust: ContentTrust::ExternalAdapterOutput,
                    title: format!("ccd#{ccd_id}"),
                    branch: None,
                },
                confidence: if cache.is_some() && ctx.allow_cached_work {
                    dispatch::NextStepConfidence::Cached
                } else {
                    dispatch::NextStepConfidence::Unverified
                },
            }));
        }
        Ok(None)
    }

    /// On session start, adopts dispatch-state entries that were recorded
    /// without a session id and reports them as adopted (nothing is pruned).
    fn on_session_started(
        &self,
        ctx: &dispatch::SessionBoundaryContext<'_>,
    ) -> Result<dispatch::SessionBoundaryEffect> {
        let adopted =
            dispatch_state::adopt_unsessioned_entries(ctx.layout, ctx.locality_id, ctx.session_id)?;
        let adopted_views = adopted
            .iter()
            .map(local_assignment_to_dispatch_view)
            .collect();
        Ok(dispatch::SessionBoundaryEffect {
            adopted: adopted_views,
            pruned: Vec::new(),
        })
    }

    /// On session clear, drops that session's dispatch-state entries.
    fn on_session_cleared(
        &self,
        ctx: &dispatch::SessionBoundaryContext<'_>,
    ) -> Result<dispatch::SessionBoundaryEffect> {
        dispatch_state::remove_session_entries(ctx.layout, ctx.locality_id, ctx.session_id)?;
        Ok(dispatch::SessionBoundaryEffect::empty())
    }

    /// Cross-checks a local assignment against the work-queue cache and emits
    /// startup alerts when the referenced item is closed or no longer
    /// queue-scoped. Matching precedence: backlog ref key, then GitHub issue
    /// number (github-issues provider only), then ccd id. No cache or no
    /// match means no alert.
    fn resolve_assignment_references(
        &self,
        _ctx: &StartupContext<'_>,
        assignment: &dispatch::AssignmentView,
        cache: Option<&backlog_state::GitHubBacklogCache>,
    ) -> Result<Vec<dispatch::StartupAlert>> {
        let Some(cache) = cache else {
            return Ok(Vec::new());
        };
        let item = cache.items.iter().find(|item| {
            (!assignment.backlog_ref.is_empty()
                && !item.backlog_ref.is_empty()
                && item.backlog_ref.key() == assignment.backlog_ref.key())
                || (assignment.github_issue_number != 0
                    && item.backlog_ref.provider == "github-issues"
                    && item.github_issue_number == assignment.github_issue_number)
                || (assignment.ccd_id != 0 && item.ccd_id == assignment.ccd_id)
        });
        let Some(item) = item else {
            return Ok(Vec::new());
        };
        // Active and still queue-scoped: assignment is healthy.
        if item.is_active() && item.is_queue_scoped() {
            return Ok(Vec::new());
        }
        // Closed item: warn that the assignment points at finished work.
        if !item.is_active() {
            // Show both the display ref and the GH# form when they differ.
            let identifier =
                if item.github_issue_number != 0 && item.backlog_ref.provider == "github-issues" {
                    let display = item.display_ref();
                    let gh_ref = format!("GH#{}", item.github_issue_number);
                    if display == gh_ref {
                        display
                    } else {
                        format!("{display} / {gh_ref}")
                    }
                } else {
                    item.display_ref()
                };
            return Ok(vec![dispatch::StartupAlert {
                check: "closed_issue_reference",
                severity: dispatch::StartupAlertSeverity::Warning,
                message: format!(
                    "local assignment references closed work item `{}` `{}`; \
 retarget the session before relying on it",
                    identifier,
                    backlog_state::display_title(&item.title),
                ),
            }]);
        }
        // Open but out of queue scope: softer continuity warning.
        let out_of_queue_target =
            if item.github_issue_number != 0 && item.backlog_ref.provider == "github-issues" {
                format!(
                    "open GitHub issue `GH#{}` `{}`",
                    item.github_issue_number,
                    backlog_state::display_title(&item.title),
                )
            } else {
                format!(
                    "open work item `{}` `{}`",
                    item.display_ref(),
                    backlog_state::display_title(&item.title),
                )
            };
        Ok(vec![dispatch::StartupAlert {
            check: "out_of_queue_continuity",
            severity: dispatch::StartupAlertSeverity::Warning,
            message: format!(
                "local assignment still points at {out_of_queue_target} but it is no longer queue-scoped by CCD priority labels; keep continuity if intentional or relabel/retarget it before relying on backlog auto-selection",
            ),
        }])
    }

    /// Adds pod-status columns from the shared dispatch-state SQLite
    /// database. Best-effort: a missing file or any open/prepare/query
    /// failure yields `None` rather than an error.
    fn enrich_pod_status(
        &self,
        _pod_name: &str,
        _locality_id: &str,
        _profile: &str,
        shared_root: &std::path::Path,
    ) -> Option<Vec<(String, String)>> {
        let db_path = shared_root
            .join("extensions")
            .join("backlog")
            .join("dispatch-state.db");
        if !db_path.exists() {
            return None;
        }
        let conn = match rusqlite::Connection::open_with_flags(
            &db_path,
            rusqlite::OpenFlags::SQLITE_OPEN_READ_ONLY,
        ) {
            Ok(c) => c,
            Err(_) => return None,
        };
        let mut columns = Vec::new();
        if let Ok(mut stmt) = conn.prepare("SELECT branch, backlog_id FROM assignments LIMIT 3") {
            if let Ok(rows) = stmt.query_map([], |row| {
                // `branch` may be NULL; both lookup errors and NULL collapse
                // to an empty string.
                Ok((
                    row.get::<_, Option<String>>(0)
                        .unwrap_or_default()
                        .unwrap_or_default(),
                    row.get::<_, String>(1).unwrap_or_default(),
                ))
            }) {
                // NOTE(review): the query allows up to 3 rows but only the
                // first is consumed via `take(1)` — confirm the LIMIT is
                // intentional headroom rather than a leftover.
                for row in rows.flatten().take(1) {
                    if !row.0.is_empty() {
                        columns.push(("Branch".to_string(), row.0));
                    }
                    if !row.1.is_empty() {
                        columns.push(("Assignment".to_string(), row.1));
                    }
                }
            }
        }
        if columns.is_empty() {
            None
        } else {
            Some(columns)
        }
    }
}
/// Converts a persisted local assignment row into the cross-extension
/// dispatch view, classifying the owner by whether a session id is recorded.
fn local_assignment_to_dispatch_view(
    v: &dispatch_state::LocalAssignmentView,
) -> dispatch::AssignmentView {
    dispatch::AssignmentView {
        backlog_ref: v.backlog_ref.clone(),
        ccd_id: v.ccd_id,
        github_issue_number: v.github_issue_number,
        content_trust: ContentTrust::ExternalAdapterOutput,
        title: v.title.clone(),
        // An empty session id marks a pre-session (branch-owned) assignment.
        owner: match v.session_id.is_empty() {
            true => dispatch::AssignmentOwnerView::PreSessionBranch {
                branch: v.branch.clone().unwrap_or_default(),
            },
            false => dispatch::AssignmentOwnerView::Session {
                session_id: v.session_id.clone(),
            },
        },
        branch: v.branch.clone(),
        worktree: v.worktree.clone(),
    }
}
/// Translates the local dispatch result into the extension-facing outcome.
/// Unknown status strings map to `Skipped`; a present adapter dispatch view
/// always surfaces as a `NeedsInput` next step.
fn local_dispatch_to_outcome(
    view: &dispatch_state::LocalDispatchView,
) -> dispatch::AssignmentOutcome {
    dispatch::AssignmentOutcome {
        status: match view.status {
            "existing" => dispatch::AssignmentStatus::Existing,
            "assigned" => dispatch::AssignmentStatus::Assigned,
            _ => dispatch::AssignmentStatus::Skipped,
        },
        reason: view.reason.clone(),
        next_step: view.dispatch.as_ref().map(|dispatch_view| {
            // Fall back to a synthesized reason when the adapter gave none.
            let reason = dispatch_view.reason.clone().or_else(|| {
                Some(format!(
                    "backlog adapter reported `{}` and requires explicit actor input",
                    dispatch_view.status
                ))
            });
            dispatch::ExtensionNextStepView {
                status: dispatch::NextStepStatus::NeedsInput,
                source: dispatch::NextStepSource::BacklogAdapter,
                reason,
                observation: None,
            }
        }),
        assignment: view
            .assignment
            .as_ref()
            .map(local_assignment_to_dispatch_view),
    }
}
fn run_cli(command: BacklogCommand, output: OutputFormat) -> Result<ExitCode> {
match command {
BacklogCommand::Pull(args) => {
let repo_path = paths::cli::resolve(&args.path)?;
let report = backlog_commands::pull(
&repo_path,
args.profile.as_deref(),
args.repo_override.as_deref(),
)?;
output::render_report(output, &report)
}
BacklogCommand::Scope(args) => {
let repo_path = paths::cli::resolve(&args.path)?;
let report = backlog_commands::scope(
&repo_path,
args.profile.as_deref(),
args.repo_override.as_deref(),
&args.issues,
args.priority.as_str(),
)?;
output::render_report(output, &report)
}
BacklogCommand::Next(args) => {
let repo_path = paths::cli::resolve(&args.path)?;
let report = backlog_commands::next(&repo_path, args.profile.as_deref())?;
output::render_report(output, &report)
}
BacklogCommand::Claim(args) => {
let repo_path = paths::cli::resolve(&args.path)?;
let report = backlog_commands::claim(
&repo_path,
args.profile.as_deref(),
args.ccd_id,
!args.write,
)?;
output::render_report(output, &report)
}
BacklogCommand::SetStatus(args) => {
let repo_path = paths::cli::resolve(&args.path)?;
let report = backlog_commands::set_status(
&repo_path,
args.profile.as_deref(),
args.ccd_id,
args.status.as_str(),
!args.write,
)?;
output::render_report(output, &report)
}
BacklogCommand::Complete(args) => {
let repo_path = paths::cli::resolve(&args.path)?;
let report = backlog_commands::complete(
&repo_path,
args.profile.as_deref(),
args.ccd_id,
!args.write,
)?;
output::render_report(output, &report)
}
BacklogCommand::Adapters(args) => {
let repo_path = paths::cli::resolve(&args.path)?;
let report = backlog_commands::adapters(&repo_path, args.profile.as_deref())?;
output::render_report(output, &report)
}
BacklogCommand::PromoteNext(args) => {
let repo_path = paths::cli::resolve(&args.path)?;
let report = backlog_commands::promote_next(
&repo_path,
args.profile.as_deref(),
args.batch_size,
!args.write,
)?;
output::render_report(output, &report)
}
BacklogCommand::BootstrapGithub(args) => {
let repo_path = paths::cli::resolve(&args.path)?;
let report = backlog_commands::bootstrap_github(
&repo_path,
args.profile.as_deref(),
&args.github_repo,
args.adopt_existing,
)?;
output::render_report(output, &report)
}
BacklogCommand::PullGithub(args) => {
let repo_path = paths::cli::resolve(&args.path)?;
let report = backlog_commands::pull_github(
&repo_path,
args.profile.as_deref(),
&args.github_repo,
)?;
output::render_report(output, &report)
}
BacklogCommand::Lint(args) => {
let repo_path = paths::cli::resolve(&args.path)?;
let report = backlog_commands::lint(&repo_path, args.profile.as_deref())?;
output::render_report(output, &report)
}
BacklogCommand::Groom(args) => {
let repo_path = paths::cli::resolve(&args.path)?;
let report = backlog_commands::groom(&repo_path, args.profile.as_deref())?;
output::render_report(output, &report)
}
}
}
/// Builds the `backlog` CLI command with all subcommands attached; a bare
/// `ccd backlog` prints help instead of running anything.
fn backlog_cli_command() -> Command {
    let base = Command::new("backlog")
        .about("Work queue commands")
        .subcommand_required(true)
        .arg_required_else_help(true);
    BacklogCommand::augment_subcommands(base)
}
/// Parses the matched `backlog` subcommand back into `BacklogCommand` and
/// runs it.
fn run_cli_from_matches(matches: &ArgMatches, output: OutputFormat) -> Result<ExitCode> {
    run_cli(BacklogCommand::from_arg_matches(matches)?, output)
}
/// Builds the suggested `ccd backlog pull` invocation for a provider/repo
/// pair. Only the GitHub provider needs an explicit `--repo` (placeholder
/// `owner/name` when unknown); every other provider, including none, gets
/// the plain local pull.
pub(crate) fn pull_command_hint(provider: Option<&str>, repo: Option<&str>) -> String {
    let base = "ccd backlog pull --path .";
    if matches!(provider, Some(p) if p.eq_ignore_ascii_case("github-issues")) {
        format!("{base} --repo {}", repo.unwrap_or("owner/name"))
    } else {
        base.to_owned()
    }
}
/// Crate-visible wrapper so callers outside this module can refresh the
/// work-queue cache without depending on `backlog_commands` directly.
/// The returned tuple mirrors the delegate's contract (presumably a status
/// string plus an item count — confirm in `backlog_commands`).
pub(crate) fn refresh_from_existing_cache(
    repo_root: &Path,
    layout: &StateLayout,
) -> Result<(String, usize)> {
    backlog_commands::refresh_from_existing_cache(repo_root, layout)
}
/// Buckets the work-queue cache view into a Radar evaluation based on its
/// status string: "stale" → stale_cache, "loaded" → loaded, "empty" → empty,
/// anything else → no_cli_signal.
fn backlog_radar_evaluation(
    backlog_cache: &backlog_state::GitHubBacklogCacheView,
) -> super::RadarEvaluationBucket {
    let (status, summary, evidence) = if backlog_cache.status == "stale" {
        (
            "stale_cache",
            "The canonical work queue cache is present but stale; refresh it before relying on priority ordering."
                .to_owned(),
            // Evidence: cache location plus the repo it tracks (if known).
            vec![
                backlog_cache.path.clone(),
                backlog_cache
                    .repo
                    .clone()
                    .unwrap_or_else(|| "GitHub repo unknown".to_owned()),
            ],
        )
    } else if backlog_cache.status == "loaded" {
        (
            "loaded",
            "The canonical work queue cache is loaded for this clone and can drive backlog review offline."
                .to_owned(),
            // Evidence: one line per active item.
            backlog_cache
                .active_items
                .iter()
                .map(|item| format!("ccd#{}: {}", item.ccd_id, item.title))
                .collect(),
        )
    } else if backlog_cache.status == "empty" {
        (
            "empty",
            "The canonical work queue cache is present but contains no active items."
                .to_owned(),
            vec![backlog_cache.path.clone()],
        )
    } else {
        (
            "no_cli_signal",
            "No canonical work queue cache is active for this clone; backlog review is optional unless this repo maintains a separate shared queue."
                .to_owned(),
            Vec::new(),
        )
    };
    super::RadarEvaluationBucket {
        status,
        summary,
        evidence,
    }
}
/// Derives the two standard backlog approval steps from an evaluation; no
/// steps are emitted when there is no CLI signal at all.
fn backlog_radar_approval_steps(
    evaluation: &super::RadarEvaluationBucket,
) -> Vec<super::RadarApprovalStep> {
    if evaluation.status == "no_cli_signal" {
        return Vec::new();
    }
    // Both steps share the evaluation's summary and evidence; only the id and
    // question differ.
    [
        (
            "park_item",
            "Did any outside event or new idea appear worth parking?",
        ),
        (
            "reprioritize",
            "Should an existing priority be merged, deferred, or dropped?",
        ),
    ]
    .into_iter()
    .map(|(id, question)| super::RadarApprovalStep {
        id,
        question,
        recommended_answer: "no",
        recommendation: evaluation.summary.clone(),
        evidence: evaluation.evidence.clone(),
    })
    .collect()
}
/// Builds the `closed_issue_reference` behavioral-drift signal.
///
/// Gathers issue references from the branch name, handoff title, and handoff
/// immediate actions, augments them with the active assignment's backlog item
/// (when it is closed or out of queue scope), and then classifies the result.
/// The checks below are ORDER-SENSITIVE: closed matches outrank out-of-queue
/// matches, which outrank the stale-cache fallback, which outranks the
/// aligned/no-signal outcomes.
fn build_closed_issue_reference_signal(
    ctx: &super::RadarContext<'_>,
    backlog_status: &str,
    session_assignment: Option<&dispatch::AssignmentView>,
) -> super::RadarBehavioralDriftSignal {
    let branch = ctx.git.map(|git| git.branch.as_str()).unwrap_or("");
    let mut report = issue_refs::resolve_issue_references(
        branch,
        ctx.handoff_title,
        ctx.handoff_immediate_actions,
        ctx.startup.raw_backlog_cache,
    );
    // Fold the assignment's cache item into the report, but only when it is
    // problematic (closed or no longer queue-scoped). Matching precedence
    // mirrors `resolve_assignment_references`: ref key, GH number, ccd id.
    if let (Some(assignment), Some(cache)) = (session_assignment, ctx.startup.raw_backlog_cache) {
        let item = cache.items.iter().find(|item| {
            (!assignment.backlog_ref.is_empty()
                && !item.backlog_ref.is_empty()
                && item.backlog_ref.key() == assignment.backlog_ref.key())
                || (assignment.github_issue_number != 0
                    && item.backlog_ref.provider == "github-issues"
                    && item.github_issue_number == assignment.github_issue_number)
                || (assignment.ccd_id != 0 && item.ccd_id == assignment.ccd_id)
        });
        if let Some(item) = item {
            if !item.is_active() || !item.is_queue_scoped() {
                let branch_context = assignment.branch.clone().unwrap_or_default();
                // Prefer the GH# identity when the item came from GitHub.
                let matched_github_issue =
                    item.github_issue_number != 0 && item.backlog_ref.provider == "github-issues";
                report.references.push(issue_refs::IssueReference {
                    kind: if matched_github_issue {
                        issue_refs::IssueReferenceKind::GithubIssue
                    } else {
                        issue_refs::IssueReferenceKind::CcdId
                    },
                    number: if matched_github_issue {
                        item.github_issue_number
                    } else {
                        item.ccd_id
                    },
                    source: "locality_focus_assignment",
                    raw_reference: if matched_github_issue {
                        format!("GH#{}", item.github_issue_number)
                    } else {
                        format!("ccd#{}", item.ccd_id)
                    },
                    context: branch_context,
                });
                // Pair the reference just pushed with its resolution. The
                // `unwrap` is safe: the push above guarantees a last element.
                report.matches.push(issue_refs::ResolvedIssueReference {
                    reference: report.references.last().unwrap().clone(),
                    resolution_status: if item.is_active() {
                        issue_refs::ContinuityResolutionStatus::ResolvedOpen
                    } else {
                        issue_refs::ContinuityResolutionStatus::ResolvedClosed
                    },
                    matched_via: issue_refs::ContinuityMatchedVia::Cache,
                    title: Some(backlog_state::display_title(&item.title)),
                    url: Some(item.url.clone()),
                    cache_item: Some(issue_refs::ResolvedBacklogItem {
                        backlog_provider: item.backlog_ref.provider.clone(),
                        ccd_id: item.ccd_id,
                        github_issue_number: item.github_issue_number,
                        title: backlog_state::display_title(&item.title),
                        url: item.url.clone(),
                        closed: !item.is_active(),
                        out_of_queue: item.is_active() && !item.is_queue_scoped(),
                    }),
                });
            }
        }
    }
    // Nothing referenced anywhere: the signal cannot be evaluated.
    if report.references.is_empty() {
        return no_signal(
            "closed_issue_reference",
            "No branch or handoff issue references were detected, so semantic issue freshness could not be evaluated.",
            Vec::new(),
        );
    }
    // Strongest finding: something still points at closed work.
    if report.has_closed_matches() {
        let mut evidence = report.closed_evidence_lines();
        if backlog_status == "stale" {
            evidence.push(
                "The workspace-local work queue cache is stale, so the closed-item signal should be verified before drafting the next work stream or handoff."
                    .to_owned(),
            );
        }
        return drift(
            "closed_issue_reference",
            "The workspace-local handoff, work stream, or project-focus assignment still points at work items that the workspace-local work queue cache already marks closed.",
            evidence,
            "Retarget the next-session handoff and any issue-named work stream to an open item or use a neutral title before wrap-up.",
        );
    }
    // Next: open items that fell out of the priority queue.
    if report.has_out_of_queue_matches() {
        let mut evidence = report.out_of_queue_evidence_lines();
        if backlog_status == "stale" {
            evidence.push(
                "The workspace-local work queue cache is stale, so the out-of-queue signal should be revalidated before reusing backlog continuity."
                    .to_owned(),
            );
        }
        return drift(
            "closed_issue_reference",
            "The workspace-local handoff, work stream, or project-focus assignment still points at open GitHub issues that are no longer queue-scoped.",
            evidence,
            "Keep continuity only if that out-of-queue issue is still intentional; otherwise relabel it or retarget the next session before wrap-up.",
        );
    }
    // A stale cache prevents confirming the remaining references are open.
    if backlog_status == "stale" {
        let mut evidence = report.reference_evidence_lines();
        evidence.push(
            "The workspace-local work queue cache is stale, so Radar cannot confirm those references are still open."
                .to_owned(),
        );
        return no_signal(
            "closed_issue_reference",
            "Issue references were detected, but the workspace-local work queue cache is too stale to confirm whether they are still open.",
            evidence,
        );
    }
    // All primary references resolved: aligned.
    if report.has_primary_reference() && report.primary_references_all_matched() {
        return aligned(
            "closed_issue_reference",
            "Work-stream and handoff issue references still map to active items in the workspace-local work queue cache.",
            report.active_evidence_lines(),
        );
    }
    // Some references could not be resolved deterministically.
    if report.matched_reference_count() != report.references.len() {
        return no_signal(
            "closed_issue_reference",
            "Issue references were detected, but the workspace-local work queue cache could not resolve each one deterministically.",
            report.reference_evidence_lines(),
        );
    }
    aligned(
        "closed_issue_reference",
        "Work-stream and handoff issue references still map to active items in the workspace-local work queue cache.",
        report.active_evidence_lines(),
    )
}
/// Builds an `Aligned` drift signal (no correction needed).
fn aligned(
    id: &'static str,
    summary: &str,
    evidence: Vec<String>,
) -> super::RadarBehavioralDriftSignal {
    super::RadarBehavioralDriftSignal {
        status: super::RadarBehavioralDriftStatus::Aligned,
        summary: String::from(summary),
        recommended_correction: None,
        id,
        evidence,
    }
}
/// Builds a `Drift` signal carrying the suggested correction.
fn drift(
    id: &'static str,
    summary: &str,
    evidence: Vec<String>,
    recommended_correction: &str,
) -> super::RadarBehavioralDriftSignal {
    super::RadarBehavioralDriftSignal {
        status: super::RadarBehavioralDriftStatus::Drift,
        summary: String::from(summary),
        recommended_correction: Some(String::from(recommended_correction)),
        id,
        evidence,
    }
}
/// Builds a `NoSignal` drift signal (the check could not be evaluated).
fn no_signal(
    id: &'static str,
    summary: &str,
    evidence: Vec<String>,
) -> super::RadarBehavioralDriftSignal {
    super::RadarBehavioralDriftSignal {
        status: super::RadarBehavioralDriftStatus::NoSignal,
        summary: String::from(summary),
        recommended_correction: None,
        id,
        evidence,
    }
}
/// Resolves the optional MCP `path` argument (defaulting to `.`) into a
/// canonical repository path.
fn resolve_path(args: &Value) -> Result<PathBuf> {
    let raw = get_opt_str(args, "path").unwrap_or_else(|| String::from("."));
    paths::cli::resolve(&PathBuf::from(raw))
}
/// Reads a mandatory string field from the MCP argument object, producing a
/// descriptive error when absent or not a string.
fn get_required_string(args: &Value, key: &str) -> Result<String> {
    match get_opt_str(args, key) {
        Some(value) => Ok(value),
        None => Err(anyhow!("missing required argument `{key}`")),
    }
}
fn get_opt_str(args: &Value, key: &str) -> Option<String> {
args.get(key).and_then(Value::as_str).map(str::to_owned)
}
/// Reads an optional boolean field; absent or non-boolean values are `false`.
fn get_bool(args: &Value, key: &str) -> bool {
    args.get(key).and_then(Value::as_bool) == Some(true)
}
/// Serializes a command report into a JSON value for the MCP response.
fn to_value<T: Serialize>(report: &T) -> Result<Value> {
    serde_json::to_value(report).map_err(Into::into)
}
#[cfg(test)]
mod tests {
    use super::*;

    // Smoke-checks the static metadata the extension exposes through the
    // `Extension` trait object.
    #[test]
    fn backlog_extension_reports_metadata() {
        let extension: &dyn Extension = &BACKLOG_EXTENSION;
        assert_eq!(extension.name(), "backlog");
        assert_eq!(extension.command_groups(), &["backlog"]);
    }
}