use std::collections::{BTreeMap, BTreeSet};
use std::fs;
use std::path::Path;
use std::process::{Command, ExitCode};
use anyhow::{bail, Context, Result};
use serde::{Deserialize, Serialize};
use super::adapter::{self, AdapterSource, BacklogAdapterCapabilities, ResolvedAdapter};
use crate::content_trust::ContentTrust;
use crate::extensions::backlog_config;
use crate::extensions::backlog_state as backlog;
use crate::extensions::backlog_state::{
BacklogRef, CanonicalIssueMetadata, GitHubBacklogCache, GitHubBacklogCacheView,
GitHubBacklogDispatchView, GitHubBacklogItem, GitHubDispatchState, GitHubQueueState,
MetadataStatus, UpstreamClaimState,
};
use crate::output::CommandReport;
use crate::paths::git as git_paths;
use crate::paths::state::StateLayout;
use crate::paths::write;
use crate::profile;
use crate::repo::marker as repo_marker;
// Cap on how many issues are fetched per pull. NOTE(review): presumably passed
// to `gh issue list --limit` inside `fetch_cache`, which lives outside this
// chunk — confirm there.
const GITHUB_ISSUE_LIST_LIMIT: &str = "500";
// Issue kinds accepted by backlog metadata validation (consumers outside this chunk).
const ALLOWED_KINDS: &[&str] = &["item", "epic", "discovery"];
// Workflow statuses accepted by backlog metadata validation (consumers outside this chunk).
const ALLOWED_STATUSES: &[&str] = &["ready", "in-progress", "blocked", "parked", "done"];
/// Report payload for a GitHub-provider `ccd backlog pull`.
#[derive(Serialize)]
pub struct BacklogPullReport {
    command: &'static str,
    ok: bool,
    profile: String,
    repo: String,
    /// View of the freshly written workspace backlog cache.
    cache: GitHubBacklogCacheView,
    /// Total number of cached items.
    issue_count: usize,
    /// Number of items the cache considers active.
    active_items: usize,
}
/// Report payload for `ccd backlog bootstrap`: issues created on / adopted
/// from GitHub out of a source backlog document.
#[derive(Serialize)]
pub struct BacklogBootstrapReport {
    command: &'static str,
    ok: bool,
    profile: String,
    repo: String,
    /// Display path/name of the backlog the issues were sourced from.
    source_backlog: String,
    /// Issues newly created during bootstrap.
    created: Vec<BacklogIssueRef>,
    /// Pre-existing issues adopted as canonical.
    adopted: Vec<BacklogIssueRef>,
    cache: GitHubBacklogCacheView,
    issue_count: usize,
    active_items: usize,
}
/// Report payload for `ccd backlog lint`; `failures > 0` drives a non-zero
/// exit code (see the `CommandReport` impl below).
#[derive(Serialize)]
pub struct BacklogLintReport {
    command: &'static str,
    ok: bool,
    profile: String,
    cache: GitHubBacklogCacheView,
    issue_count: usize,
    active_items: usize,
    failures: usize,
    warnings: usize,
    /// Per-item lint findings; empty means a clean pass.
    diagnostics: Vec<BacklogLintDiagnostic>,
}
/// Report payload for `ccd backlog groom`: advisory candidates only, nothing
/// is mutated by grooming itself.
#[derive(Serialize)]
pub struct BacklogGroomReport {
    command: &'static str,
    ok: bool,
    profile: String,
    cache: GitHubBacklogCacheView,
    issue_count: usize,
    active_items: usize,
    /// Groups of items that look like duplicates worth merging.
    merge_candidates: Vec<BacklogMergeCandidate>,
    /// Active items that overlap with already-completed work.
    obsolete_candidates: Vec<BacklogObsoleteCandidate>,
    /// Items whose declared dependency looks stale or missing.
    stale_dependencies: Vec<BacklogStaleDependencyCandidate>,
    /// Items whose relative ordering looks wrong.
    reorder_candidates: Vec<BacklogReorderCandidate>,
    warnings: Vec<String>,
    /// Fatal problems; when non-empty, only these are rendered.
    errors: Vec<String>,
}
/// Minimal reference to a canonical backlog issue (ccd id + GitHub issue).
#[derive(Serialize)]
struct BacklogIssueRef {
    ccd_id: u64,
    github_issue_number: u64,
    content_trust: ContentTrust,
    title: String,
    url: String,
}
/// One lint finding. `status` is one of "pass"/"warn"/"fail" as rendered by
/// `BacklogLintReport::render_text`; anything else prints as INFO.
#[derive(Serialize)]
struct BacklogLintDiagnostic {
    status: &'static str,
    severity: &'static str,
    message: String,
}
/// Summary of a cached backlog item as embedded in groom candidates.
#[derive(Serialize)]
struct BacklogIssueSummary {
    ccd_id: u64,
    github_issue_number: u64,
    content_trust: ContentTrust,
    title: String,
    url: String,
    status: String,
    section: String,
    /// Lower rank = higher priority, when a rank is assigned at all.
    #[serde(skip_serializing_if = "Option::is_none")]
    priority_rank: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    module: Option<String>,
}
/// A set of items that share enough terms to look like duplicates.
#[derive(Serialize)]
struct BacklogMergeCandidate {
    items: Vec<BacklogIssueSummary>,
    /// Overlapping title/summary terms that triggered the suggestion.
    shared_terms: Vec<String>,
    reason: String,
}
/// An active item that overlaps with an already-completed one.
#[derive(Serialize)]
struct BacklogObsoleteCandidate {
    active: BacklogIssueSummary,
    completed: BacklogIssueSummary,
    shared_terms: Vec<String>,
    reason: String,
}
/// An item whose dependency looks stale; `dependency` is omitted when the
/// referenced ccd id is no longer present in the cache.
#[derive(Serialize)]
struct BacklogStaleDependencyCandidate {
    item: BacklogIssueSummary,
    dependency_ccd_id: u64,
    #[serde(skip_serializing_if = "Option::is_none")]
    dependency: Option<BacklogIssueSummary>,
    reason: String,
}
/// A suggested reordering of backlog items; `kind` labels the heuristic that
/// produced it.
#[derive(Serialize)]
struct BacklogReorderCandidate {
    kind: &'static str,
    items: Vec<BacklogIssueSummary>,
    reason: String,
}
impl CommandReport for BacklogPullReport {
    /// A pull that produced a report always succeeded.
    fn exit_code(&self) -> ExitCode {
        ExitCode::SUCCESS
    }

    /// Human-readable summary: cache location/status, queue counters, and the
    /// list of active items.
    fn render_text(&self) {
        println!(
            "Cached GitHub backlog for {} in profile {}.",
            self.repo, self.profile
        );
        let cache = &self.cache;
        let status = render_cache_status_summary(cache, self.issue_count);
        println!("Cache: {} ({})", cache.path, status);
        println!("Rendered view: {}", cache.rendered_path);
        let queue = &cache.queue_summary;
        println!(
            "Open issues: {}, queue-scoped: {}, queue candidates: {}, auto-selectable: {}.",
            queue.open_issues, queue.queue_scoped, queue.queue_candidates, queue.auto_selectable
        );
        // Conflicts are only worth a line when there are any.
        if queue.policy_conflicts > 0 {
            println!("Queue-policy conflicts: {}.", queue.policy_conflicts);
        }
        render_zero_queue_scope_hint(cache);
        for item in &cache.active_items {
            println!(
                "- [{}] {} (#{}, {})",
                item.display_ref(),
                item.title,
                item.github_issue_number,
                item.status
            );
        }
    }
}
impl CommandReport for BacklogBootstrapReport {
    /// Bootstrap reports are only constructed on success.
    fn exit_code(&self) -> ExitCode {
        ExitCode::SUCCESS
    }

    /// Prints the bootstrap summary followed by one line per created and
    /// adopted issue (created first, matching the summary order).
    fn render_text(&self) {
        println!(
            "Bootstrapped canonical GitHub backlog issues for {} in profile {}.",
            self.repo, self.profile
        );
        println!("Source backlog: {}", self.source_backlog);
        println!(
            "Created {} issue(s), adopted {} existing issue(s).",
            self.created.len(),
            self.adopted.len()
        );
        println!("Cache: {} ({})", self.cache.path, self.cache.status);
        println!("Rendered view: {}", self.cache.rendered_path);
        // Same line shape for both lists; only the verb differs.
        for (verb, refs) in [("Created", &self.created), ("Adopted", &self.adopted)] {
            for item in refs {
                println!(
                    "- {verb} [ccd#{}] {} (#{}).",
                    item.ccd_id, item.title, item.github_issue_number
                );
            }
        }
    }
}
impl CommandReport for BacklogLintReport {
    /// Exit 1 when any lint failure was recorded, 0 otherwise.
    fn exit_code(&self) -> ExitCode {
        match self.failures {
            0 => ExitCode::SUCCESS,
            _ => ExitCode::from(1),
        }
    }

    /// With diagnostics, prints one labeled line each; without, prints the
    /// clean-pass banner. A summary line is always emitted last.
    fn render_text(&self) {
        if !self.diagnostics.is_empty() {
            for diag in &self.diagnostics {
                // Unknown status strings degrade to INFO rather than erroring.
                let label = match diag.status {
                    "fail" => "FAIL",
                    "warn" => "WARN",
                    "pass" => "PASS",
                    _ => "INFO",
                };
                println!("[{label}] {}", diag.message);
            }
        } else {
            println!(
                "Validated cached canonical backlog for profile {}.",
                self.profile
            );
            println!("Cache: {} ({})", self.cache.path, self.cache.status);
            println!(
                "Canonical items: {} total, {} active.",
                self.issue_count, self.active_items
            );
        }
        println!(
            "Backlog lint summary: {} failure(s), {} warning(s), {} cached item(s).",
            self.failures, self.warnings, self.issue_count
        );
    }
}
impl CommandReport for BacklogGroomReport {
    /// Exit 1 when grooming itself failed (`ok == false`), 0 otherwise.
    fn exit_code(&self) -> ExitCode {
        if self.ok {
            ExitCode::SUCCESS
        } else {
            ExitCode::from(1)
        }
    }
    /// Errors short-circuit rendering: when any exist, ONLY the error lines
    /// are printed. Otherwise the full candidate breakdown plus a trailing
    /// summary line is emitted.
    fn render_text(&self) {
        if !self.errors.is_empty() {
            for error in &self.errors {
                println!("[FAIL] {error}");
            }
        } else {
            println!("Backlog grooming summary for profile {}.", self.profile);
            println!("Cache: {} ({})", self.cache.path, self.cache.status);
            for warning in &self.warnings {
                println!("[WARN] {warning}");
            }
            for candidate in &self.merge_candidates {
                println!("[MERGE] {}", candidate.reason);
            }
            for candidate in &self.obsolete_candidates {
                println!("[OBSOLETE] {}", candidate.reason);
            }
            for candidate in &self.stale_dependencies {
                println!("[STALE-DEP] {}", candidate.reason);
            }
            for candidate in &self.reorder_candidates {
                println!("[REORDER] {}", candidate.reason);
            }
            // "Nothing to do" line only when every candidate list AND the
            // warning list are empty.
            if self.merge_candidates.is_empty()
                && self.obsolete_candidates.is_empty()
                && self.stale_dependencies.is_empty()
                && self.reorder_candidates.is_empty()
                && self.warnings.is_empty()
            {
                println!("No backlog grooming candidates were found.");
            }
            println!(
                "Backlog groom summary: {} merge, {} obsolete, {} stale dependency, {} reorder candidate(s).",
                self.merge_candidates.len(),
                self.obsolete_candidates.len(),
                self.stale_dependencies.len(),
                self.reorder_candidates.len()
            );
        }
    }
}
/// An item parsed from a source backlog document. NOTE(review): presumably
/// consumed by the bootstrap flow; the parser/consumer is outside this chunk.
#[derive(Debug, Clone)]
struct SourceBacklogItem {
    ccd_id: u64,
    title: String,
    summary: String,
    kind: String,
    section: String,
    status: String,
    effort: String,
    impact: String,
    claimed_by: Option<String>,
    /// ccd ids this item depends on.
    depends_on: Vec<u64>,
}
/// Subset of issue fields deserialized from GitHub CLI (`gh`) JSON output.
#[derive(Debug, Deserialize)]
struct GhIssueRecord {
    number: u64,
    title: String,
    body: String,
    url: String,
    state: String,
    #[serde(default)]
    labels: Vec<GhLabelRecord>,
    #[serde(default)]
    assignees: Vec<GhUserRecord>,
}
/// A GitHub label as emitted by `gh` JSON (object with a `name` field).
#[derive(Debug, Deserialize)]
struct GhLabelRecord {
    name: String,
}
/// A GitHub user as emitted by `gh` JSON (object with a `login` field).
#[derive(Debug, Deserialize)]
struct GhUserRecord {
    login: String,
}
/// Subset of issue fields from GitLab issue JSON. GitLab labels come as bare
/// strings (unlike GitHub's label objects), and `iid` is GitLab's
/// project-relative issue number.
#[derive(Debug, Deserialize)]
struct GlabIssueRecord {
    iid: u64,
    title: String,
    #[serde(default)]
    description: Option<String>,
    web_url: String,
    state: String,
    #[serde(default)]
    issue_type: Option<String>,
    #[serde(default)]
    labels: Vec<String>,
    #[serde(default)]
    assignees: Vec<GlabUserRecord>,
}
/// A GitLab user as emitted in issue JSON (object with a `username` field).
#[derive(Debug, Deserialize)]
struct GlabUserRecord {
    username: String,
}
/// Capabilities for a built-in backlog provider, keyed by its config string.
///
/// All three shipped providers are read-only through this path; any
/// unrecognized provider name gets no capabilities at all.
fn builtin_capabilities(provider: &str) -> BacklogAdapterCapabilities {
    match provider {
        // The shipped providers share one capability surface, so a single
        // or-pattern arm replaces three identical arms.
        "github-issues" | "gitlab-issues" | "local-markdown" => {
            BacklogAdapterCapabilities::read_only()
        }
        _ => BacklogAdapterCapabilities::none(),
    }
}
/// Computes the capability set for a resolved adapter: built-ins map through
/// `builtin_capabilities`, external commands declare their own capability
/// strings (empty declaration means no capabilities).
fn resolve_capabilities(adapter: &ResolvedAdapter) -> BacklogAdapterCapabilities {
    match adapter {
        ResolvedAdapter::Builtin { provider, .. } => {
            let name = match provider {
                adapter::BuiltinProvider::GithubIssues => "github-issues",
                adapter::BuiltinProvider::GitlabIssues => "gitlab-issues",
                adapter::BuiltinProvider::LocalMarkdown => "local-markdown",
            };
            builtin_capabilities(name)
        }
        ResolvedAdapter::ExternalCommand(cfg) => {
            if cfg.capabilities.is_empty() {
                return BacklogAdapterCapabilities::none();
            }
            // Declared capability lists are tiny; a linear scan per flag is fine.
            let declared = |op: &str| cfg.capabilities.iter().any(|c| c == op);
            BacklogAdapterCapabilities {
                pull: declared("pull"),
                next: declared("next"),
                claim: declared("claim"),
                set_status: declared("set-status"),
                complete: declared("complete"),
                promote_next: declared("promote-next"),
                // Both spellings are accepted for backward compatibility.
                resolve_refs: declared("resolve-refs") || declared("resolve_refs"),
            }
        }
    }
}
/// True when `provider` names the built-in local-markdown adapter
/// (ASCII case-insensitive).
fn is_local_markdown_provider(provider: &str) -> bool {
    "local-markdown".eq_ignore_ascii_case(provider)
}
/// True when `provider` names the built-in GitHub adapter
/// (ASCII case-insensitive).
fn is_github_provider(provider: &str) -> bool {
    "github-issues".eq_ignore_ascii_case(provider)
}
/// True when `provider` names the built-in GitLab adapter
/// (ASCII case-insensitive).
fn is_gitlab_provider(provider: &str) -> bool {
    "gitlab-issues".eq_ignore_ascii_case(provider)
}
/// Builds the `ccd backlog pull` invocation to suggest to the user for the
/// given provider. GitHub/GitLab hints include a `--repo` argument, falling
/// back to a placeholder when no repo is known; everything else (including an
/// unknown or absent provider) gets the bare local form.
pub(crate) fn pull_command_hint(provider: Option<&str>, repo: Option<&str>) -> String {
    const LOCAL_HINT: &str = "ccd backlog pull --path .";
    let Some(provider) = provider else {
        return LOCAL_HINT.to_owned();
    };
    if is_github_provider(provider) {
        format!(
            "ccd backlog pull --path . --repo {}",
            repo.unwrap_or("owner/name")
        )
    } else if is_gitlab_provider(provider) {
        format!(
            "ccd backlog pull --path . --repo {}",
            repo.unwrap_or("group/project")
        )
    } else {
        // Covers local-markdown and any unrecognized provider alike.
        LOCAL_HINT.to_owned()
    }
}
/// Like `pull_command_hint`, but derives the provider (and any configured
/// repo) from whichever adapter is currently selected for the layout.
/// Resolution failures and external-command adapters fall back to the
/// generic hint.
fn pull_hint_for_selected_adapter(layout: &StateLayout, repo_root: &Path) -> String {
    let (resolved, inputs) = match adapter::resolve_adapter(layout, repo_root, None) {
        Ok(pair) => pair,
        Err(_) => return pull_command_hint(None, None),
    };
    match &resolved {
        ResolvedAdapter::Builtin { provider, .. } => {
            let repo = inputs.get("repo").map(String::as_str);
            match provider {
                adapter::BuiltinProvider::LocalMarkdown => {
                    pull_command_hint(Some("local-markdown"), None)
                }
                adapter::BuiltinProvider::GithubIssues => {
                    pull_command_hint(Some("github-issues"), repo)
                }
                adapter::BuiltinProvider::GitlabIssues => {
                    pull_command_hint(Some("gitlab-issues"), repo)
                }
            }
        }
        _ => pull_command_hint(None, None),
    }
}
/// Adapter-neutral report for `ccd backlog pull` (works for any provider;
/// `repo` holds whatever display string the adapter path produced).
#[derive(Serialize)]
pub struct BacklogNeutralPullReport {
    command: &'static str,
    ok: bool,
    profile: String,
    /// Name of the adapter that served the pull.
    adapter: String,
    repo: String,
    cache: GitHubBacklogCacheView,
    issue_count: usize,
    active_items: usize,
}
/// Report for `ccd backlog scope`: which issues got the priority label,
/// which already had it, and which labels had to be created.
#[derive(Serialize)]
pub struct BacklogScopeReport {
    command: &'static str,
    ok: bool,
    profile: String,
    repo: String,
    priority_label: String,
    requested_issues: Vec<u64>,
    /// Issues whose labels were actually edited.
    updated_issues: Vec<u64>,
    /// Issues that already carried exactly the requested label.
    unchanged_issues: Vec<u64>,
    /// Labels created on the repo as a side effect.
    created_labels: Vec<String>,
    cache: GitHubBacklogCacheView,
}
impl CommandReport for BacklogNeutralPullReport {
    /// Mirrors the `ok` flag as the process exit code.
    fn exit_code(&self) -> ExitCode {
        match self.ok {
            true => ExitCode::SUCCESS,
            false => ExitCode::FAILURE,
        }
    }

    /// Same layout as the GitHub-specific pull report, but names the adapter
    /// and omits issue numbers from the per-item lines.
    fn render_text(&self) {
        println!(
            "Cached backlog for {} via adapter `{}` in profile {}.",
            self.repo, self.adapter, self.profile
        );
        let cache = &self.cache;
        let status = render_cache_status_summary(cache, self.issue_count);
        println!("Cache: {} ({})", cache.path, status);
        println!("Rendered view: {}", cache.rendered_path);
        let queue = &cache.queue_summary;
        println!(
            "Open issues: {}, queue-scoped: {}, queue candidates: {}, auto-selectable: {}.",
            queue.open_issues, queue.queue_scoped, queue.queue_candidates, queue.auto_selectable
        );
        if queue.policy_conflicts > 0 {
            println!("Queue-policy conflicts: {}.", queue.policy_conflicts);
        }
        render_zero_queue_scope_hint(cache);
        for item in &cache.active_items {
            println!(
                "- [{}] {} ({})",
                item.display_ref(),
                item.title,
                item.status
            );
        }
    }
}
impl CommandReport for BacklogScopeReport {
    /// Mirrors the `ok` flag as the process exit code, consistent with the
    /// other mutating backlog reports (`BacklogNeutralPullReport`,
    /// `BacklogGroomReport`). `scope()` currently always sets `ok = true`,
    /// so this is a behavior-compatible consistency fix rather than a change
    /// callers can observe today.
    fn exit_code(&self) -> ExitCode {
        if self.ok {
            ExitCode::SUCCESS
        } else {
            ExitCode::FAILURE
        }
    }
    /// Prints the scoping outcome: updated count, already-scoped issues,
    /// any labels created, and the cache location/status.
    fn render_text(&self) {
        println!(
            "Scoped {} GitHub issue(s) to `{}` in {}.",
            self.updated_issues.len(),
            self.priority_label,
            self.repo
        );
        if !self.unchanged_issues.is_empty() {
            println!(
                "Already scoped: {}.",
                render_issue_number_list(&self.unchanged_issues)
            );
        }
        if !self.created_labels.is_empty() {
            println!("Created labels: {}.", self.created_labels.join(", "));
        }
        println!(
            "Cache: {} ({})",
            self.cache.path,
            render_cache_status_summary(&self.cache, self.cache.queue_summary.open_issues)
        );
    }
}
/// Pulls the backlog through whichever adapter is active for the profile,
/// writes the refreshed cache into the workspace state layout, and returns
/// an adapter-neutral report.
///
/// The three built-in providers (GitHub, GitLab, local markdown) take
/// dedicated fetch paths; any other adapter goes through the generic
/// external-command protocol via `backlog.pull`.
///
/// # Errors
/// Fails when the profile/layout cannot be resolved, when a provider that
/// needs a `repo` input has none, when repo validation fails, or when the
/// fetch / cache write / adapter invocation fails.
pub fn pull(
    repo_root: &Path,
    explicit_profile: Option<&str>,
    repo_override: Option<&str>,
) -> Result<BacklogNeutralPullReport> {
    let profile = profile::resolve(explicit_profile)?;
    let layout = StateLayout::resolve(repo_root, profile.clone())?;
    ensure_profile_exists(&layout)?;
    // Applied before adapter resolution so a `--repo` override can activate
    // the GitHub binding for this workspace.
    maybe_activate_github_backlog_binding(&layout, repo_root, repo_override)?;
    let (resolved, inputs) = adapter::resolve_adapter(&layout, repo_root, repo_override)?;
    // --- Built-in GitHub provider ---
    if matches!(
        &resolved,
        ResolvedAdapter::Builtin {
            provider: adapter::BuiltinProvider::GithubIssues,
            ..
        }
    ) {
        let repo = inputs.get("repo").cloned().ok_or_else(|| {
            anyhow::anyhow!(
                "backlog adapter `{}` requires a `repo` input (e.g. `owner/name`). \
                Pass `--repo owner/name` or set `repo = \"owner/name\"` in the repo \
                overlay `config.toml` `[[extensions]]` backlog entry.",
                resolved.name()
            )
        })?;
        let github_repo = validate_github_repo(&repo)?;
        let cache = fetch_cache(repo_root, github_repo)?;
        backlog::write_cache(&layout, &cache)?;
        // NOTE(review): the `12` appears to cap items included in the view —
        // confirm against `load_cache_view`.
        let cache_view = backlog::load_cache_view(&layout, 12)?;
        return Ok(BacklogNeutralPullReport {
            command: "backlog pull",
            ok: true,
            profile: profile.to_string(),
            adapter: resolved.name().to_owned(),
            repo: github_repo.to_owned(),
            issue_count: cache.items.len(),
            active_items: cache.active_items().len(),
            cache: cache_view,
        });
    }
    // --- Built-in GitLab provider ---
    if matches!(
        &resolved,
        ResolvedAdapter::Builtin {
            provider: adapter::BuiltinProvider::GitlabIssues,
            ..
        }
    ) {
        let repo = inputs.get("repo").cloned().ok_or_else(|| {
            anyhow::anyhow!(
                "backlog adapter `{}` requires a `repo` input (e.g. `group/project` or `https://gitlab.example.com/group/project`). \
                Pass `--repo ...` or set `repo = \"...\"` in the repo overlay `config.toml` `[[extensions]]` backlog entry.",
                resolved.name()
            )
        })?;
        let gitlab_project = validate_gitlab_project_locator(&repo)?.to_owned();
        let cache = fetch_gitlab_cache(repo_root, &gitlab_project)?;
        backlog::write_cache(&layout, &cache)?;
        let cache_view = backlog::load_cache_view(&layout, 12)?;
        return Ok(BacklogNeutralPullReport {
            command: "backlog pull",
            ok: true,
            profile: profile.to_string(),
            adapter: resolved.name().to_owned(),
            repo: gitlab_project.to_owned(),
            issue_count: cache.items.len(),
            active_items: cache.active_items().len(),
            cache: cache_view,
        });
    }
    // --- Built-in local-markdown provider ---
    if matches!(
        &resolved,
        ResolvedAdapter::Builtin {
            provider: adapter::BuiltinProvider::LocalMarkdown,
            ..
        }
    ) {
        // The markdown path is used purely as the report's display string.
        let backlog_path = repo_root.join("backlog.md");
        let cache = fetch_local_markdown_cache(repo_root)?;
        backlog::write_cache(&layout, &cache)?;
        let cache_view = backlog::load_cache_view(&layout, 12)?;
        return Ok(BacklogNeutralPullReport {
            command: "backlog pull",
            ok: true,
            profile: profile.to_string(),
            adapter: resolved.name().to_owned(),
            repo: backlog_path.display().to_string(),
            issue_count: cache.items.len(),
            active_items: cache.active_items().len(),
            cache: cache_view,
        });
    }
    // --- External-command adapter fallback ---
    // A missing repo marker is tolerated; locality defaults to empty.
    let marker = crate::repo::marker::load(repo_root).ok().flatten();
    let locality_id = marker
        .as_ref()
        .map(|m| m.locality_id.clone())
        .unwrap_or_default();
    let request = adapter::AdapterRequest::new(
        "backlog.pull".to_owned(),
        profile.as_str().to_owned(),
        repo_root.to_path_buf(),
        locality_id,
        inputs.clone(),
    );
    let response = adapter::execute_adapter_operation(&resolved, &request)?;
    let items = response.items.unwrap_or_default();
    // Prefer the configured repo for display; fall back to the adapter name.
    let repo_display = inputs
        .get("repo")
        .cloned()
        .unwrap_or_else(|| resolved.name().to_owned());
    let fetched_at = response.fetched_at_epoch_s.unwrap_or(0);
    let mut cache =
        backlog::GitHubBacklogCache::new(repo_display.clone(), fetched_at, items.clone());
    cache.provider = response.provider.clone();
    cache.refresh_dispatch_states();
    backlog::write_cache(&layout, &cache)?;
    let cache_view = backlog::load_cache_view(&layout, 12)?;
    Ok(BacklogNeutralPullReport {
        command: "backlog pull",
        ok: true,
        profile: profile.to_string(),
        adapter: resolved.name().to_owned(),
        repo: repo_display,
        issue_count: items.len(),
        active_items: cache.active_items().len(),
        cache: cache_view,
    })
}
/// Report for `ccd backlog next`. `item` is the auto-selected candidate, if
/// any; `dispatch` explains why nothing was selected otherwise.
#[derive(Serialize)]
pub struct BacklogNextReport {
    command: &'static str,
    ok: bool,
    profile: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    dispatch: Option<GitHubBacklogDispatchView>,
    item: Option<BacklogNextItem>,
}
/// Flattened view of the auto-selected backlog item for the `next` report.
#[derive(Serialize)]
pub struct BacklogNextItem {
    ccd_id: u64,
    backlog_ref: BacklogRef,
    github_issue_number: u64,
    display_ref: String,
    content_trust: ContentTrust,
    title: String,
    section: String,
    status: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    priority_rank: Option<u64>,
    url: String,
}
impl CommandReport for BacklogNextReport {
    /// `next` is informational; it always exits 0.
    fn exit_code(&self) -> ExitCode {
        ExitCode::SUCCESS
    }
    /// Prints the selected item, or the dispatch status explaining why none
    /// was selected, or a generic empty-cache message.
    fn render_text(&self) {
        match &self.item {
            Some(item) => {
                // Fix: the old code used `{:?}` on Option<u64>, leaking Debug
                // output ("rank Some(3)" / "rank None") into user-facing text.
                // Render the rank like BacklogPromoteNextReport does instead.
                let rank = item
                    .priority_rank
                    .map(|r| format!("rank {r}"))
                    .unwrap_or_else(|| "no rank".to_owned());
                println!(
                    "[{}] {} ({}) — {}",
                    item.display_ref, item.title, rank, item.url
                )
            }
            None => match &self.dispatch {
                Some(dispatch) => println!(
                    "No auto-selected item: {}{}",
                    dispatch.status,
                    dispatch
                        .reason
                        .as_deref()
                        .map(|reason| format!(" ({reason})"))
                        .unwrap_or_default()
                ),
                None => println!("No ready items available in the backlog cache."),
            },
        }
    }
}
/// Reads the cached backlog (if any) and reports the item the dispatcher
/// would auto-select next, without mutating anything.
pub fn next(repo_root: &Path, explicit_profile: Option<&str>) -> Result<BacklogNextReport> {
    let profile = profile::resolve(explicit_profile)?;
    let layout = StateLayout::resolve(repo_root, profile.clone())?;
    let cache = backlog::load_cache(&layout)?;
    // Empty exclusion sets: nothing is filtered from dispatch here.
    let dispatch = match cache.as_ref() {
        Some(c) => backlog::dispatch_summary_view(c, &BTreeSet::new(), &BTreeSet::new()),
        None => None,
    };
    let mut item = None;
    if let Some(view) = dispatch.as_ref() {
        if let Some(selected) = view.selected.as_ref() {
            item = Some(BacklogNextItem {
                ccd_id: selected.ccd_id,
                backlog_ref: selected.backlog_ref.clone(),
                github_issue_number: selected.github_issue_number,
                display_ref: selected.display_ref(),
                content_trust: selected.content_trust,
                title: selected.title.clone(),
                section: selected.section.clone(),
                status: selected.status.clone(),
                priority_rank: selected.priority_rank,
                url: selected.url.clone(),
            });
        }
    }
    Ok(BacklogNextReport {
        command: "backlog next",
        ok: true,
        profile: profile.to_string(),
        dispatch,
        item,
    })
}
/// Applies a priority label to the requested GitHub issues ("scoping" them
/// into the queue), creating the label if needed, then refreshes the cache.
///
/// Only the built-in `github-issues` adapter is supported. All requested
/// issues must exist and be open; validation happens against a fresh pull
/// before any label edits are made.
///
/// # Errors
/// Fails on selector/priority parse errors, adapter mismatch, missing or
/// closed issues, or any `gh` invocation failure.
pub fn scope(
    repo_root: &Path,
    explicit_profile: Option<&str>,
    repo_override: Option<&str>,
    issues: &str,
    priority: &str,
) -> Result<BacklogScopeReport> {
    // Parse user input first so bad selectors fail before any I/O.
    let requested_issues = parse_issue_selector(issues)?;
    let priority_bucket = parse_priority_bucket(priority)?;
    let priority_label = priority_bucket.as_label().to_owned();
    let profile = profile::resolve(explicit_profile)?;
    let layout = StateLayout::resolve(repo_root, profile.clone())?;
    ensure_profile_exists(&layout)?;
    maybe_activate_github_backlog_binding(&layout, repo_root, repo_override)?;
    let (resolved, inputs) = adapter::resolve_adapter(&layout, repo_root, repo_override)?;
    // Scoping writes labels via `gh`, so only the GitHub built-in qualifies.
    let github_repo = match &resolved {
        ResolvedAdapter::Builtin {
            provider: adapter::BuiltinProvider::GithubIssues,
            ..
        } => inputs.get("repo").cloned().ok_or_else(|| {
            anyhow::anyhow!(
                "backlog adapter `{}` requires a `repo` input (e.g. `owner/name`). Pass `--repo owner/name` or set `repo = \"owner/name\"` in the repo overlay `config.toml` `[[extensions]]` backlog entry.",
                resolved.name()
            )
        })?,
        _ => bail!(
            "`ccd backlog scope` currently supports only the built-in `github-issues` adapter; active adapter is `{}`",
            resolved.name()
        ),
    };
    let github_repo = validate_github_repo(&github_repo)?.to_owned();
    // Fresh pull so existence/open checks run against current issue state.
    let initial_cache = fetch_cache(repo_root, &github_repo)?;
    let indexed = initial_cache
        .items
        .iter()
        .map(|item| (item.github_issue_number, item))
        .collect::<BTreeMap<_, _>>();
    let missing = requested_issues
        .iter()
        .copied()
        .filter(|issue_number| !indexed.contains_key(issue_number))
        .collect::<Vec<_>>();
    if !missing.is_empty() {
        bail!(
            "GitHub repo `{github_repo}` does not contain issue(s): {}",
            render_issue_number_list(&missing)
        );
    }
    let closed = requested_issues
        .iter()
        .copied()
        .filter(|issue_number| {
            indexed
                .get(issue_number)
                .map(|item| !item.is_active())
                .unwrap_or(false)
        })
        .collect::<Vec<_>>();
    if !closed.is_empty() {
        bail!(
            "`ccd backlog scope` only supports open GitHub issues; closed issue(s): {}",
            render_issue_number_list(&closed)
        );
    }
    // Create the priority label on the repo if it does not exist yet.
    let mut created_labels = Vec::new();
    if ensure_github_label_exists(repo_root, &github_repo, priority_bucket)? {
        created_labels.push(priority_label.clone());
    }
    let mut updated_issues = Vec::new();
    let mut unchanged_issues = Vec::new();
    for issue_number in &requested_issues {
        let item = indexed.get(issue_number).expect("validated issue");
        let existing_priority_labels = item
            .labels
            .iter()
            .filter(|label| is_priority_label(label))
            .cloned()
            .collect::<Vec<_>>();
        // "Already scoped" means exactly one priority label, and it is ours.
        let already_scoped = existing_priority_labels.len() == 1
            && existing_priority_labels.first().map(String::as_str)
                == Some(priority_label.as_str());
        if already_scoped {
            unchanged_issues.push(*issue_number);
            continue;
        }
        // One `gh issue edit` per issue: strip competing priority labels and
        // add the requested one in a single invocation.
        let mut args = vec![
            "issue".to_owned(),
            "edit".to_owned(),
            issue_number.to_string(),
            "--repo".to_owned(),
            github_repo.clone(),
        ];
        for label in existing_priority_labels
            .iter()
            .filter(|label| label.as_str() != priority_label)
        {
            args.push("--remove-label".to_owned());
            args.push(label.clone());
        }
        args.push("--add-label".to_owned());
        args.push(priority_label.clone());
        run_gh(repo_root, &args)?;
        updated_issues.push(*issue_number);
    }
    // Re-pull so the persisted cache reflects the label edits just made.
    let refreshed_cache = fetch_cache(repo_root, &github_repo)?;
    backlog::write_cache(&layout, &refreshed_cache)?;
    let cache_view = backlog::load_cache_view(&layout, 12)?;
    Ok(BacklogScopeReport {
        command: "backlog scope",
        ok: true,
        profile: profile.to_string(),
        repo: github_repo,
        priority_label,
        requested_issues,
        updated_issues,
        unchanged_issues,
        created_labels,
        cache: cache_view,
    })
}
/// Shared report for the mutating commands (`claim`, `set-status`,
/// `complete`). `status` is only populated by `set-status`.
#[derive(Serialize)]
pub struct BacklogMutationReport {
    command: &'static str,
    ok: bool,
    profile: String,
    ccd_id: u64,
    /// True when the adapter operation was previewed, not executed.
    dry_run: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    status: Option<String>,
}
impl CommandReport for BacklogMutationReport {
    /// Mirrors the `ok` flag as the process exit code.
    fn exit_code(&self) -> ExitCode {
        match self.ok {
            true => ExitCode::SUCCESS,
            false => ExitCode::FAILURE,
        }
    }

    /// Three outcomes: successful preview, successful mutation, or failure
    /// (which goes to stderr).
    fn render_text(&self) {
        match (self.ok, self.dry_run) {
            (true, true) => {
                println!(
                    "[ccd#{}] preview only. Pass --write to apply {}.",
                    self.ccd_id, self.command
                );
                if let Some(status) = &self.status {
                    println!("Target status: {status}");
                }
            }
            (true, false) => {
                println!("[ccd#{}] mutation applied.", self.ccd_id);
                if let Some(status) = &self.status {
                    println!("Status: {status}");
                }
            }
            (false, _) => eprintln!("Backlog mutation failed."),
        }
    }
}
/// One registered adapter as listed by `ccd backlog adapters`.
#[derive(Serialize)]
pub struct BacklogAdapterEntry {
    pub name: String,
    /// Config kind string (e.g. "builtin").
    pub kind: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub provider: Option<String>,
    pub capabilities: BacklogAdapterCapabilities,
}
/// Report for `ccd backlog adapters`: the registry plus which adapter is
/// active and where that choice came from.
#[derive(Serialize)]
pub struct BacklogAdaptersReport {
    command: &'static str,
    ok: bool,
    profile: String,
    active_adapter: String,
    /// Provenance of the active selection (e.g. profile default vs fallback).
    active_adapter_source: &'static str,
    adapters: Vec<BacklogAdapterEntry>,
}
impl CommandReport for BacklogAdaptersReport {
    /// Listing adapters is informational; always exits 0.
    fn exit_code(&self) -> ExitCode {
        ExitCode::SUCCESS
    }

    /// Lists every registered adapter (the active one marked with `*`) and
    /// finishes with the active adapter and its selection source.
    fn render_text(&self) {
        println!("Registered backlog adapters for profile {}:", self.profile);
        for entry in &self.adapters {
            let supported = entry.capabilities.supported_ops();
            let ops = if supported.is_empty() {
                "none".to_owned()
            } else {
                supported.join(", ")
            };
            let active_marker = if entry.name == self.active_adapter {
                " *"
            } else {
                ""
            };
            println!(
                " {}{} ({}) — {}",
                entry.name, active_marker, entry.kind, ops
            );
        }
        println!(
            "Active adapter: {} (from {})",
            self.active_adapter, self.active_adapter_source
        );
    }
}
/// Claims backlog item `ccd_id` through the active adapter.
///
/// Bails when the resolved adapter does not declare the `claim` capability.
/// In dry-run mode the request is built but the adapter is never invoked.
pub fn claim(
    repo_root: &Path,
    explicit_profile: Option<&str>,
    ccd_id: u64,
    dry_run: bool,
) -> Result<BacklogMutationReport> {
    let profile = profile::resolve(explicit_profile)?;
    let layout = StateLayout::resolve(repo_root, profile.clone())?;
    let (resolved, inputs) = adapter::resolve_adapter(&layout, repo_root, None)?;
    if !resolve_capabilities(&resolved).claim {
        bail!(
            "ccd backlog claim cannot write through adapter \"{}\" because it does not support operation \"backlog.claim\"; switch to an adapter that declares claim capability or use a supported read-only backlog command",
            resolved.name()
        );
    }
    // A missing repo marker is tolerated; locality defaults to empty.
    let locality_id = crate::repo::marker::load(repo_root)
        .ok()
        .flatten()
        .map(|m| m.locality_id)
        .unwrap_or_default();
    let mut op_inputs = inputs;
    op_inputs.insert("ccd_id".to_owned(), ccd_id.to_string());
    let request = adapter::AdapterRequest::new(
        "backlog.claim".to_owned(),
        profile.as_str().to_owned(),
        repo_root.to_path_buf(),
        locality_id,
        op_inputs,
    );
    if !dry_run {
        adapter::execute_adapter_operation(&resolved, &request)?;
    }
    Ok(BacklogMutationReport {
        command: "backlog claim",
        ok: true,
        profile: profile.to_string(),
        ccd_id,
        dry_run,
        status: None,
    })
}
/// Sets the workflow status of backlog item `ccd_id` through the active
/// adapter.
///
/// Bails when the resolved adapter does not declare the `set-status`
/// capability. In dry-run mode the request is built but never executed.
pub fn set_status(
    repo_root: &Path,
    explicit_profile: Option<&str>,
    ccd_id: u64,
    status: &str,
    dry_run: bool,
) -> Result<BacklogMutationReport> {
    let profile = profile::resolve(explicit_profile)?;
    let layout = StateLayout::resolve(repo_root, profile.clone())?;
    let (resolved, inputs) = adapter::resolve_adapter(&layout, repo_root, None)?;
    if !resolve_capabilities(&resolved).set_status {
        bail!(
            "ccd backlog set-status cannot write through adapter \"{}\" because it does not support operation \"backlog.set_status\"; switch to an adapter that declares set-status capability or use a supported read-only backlog command",
            resolved.name()
        );
    }
    // A missing repo marker is tolerated; locality defaults to empty.
    let locality_id = crate::repo::marker::load(repo_root)
        .ok()
        .flatten()
        .map(|m| m.locality_id)
        .unwrap_or_default();
    let mut op_inputs = inputs;
    op_inputs.insert("ccd_id".to_owned(), ccd_id.to_string());
    op_inputs.insert("status".to_owned(), status.to_owned());
    let request = adapter::AdapterRequest::new(
        "backlog.set_status".to_owned(),
        profile.as_str().to_owned(),
        repo_root.to_path_buf(),
        locality_id,
        op_inputs,
    );
    if !dry_run {
        adapter::execute_adapter_operation(&resolved, &request)?;
    }
    Ok(BacklogMutationReport {
        command: "backlog set-status",
        ok: true,
        profile: profile.to_string(),
        ccd_id,
        dry_run,
        status: Some(status.to_owned()),
    })
}
/// Marks backlog item `ccd_id` complete through the active adapter.
///
/// Bails when the resolved adapter does not declare the `complete`
/// capability. In dry-run mode the request is built but never executed.
pub fn complete(
    repo_root: &Path,
    explicit_profile: Option<&str>,
    ccd_id: u64,
    dry_run: bool,
) -> Result<BacklogMutationReport> {
    let profile = profile::resolve(explicit_profile)?;
    let layout = StateLayout::resolve(repo_root, profile.clone())?;
    let (resolved, inputs) = adapter::resolve_adapter(&layout, repo_root, None)?;
    if !resolve_capabilities(&resolved).complete {
        bail!(
            "ccd backlog complete cannot write through adapter \"{}\" because it does not support operation \"backlog.complete\"; switch to an adapter that declares complete capability or use a supported read-only backlog command",
            resolved.name()
        );
    }
    // A missing repo marker is tolerated; locality defaults to empty.
    let locality_id = crate::repo::marker::load(repo_root)
        .ok()
        .flatten()
        .map(|m| m.locality_id)
        .unwrap_or_default();
    let mut op_inputs = inputs;
    op_inputs.insert("ccd_id".to_owned(), ccd_id.to_string());
    let request = adapter::AdapterRequest::new(
        "backlog.complete".to_owned(),
        profile.as_str().to_owned(),
        repo_root.to_path_buf(),
        locality_id,
        op_inputs,
    );
    if !dry_run {
        adapter::execute_adapter_operation(&resolved, &request)?;
    }
    Ok(BacklogMutationReport {
        command: "backlog complete",
        ok: true,
        profile: profile.to_string(),
        ccd_id,
        dry_run,
        status: None,
    })
}
/// Lists every adapter registered in the profile config, plus the currently
/// active one. When the active adapter is not part of the registry (i.e. it
/// was chosen by built-in fallback), a synthetic entry for it is prepended.
///
/// # Errors
/// Fails when the profile/layout/config cannot be resolved or when any
/// registered adapter fails validation.
pub fn adapters(repo_root: &Path, explicit_profile: Option<&str>) -> Result<BacklogAdaptersReport> {
    let profile = profile::resolve(explicit_profile)?;
    let layout = StateLayout::resolve(repo_root, profile.clone())?;
    let profile_config = backlog_config::load_profile_config(&layout)?;
    let (active_adapter, _inputs) = adapter::resolve_adapter(&layout, repo_root, None)?;
    let active_name = active_adapter.name().to_owned();
    let source = active_adapter.source_str();
    // Validate + resolve each registered adapter so its capabilities can be
    // reported; the first validation error aborts the listing.
    let mut entries: Vec<BacklogAdapterEntry> = profile_config
        .adapters
        .iter()
        .map(|(name, cfg)| {
            adapter::validate_registered_adapter(name, cfg)?;
            let resolved =
                adapter::build_resolved_adapter(name, Some(cfg), AdapterSource::ProfileDefault);
            Ok(BacklogAdapterEntry {
                name: name.clone(),
                kind: cfg.kind.as_config_str().to_owned(),
                provider: cfg
                    .provider
                    .as_ref()
                    .map(|provider| provider.as_config_str().to_owned()),
                capabilities: resolve_capabilities(&resolved),
            })
        })
        .collect::<Result<_>>()?;
    // Active-but-unregistered adapter: synthesize a builtin-fallback entry at
    // the top of the list so the `*` marker has something to attach to.
    if !entries.iter().any(|e| e.name == active_name) {
        let fallback =
            adapter::build_resolved_adapter(&active_name, None, AdapterSource::BuiltinFallback);
        entries.insert(
            0,
            BacklogAdapterEntry {
                capabilities: resolve_capabilities(&fallback),
                name: active_name.clone(),
                kind: "builtin".to_owned(),
                // Non-GitHub fallbacks reuse the adapter name as the provider
                // string; GitHub is normalized to its canonical provider id.
                provider: if fallback.is_github_adapter() {
                    Some("github-issues".to_owned())
                } else {
                    Some(active_name.clone())
                },
            },
        );
    }
    Ok(BacklogAdaptersReport {
        command: "backlog adapters",
        ok: true,
        profile: profile.to_string(),
        active_adapter: active_name,
        active_adapter_source: source,
        adapters: entries,
    })
}
/// An item selected for promotion (ready → in-progress) by `promote-next`.
#[derive(Serialize)]
pub struct BacklogPromoteNextItem {
    pub ccd_id: u64,
    pub content_trust: ContentTrust,
    pub title: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub priority_rank: Option<u64>,
    pub url: String,
}
/// A ready item left out of the promote-next batch, with the reason why.
#[derive(Serialize)]
pub struct BacklogPromoteNextExcludedItem {
    pub ccd_id: u64,
    pub content_trust: ContentTrust,
    pub title: String,
    pub reason: &'static str,
}
/// Report for `ccd backlog promote-next`: the chosen batch, the grouping that
/// produced it, and the ready items excluded from it.
#[derive(Serialize)]
pub struct BacklogPromoteNextReport {
    command: &'static str,
    ok: bool,
    dry_run: bool,
    profile: String,
    batch_size: usize,
    /// Kind string of the grouping key used to form the batch.
    group_key: &'static str,
    group_reason: String,
    selected: Vec<BacklogPromoteNextItem>,
    excluded: Vec<BacklogPromoteNextExcludedItem>,
}
impl CommandReport for BacklogPromoteNextReport {
    /// Producing a batch report (even an empty one) is a success.
    fn exit_code(&self) -> ExitCode {
        ExitCode::SUCCESS
    }

    /// Prints the batch header, each selected item with its rank, any
    /// excluded ready items, and a write-mode hint when previewing.
    fn render_text(&self) {
        let mode = match self.dry_run {
            true => "dry-run",
            false => "write",
        };
        println!(
            "Promote-next batch ({}) — group: {} \"{}\"",
            mode, self.group_key, self.group_reason
        );
        if self.selected.is_empty() {
            println!(" (no ready items eligible for promotion)");
        } else {
            for item in &self.selected {
                let rank = match item.priority_rank {
                    Some(r) => format!("rank {r}"),
                    None => "no rank".to_owned(),
                };
                println!(" [ccd#{}] {} ({})", item.ccd_id, item.title, rank);
            }
        }
        if !self.excluded.is_empty() {
            println!("\nExcluded ready items:");
            for ex in &self.excluded {
                println!(" [ccd#{}] {} — {}", ex.ccd_id, ex.title, ex.reason);
            }
        }
        if self.dry_run {
            println!(
                "\nWrite mode: pass --write to apply (status: ready \u{2192} in-progress via active adapter)."
            );
        }
    }
}
pub fn promote_next(
repo_root: &Path,
explicit_profile: Option<&str>,
batch_size: usize,
dry_run: bool,
) -> Result<BacklogPromoteNextReport> {
if batch_size == 0 {
bail!("--batch-size must be >= 1");
}
let profile = profile::resolve(explicit_profile)?;
let layout = StateLayout::resolve(repo_root, profile.clone())?;
if !dry_run {
let (resolved, _inputs) = adapter::resolve_adapter(&layout, repo_root, None)?;
let caps = resolve_capabilities(&resolved);
if !caps.promote_next {
let ops = caps.supported_ops();
bail!(
"adapter \"{}\" does not support `promote-next`. Supported operations: {}.",
resolved.name(),
if ops.is_empty() {
"none (declare capabilities in profile config.toml)".to_owned()
} else {
ops.join(", ")
}
);
}
let Some(cache) = backlog::load_cache(&layout)? else {
bail!(
"no workspace-local backlog cache is present at {}; run `ccd backlog pull --path . --repo owner/name` first",
crate::extensions::work_queue::work_queue_cache_path(&layout).display()
);
};
let batch = backlog::promote_next_batch(&cache, batch_size);
if batch.selected.is_empty() {
return Ok(BacklogPromoteNextReport {
command: "backlog promote-next",
ok: true,
dry_run: false,
profile: profile.to_string(),
batch_size,
group_key: batch.group_key.kind_str(),
group_reason: batch.group_reason,
selected: Vec::new(),
excluded: batch
.excluded
.iter()
.map(|ex| BacklogPromoteNextExcludedItem {
ccd_id: ex.ccd_id,
content_trust: ContentTrust::ExternalAdapterOutput,
title: backlog::display_title(&ex.title),
reason: ex.reason,
})
.collect(),
});
}
let marker = crate::repo::marker::load(repo_root).ok().flatten();
let locality_id = marker
.as_ref()
.map(|m| m.locality_id.clone())
.unwrap_or_default();
let (_, inputs) = adapter::resolve_adapter(&layout, repo_root, None)?;
for item in &batch.selected {
let request = adapter::AdapterRequest::new(
"backlog.set_status".to_owned(),
layout.profile().as_str().to_owned(),
repo_root.to_path_buf(),
locality_id.clone(),
{
let mut req_inputs = inputs.clone();
req_inputs.insert("ccd_id".to_owned(), item.ccd_id.to_string());
req_inputs.insert("status".to_owned(), "in-progress".to_owned());
req_inputs
},
);
adapter::execute_adapter_operation(&resolved, &request)?;
}
let selected = batch
.selected
.iter()
.map(|item| BacklogPromoteNextItem {
ccd_id: item.ccd_id,
content_trust: ContentTrust::ExternalAdapterOutput,
title: backlog::display_title(&item.title),
priority_rank: item.priority_rank,
url: item.url.clone(),
})
.collect();
let excluded = batch
.excluded
.iter()
.map(|ex| BacklogPromoteNextExcludedItem {
ccd_id: ex.ccd_id,
content_trust: ContentTrust::ExternalAdapterOutput,
title: backlog::display_title(&ex.title),
reason: ex.reason,
})
.collect();
return Ok(BacklogPromoteNextReport {
command: "backlog promote-next",
ok: true,
dry_run: false,
profile: profile.to_string(),
batch_size,
group_key: batch.group_key.kind_str(),
group_reason: batch.group_reason,
selected,
excluded,
});
}
let Some(cache) = backlog::load_cache(&layout)? else {
let pull_hint = pull_hint_for_selected_adapter(&layout, repo_root);
bail!(
"no workspace-local backlog cache is present at {}; run `{pull_hint}` first",
crate::extensions::work_queue::work_queue_cache_path(&layout).display(),
);
};
let batch = backlog::promote_next_batch(&cache, batch_size);
let selected = batch
.selected
.iter()
.map(|item| BacklogPromoteNextItem {
ccd_id: item.ccd_id,
content_trust: ContentTrust::ExternalAdapterOutput,
title: backlog::display_title(&item.title),
priority_rank: item.priority_rank,
url: item.url.clone(),
})
.collect();
let excluded = batch
.excluded
.iter()
.map(|ex| BacklogPromoteNextExcludedItem {
ccd_id: ex.ccd_id,
content_trust: ContentTrust::ExternalAdapterOutput,
title: backlog::display_title(&ex.title),
reason: ex.reason,
})
.collect();
Ok(BacklogPromoteNextReport {
command: "backlog promote-next",
ok: true,
dry_run,
profile: profile.to_string(),
batch_size,
group_key: batch.group_key.kind_str(),
group_reason: batch.group_reason,
selected,
excluded,
})
}
/// Fetches the GitHub issue backlog for `github_repo`, persists it as the
/// workspace-local cache, and reports the resulting queue snapshot.
pub fn pull_github(
    repo_root: &Path,
    explicit_profile: Option<&str>,
    github_repo: &str,
) -> Result<BacklogPullReport> {
    let profile = profile::resolve(explicit_profile)?;
    let layout = StateLayout::resolve(repo_root, profile.clone())?;
    ensure_profile_exists(&layout)?;
    let github_repo = validate_github_repo(github_repo)?;
    // Fetch upstream state, persist it, then re-read the summarized view.
    let cache = fetch_cache(repo_root, github_repo)?;
    backlog::write_cache(&layout, &cache)?;
    let cache_view = backlog::load_cache_view(&layout, 12)?;
    let issue_count = cache.items.len();
    let active_items = cache.active_items().len();
    Ok(BacklogPullReport {
        command: "backlog pull-github",
        ok: true,
        profile: profile.to_string(),
        repo: github_repo.to_owned(),
        issue_count,
        active_items,
        cache: cache_view,
    })
}
/// Migrates the repo-root `backlog.md` into canonical GitHub Issues for
/// `github_repo`, then refreshes the workspace-local backlog cache.
///
/// Migration-only: fails when `backlog.md` is missing or has no active
/// items, and — unless `adopt_existing` — when canonical issues already
/// exist upstream for any source `ccd#` id.
pub fn bootstrap_github(
    repo_root: &Path,
    explicit_profile: Option<&str>,
    github_repo: &str,
    adopt_existing: bool,
) -> Result<BacklogBootstrapReport> {
    let profile = profile::resolve(explicit_profile)?;
    let layout = StateLayout::resolve(repo_root, profile.clone())?;
    ensure_profile_exists(&layout)?;
    let github_repo = validate_github_repo(github_repo)?;
    let backlog_path = repo_root.join("backlog.md");
    // No source file at all: explain the migration-only contract.
    if !backlog_path.is_file() {
        bail!(
            "{}",
            bootstrap_github_migration_only_message(
                &backlog_path,
                github_repo,
                &format!("{} does not exist", backlog_path.display()),
            )
        );
    }
    let source_items = parse_source_backlog(&backlog_path)?;
    // A present but itemless backlog.md is also a migration-contract error.
    if source_items.is_empty() {
        bail!(
            "{}",
            bootstrap_github_migration_only_message(
                &backlog_path,
                github_repo,
                &format!(
                    "no active backlog items were found in {}",
                    backlog_path.display()
                ),
            )
        );
    }
    // Compare source items against canonical issues already upstream so we
    // can detect id collisions before creating anything.
    let existing = fetch_canonical_issues(repo_root, github_repo)?;
    let existing_by_id = index_items_by_ccd_id(&existing, github_repo)?;
    let conflicting = source_items
        .iter()
        .filter(|item| existing_by_id.contains_key(&item.ccd_id))
        .map(|item| item.ccd_id)
        .collect::<Vec<_>>();
    if !adopt_existing && !conflicting.is_empty() {
        let rendered = conflicting
            .iter()
            .map(|id| format!("ccd#{id}"))
            .collect::<Vec<_>>()
            .join(", ");
        bail!(
            "canonical GitHub issues already exist for {rendered}; rerun with `--adopt-existing` to reuse them"
        );
    }
    // Create issues only for ids that are not already upstream; existing
    // ones are adopted as-is (guarded above unless --adopt-existing).
    for item in &source_items {
        if existing_by_id.contains_key(&item.ccd_id) {
            continue;
        }
        create_github_issue(repo_root, github_repo, item)?;
    }
    // Re-fetch so the cache and report reflect the post-bootstrap upstream
    // state, including the freshly created issues.
    let cache = fetch_cache(repo_root, github_repo)?;
    backlog::write_cache(&layout, &cache)?;
    let cache_view = backlog::load_cache_view(&layout, 12)?;
    let cache_by_id = index_items_by_ccd_id(&cache.items, github_repo)?;
    let mut created = Vec::new();
    let mut adopted = Vec::new();
    // Classify every source item as created vs adopted based on whether it
    // existed upstream before this run.
    for item in &source_items {
        let issue = cache_by_id.get(&item.ccd_id).ok_or_else(|| {
            anyhow::anyhow!(
                "canonical issue for ccd#{} is missing after bootstrap refresh",
                item.ccd_id
            )
        })?;
        let issue_ref = BacklogIssueRef {
            ccd_id: issue.ccd_id,
            github_issue_number: issue.github_issue_number,
            content_trust: ContentTrust::ExternalAdapterOutput,
            title: backlog::display_title(&issue.title),
            url: issue.url.clone(),
        };
        if existing_by_id.contains_key(&item.ccd_id) {
            adopted.push(issue_ref);
        } else {
            created.push(issue_ref);
        }
    }
    Ok(BacklogBootstrapReport {
        command: "backlog bootstrap-github",
        ok: true,
        profile: profile.to_string(),
        repo: github_repo.to_owned(),
        source_backlog: backlog_path.display().to_string(),
        issue_count: cache.items.len(),
        active_items: cache.active_items().len(),
        created,
        adopted,
        cache: cache_view,
    })
}
/// Validates the cached backlog and reports failures and warnings
/// without mutating any state. A missing cache is itself a lint failure.
pub fn lint(repo_root: &Path, explicit_profile: Option<&str>) -> Result<BacklogLintReport> {
    let profile = profile::resolve(explicit_profile)?;
    let layout = StateLayout::resolve(repo_root, profile.clone())?;
    ensure_profile_exists(&layout)?;
    let cache_view = backlog::load_cache_view(&layout, 12)?;
    let loaded = backlog::load_cache(&layout)?;
    let Some(cache) = loaded else {
        // No cache on disk: report a single failure pointing at the pull
        // command for the currently selected adapter.
        let pull_hint = pull_hint_for_selected_adapter(&layout, repo_root);
        let diagnostics = vec![fail(format!(
            "no workspace-local backlog cache is present at {}; run `{pull_hint}` first",
            cache_view.path,
        ))];
        return Ok(BacklogLintReport {
            command: "backlog lint",
            ok: false,
            profile: profile.to_string(),
            cache: cache_view,
            issue_count: 0,
            active_items: 0,
            failures: diagnostics.len(),
            warnings: 0,
            diagnostics,
        });
    };
    let mut diagnostics = validate_cache(&cache);
    // A stale cache only warrants a warning, not a failure.
    if backlog::is_stale(&cache, backlog::now_epoch_s()?) {
        let pull_hint = pull_command_hint(Some(cache.provider.as_str()), Some(cache.repo.as_str()));
        diagnostics.push(warn(format!(
            "workspace-local backlog cache at {} is stale; refresh it with `{pull_hint}` before relying on queue order",
            cache_view.path,
        )));
    }
    // Tally both severities in a single pass.
    let (failures, warnings) =
        diagnostics
            .iter()
            .fold((0, 0), |(failures, warnings), diagnostic| {
                match diagnostic.severity {
                    "error" => (failures + 1, warnings),
                    "warning" => (failures, warnings + 1),
                    _ => (failures, warnings),
                }
            });
    Ok(BacklogLintReport {
        command: "backlog lint",
        ok: failures == 0,
        profile: profile.to_string(),
        cache: cache_view,
        issue_count: cache.items.len(),
        active_items: cache.active_items().len(),
        failures,
        warnings,
        diagnostics,
    })
}
/// Analyzes the cached backlog for grooming opportunities — merge and
/// obsolete candidates, stale dependencies, reorder suggestions — without
/// mutating any state. A missing cache yields an error entry in the
/// report (not a command failure).
pub fn groom(repo_root: &Path, explicit_profile: Option<&str>) -> Result<BacklogGroomReport> {
    let profile = profile::resolve(explicit_profile)?;
    let layout = StateLayout::resolve(repo_root, profile.clone())?;
    ensure_profile_exists(&layout)?;
    let cache_view = backlog::load_cache_view(&layout, 12)?;
    let Some(cache) = backlog::load_cache(&layout)? else {
        // No cache: emit a report marked not-ok with a pull hint.
        let pull_hint = pull_hint_for_selected_adapter(&layout, repo_root);
        let errors = vec![format!(
            "no workspace-local backlog cache is present at {}; run `{pull_hint}` first",
            cache_view.path,
        )];
        return Ok(BacklogGroomReport {
            command: "backlog groom",
            ok: false,
            profile: profile.to_string(),
            cache: cache_view,
            issue_count: 0,
            active_items: 0,
            merge_candidates: Vec::new(),
            obsolete_candidates: Vec::new(),
            stale_dependencies: Vec::new(),
            reorder_candidates: Vec::new(),
            warnings: Vec::new(),
            errors,
        });
    };
    let mut warnings = Vec::new();
    // Staleness is a warning only; grooming still runs on the old data.
    if backlog::is_stale(&cache, backlog::now_epoch_s()?) {
        let pull_hint = pull_command_hint(Some(cache.provider.as_str()), Some(cache.repo.as_str()));
        warnings.push(format!(
            "workspace-local backlog cache at {} is stale; refresh it with `{pull_hint}` before making queue decisions",
            cache_view.path,
        ));
    }
    let (merge_candidates, obsolete_candidates, stale_dependencies, reorder_candidates) =
        analyze_grooming_candidates(&cache);
    Ok(BacklogGroomReport {
        command: "backlog groom",
        ok: true,
        profile: profile.to_string(),
        cache: cache_view,
        issue_count: cache.items.len(),
        active_items: cache.active_items().len(),
        merge_candidates,
        obsolete_candidates,
        stale_dependencies,
        reorder_candidates,
        warnings,
        errors: Vec::new(),
    })
}
/// Fails unless the resolved profile's directory already exists on disk,
/// pointing the user at `ccd attach` otherwise.
fn ensure_profile_exists(layout: &StateLayout) -> Result<()> {
    let profile_root = layout.profile_root();
    if !profile_root.is_dir() {
        bail!(
            "profile `{}` does not exist at {}; bootstrap it with `ccd attach` before using backlog commands",
            layout.profile(),
            profile_root.display()
        );
    }
    Ok(())
}
/// Decides whether to persist a GitHub backlog binding in the repo
/// overlay config, based on the currently resolved adapter.
///
/// Activation happens only for the GitHub builtin (when no `repo` input
/// is already configured), or for the local-markdown builtin when it was
/// selected purely as the builtin fallback and no repo-root `backlog.md`
/// exists. GitLab builtins and external-command adapters are never
/// rebound.
fn maybe_activate_github_backlog_binding(
    layout: &StateLayout,
    repo_root: &Path,
    repo_override: Option<&str>,
) -> Result<()> {
    let (resolved, inputs) = adapter::resolve_adapter(layout, repo_root, None)?;
    let github_repo = if let Some(repo) = repo_override {
        // Caller passed `--repo owner/name` explicitly; validate it first.
        let github_repo = validate_github_repo(repo)?.to_owned();
        match &resolved {
            ResolvedAdapter::Builtin {
                provider: adapter::BuiltinProvider::GithubIssues,
                ..
            } => github_repo,
            ResolvedAdapter::Builtin {
                provider: adapter::BuiltinProvider::GitlabIssues,
                ..
            } => return Ok(()),
            ResolvedAdapter::Builtin {
                provider: adapter::BuiltinProvider::LocalMarkdown,
                ..
            } => {
                // Only take over from local markdown when it was merely the
                // builtin fallback and there is no backlog.md to serve it.
                if resolved.source() != AdapterSource::BuiltinFallback
                    || repo_root.join("backlog.md").is_file()
                {
                    return Ok(());
                }
                github_repo
            }
            ResolvedAdapter::ExternalCommand(_) => return Ok(()),
        }
    } else {
        // No explicit repo: try to infer one from the `origin` remote.
        match &resolved {
            ResolvedAdapter::Builtin {
                provider: adapter::BuiltinProvider::GithubIssues,
                ..
            } => {
                // A configured `repo` input means a binding already exists.
                if inputs.contains_key("repo") {
                    return Ok(());
                }
                detect_github_repo_from_origin(repo_root)?
            }
            ResolvedAdapter::Builtin {
                provider: adapter::BuiltinProvider::GitlabIssues,
                ..
            } => return Ok(()),
            ResolvedAdapter::Builtin {
                provider: adapter::BuiltinProvider::LocalMarkdown,
                ..
            } => {
                if resolved.source() != AdapterSource::BuiltinFallback
                    || repo_root.join("backlog.md").is_file()
                {
                    return Ok(());
                }
                // Without an override, a non-GitHub origin silently skips
                // activation instead of erroring.
                let Some(repo) = maybe_detect_github_repo_from_origin(repo_root)? else {
                    return Ok(());
                };
                repo
            }
            ResolvedAdapter::ExternalCommand(_) => return Ok(()),
        }
    };
    persist_github_backlog_binding(layout, repo_root, &github_repo)
}
/// Resolves `owner/name` from the `origin` remote, erroring with an
/// actionable message when the remote is absent, ambiguous, or not a
/// supported github.com URL.
fn detect_github_repo_from_origin(repo_root: &Path) -> Result<String> {
    let origin_url = match git_paths::origin_remote_url(repo_root)? {
        Some(url) => url,
        None => bail!(
            "could not activate the GitHub backlog from `origin` because no `origin` remote is configured; pass `--repo owner/name` explicitly"
        ),
    };
    match git_paths::parse_github_owner_repo(&origin_url) {
        Some(repo) => Ok(repo),
        // github.com URL we could not parse: ambiguous rather than unsupported.
        None if origin_url.contains("github.com") => bail!(
            "could not activate the GitHub backlog from `origin` because `{origin_url}` is ambiguous; pass `--repo owner/name` explicitly"
        ),
        None => bail!(
            "could not activate the GitHub backlog from `origin` because `{origin_url}` is not a supported github.com remote; pass `--repo owner/name` explicitly"
        ),
    }
}
/// Best-effort variant of origin detection: returns `Ok(None)` when no
/// origin exists or it is not a github.com remote, but still errors on an
/// ambiguous github.com URL.
fn maybe_detect_github_repo_from_origin(repo_root: &Path) -> Result<Option<String>> {
    let origin_url = match git_paths::origin_remote_url(repo_root)? {
        Some(url) => url,
        None => return Ok(None),
    };
    match git_paths::parse_github_owner_repo(&origin_url) {
        Some(repo) => Ok(Some(repo)),
        None if origin_url.contains("github.com") => bail!(
            "could not activate the GitHub backlog from `origin` because `{origin_url}` is ambiguous; pass `--repo owner/name` explicitly"
        ),
        None => Ok(None),
    }
}
/// Writes (or rewrites) the GitHub backlog binding into the attached
/// repo's overlay config file.
fn persist_github_backlog_binding(
    layout: &StateLayout,
    repo_root: &Path,
    github_repo: &str,
) -> Result<()> {
    let marker = match repo_marker::load(repo_root)? {
        Some(marker) => marker,
        None => bail!(
            "GitHub backlog activation requires an attached repo; run `ccd attach --path .` first"
        ),
    };
    let config_path = layout.repo_overlay_config_path(&marker.locality_id)?;
    // Load, mutate, and re-serialize the whole overlay config.
    let mut config = load_repo_overlay_config_table(&config_path)?;
    upsert_github_backlog_binding(&mut config, &config_path, github_repo)?;
    let rendered = toml::to_string_pretty(&config)
        .with_context(|| format!("failed to serialize {}", config_path.display()))?;
    write::replace_text(&config_path, &rendered, None)
        .with_context(|| format!("failed to write {}", config_path.display()))?;
    Ok(())
}
fn load_repo_overlay_config_table(path: &Path) -> Result<toml::Table> {
let raw = match fs::read_to_string(path) {
Ok(contents) => contents,
Err(error) if error.kind() == std::io::ErrorKind::NotFound => {
return Ok(toml::Table::new());
}
Err(error) => {
return Err(error).with_context(|| format!("failed to read {}", path.display()));
}
};
if raw.trim().is_empty() {
return Ok(toml::Table::new());
}
let value: toml::Value =
toml::from_str(&raw).with_context(|| format!("failed to parse {}", path.display()))?;
value
.as_table()
.cloned()
.ok_or_else(|| anyhow::anyhow!("{} must be a TOML table", path.display()))
}
fn upsert_github_backlog_binding(
root: &mut toml::Table,
path: &Path,
github_repo: &str,
) -> Result<()> {
let extensions = root
.entry("extensions".to_owned())
.or_insert_with(|| toml::Value::Array(Vec::new()));
let array = extensions
.as_array_mut()
.ok_or_else(|| anyhow::anyhow!("{} `extensions` entry must be an array", path.display()))?;
for entry in array.iter_mut() {
let table = entry.as_table_mut().ok_or_else(|| {
anyhow::anyhow!(
"{} `extensions` entries must be TOML tables",
path.display()
)
})?;
if table.get("type").and_then(|value| value.as_str()) != Some("backlog") {
continue;
}
table.insert("type".to_owned(), toml::Value::String("backlog".to_owned()));
table.insert("name".to_owned(), toml::Value::String("github".to_owned()));
table.insert(
"repo".to_owned(),
toml::Value::String(github_repo.to_owned()),
);
for key in ["kind", "provider", "command", "timeout_s", "capabilities"] {
table.remove(key);
}
return Ok(());
}
let mut table = toml::map::Map::new();
table.insert("type".to_owned(), toml::Value::String("backlog".to_owned()));
table.insert("name".to_owned(), toml::Value::String("github".to_owned()));
table.insert(
"repo".to_owned(),
toml::Value::String(github_repo.to_owned()),
);
array.push(toml::Value::Table(table));
Ok(())
}
/// Ensures `value` is a non-empty `owner/name` GitHub locator and returns
/// the trimmed form.
fn validate_github_repo(value: &str) -> Result<&str> {
    let trimmed = value.trim();
    if trimmed.is_empty() {
        bail!("GitHub repo cannot be empty");
    }
    // Exactly two non-empty segments separated by a single `/`.
    match trimmed.split('/').collect::<Vec<_>>().as_slice() {
        [owner, repo] if !owner.is_empty() && !repo.is_empty() => Ok(trimmed),
        _ => bail!("GitHub repo must use the `owner/name` format"),
    }
}
/// Ensures the GitLab project locator is non-empty and returns the
/// trimmed form (any non-blank string is accepted).
fn validate_gitlab_project_locator(value: &str) -> Result<&str> {
    let trimmed = value.trim();
    match trimmed.is_empty() {
        true => bail!("GitLab project locator cannot be empty"),
        false => Ok(trimmed),
    }
}
/// Error text explaining that `bootstrap-github` is migration-only, with
/// the concrete `reason` interpolated and a pointer to `backlog pull`.
fn bootstrap_github_migration_only_message(
    // Currently unused: the rendered message does not embed the path itself.
    _backlog_path: &Path,
    github_repo: &str,
    reason: &str,
) -> String {
    format!(
        "`ccd backlog bootstrap-github` is migration-only: it exports repo-root `backlog.md` items into canonical GitHub Issues. {reason}. If this repo already uses canonical GitHub issues, run `ccd backlog pull --path . --repo {github_repo}` instead to activate the GitHub backlog binding."
    )
}
/// Runs all structural checks over the cached backlog — cache header,
/// per-item shape, duplicate ids, active-item dependencies, and the
/// dependency graph — returning the accumulated diagnostics.
fn validate_cache(cache: &GitHubBacklogCache) -> Vec<BacklogLintDiagnostic> {
    let mut diagnostics = Vec::new();
    if cache.provider.trim().is_empty() {
        diagnostics.push(fail(
            "cached backlog provider must not be empty.".to_owned(),
        ));
    }
    if cache.repo.trim().is_empty() {
        diagnostics.push(fail("cached backlog repo must not be empty.".to_owned()));
    }
    // Index by ccd_id while walking the items; a displaced entry in the map
    // means a duplicate id.
    let mut seen = BTreeMap::<u64, &GitHubBacklogItem>::new();
    for item in &cache.items {
        if item.has_ccd_id() {
            if let Some(earlier) = seen.insert(item.ccd_id, item) {
                diagnostics.push(fail(format!(
                    "cached backlog contains duplicate `ccd#{}` entries (`#{}` and `#{}`).",
                    item.ccd_id, earlier.github_issue_number, item.github_issue_number
                )));
            }
        }
        validate_item_shape(item, cache.provider.as_str(), &mut diagnostics);
    }
    // Dependency checks only apply to items that are still active.
    cache
        .items
        .iter()
        .filter(|item| item.is_active())
        .for_each(|item| validate_active_dependencies(item, &mut diagnostics));
    validate_dependency_graph_cycles(cache, &mut diagnostics);
    diagnostics
}
fn validate_item_shape(
item: &GitHubBacklogItem,
provider: &str,
diagnostics: &mut Vec<BacklogLintDiagnostic>,
) {
if is_github_provider(provider) || is_gitlab_provider(provider) {
validate_hosted_item_shape(item, provider, diagnostics);
return;
}
if let Some(title_id) = backlog::parse_title_ccd_id(&item.title) {
if title_id != item.ccd_id {
diagnostics.push(fail(format!(
"cached `ccd#{}` title encodes mismatched id `ccd#{}`.",
item.ccd_id, title_id
)));
}
}
validate_allowed_value(item.ccd_id, "kind", &item.kind, ALLOWED_KINDS, diagnostics);
validate_allowed_value(
item.ccd_id,
"status",
&item.status,
ALLOWED_STATUSES,
diagnostics,
);
validate_non_empty_value(item.ccd_id, "effort", &item.effort, diagnostics);
validate_non_empty_value(item.ccd_id, "impact", &item.impact, diagnostics);
if item.section.trim().is_empty() {
diagnostics.push(fail(format!(
"cached `ccd#{}` section must not be empty.",
item.ccd_id
)));
} else if backlog::slugify(&item.section) != item.section {
diagnostics.push(fail(format!(
"cached `ccd#{}` section `{}` must already be a normalized slug.",
item.ccd_id, item.section
)));
}
if item.summary.trim().is_empty() {
diagnostics.push(fail(format!(
"cached `ccd#{}` summary must not be empty.",
item.ccd_id
)));
}
if item.github_state.eq_ignore_ascii_case("closed") && item.status != "done" {
diagnostics.push(fail(format!(
"cached `ccd#{}` is closed but has status `{}` instead of `done`.",
item.ccd_id, item.status
)));
}
if item.is_active() && item.status == "done" {
diagnostics.push(fail(format!(
"cached `ccd#{}` is still open but is marked `done`.",
item.ccd_id
)));
}
validate_module_labels(item, diagnostics);
}
fn validate_hosted_item_shape(
item: &GitHubBacklogItem,
provider: &str,
diagnostics: &mut Vec<BacklogLintDiagnostic>,
) {
if matches!(item.queue_state, GitHubQueueState::QueuePolicyConflict) {
diagnostics.push(fail(format!(
"{} `{}` has conflicting queue labels; multiple `ccd/priority:*` or `ccd/status:*` labels were found (exactly one of each is allowed).",
hosted_issue_label(provider),
backlog::provider_native_issue_ref(provider, item.github_issue_number)
)));
}
if matches!(item.metadata_status, MetadataStatus::Invalid) {
diagnostics.push(warn(invalid_metadata_warning(item, provider)));
}
if !item.has_ccd_id() {
if item.summary.trim().is_empty() {
diagnostics.push(fail(format!(
"{} `{}` must expose a non-empty summary or title.",
hosted_issue_label(provider),
backlog::provider_native_issue_ref(provider, item.github_issue_number)
)));
}
return;
}
if let Some(title_id) = backlog::parse_title_ccd_id(&item.title) {
if title_id != item.ccd_id {
diagnostics.push(fail(format!(
"cached `ccd#{}` title encodes mismatched id `ccd#{}`.",
item.ccd_id, title_id
)));
}
}
validate_allowed_value(item.ccd_id, "kind", &item.kind, ALLOWED_KINDS, diagnostics);
validate_allowed_value(
item.ccd_id,
"status",
&item.status,
ALLOWED_STATUSES,
diagnostics,
);
validate_non_empty_value(item.ccd_id, "effort", &item.effort, diagnostics);
validate_non_empty_value(item.ccd_id, "impact", &item.impact, diagnostics);
if item.section.trim().is_empty() {
diagnostics.push(fail(format!(
"cached `ccd#{}` section must not be empty.",
item.ccd_id
)));
} else if backlog::slugify(&item.section) != item.section {
diagnostics.push(fail(format!(
"cached `ccd#{}` section `{}` must already be a normalized slug.",
item.ccd_id, item.section
)));
}
if item.summary.trim().is_empty() {
diagnostics.push(fail(format!(
"cached `ccd#{}` summary must not be empty.",
item.ccd_id
)));
}
if item.github_state.eq_ignore_ascii_case("closed") && item.status != "done" {
diagnostics.push(fail(format!(
"cached `ccd#{}` is closed but has status `{}` instead of `done`.",
item.ccd_id, item.status
)));
}
if item.is_active() && item.status == "done" {
diagnostics.push(fail(format!(
"cached `ccd#{}` is still open but is marked `done`.",
item.ccd_id
)));
}
validate_module_labels(item, diagnostics);
}
/// Pushes a failure diagnostic when `value` is not one of the allowed
/// values for `field` on item `ccd_id`.
fn validate_allowed_value(
    ccd_id: u64,
    field: &str,
    value: &str,
    allowed: &[&str],
    diagnostics: &mut Vec<BacklogLintDiagnostic>,
) {
    if !allowed.contains(&value) {
        diagnostics.push(fail(format!(
            "cached `ccd#{}` has invalid {} `{}`; expected one of: {}.",
            ccd_id,
            field,
            value,
            allowed.join(", ")
        )));
    }
}
/// Pushes a failure diagnostic when `value` for `field` is blank after
/// trimming.
fn validate_non_empty_value(
    ccd_id: u64,
    field: &str,
    value: &str,
    diagnostics: &mut Vec<BacklogLintDiagnostic>,
) {
    if value.trim().is_empty() {
        diagnostics.push(fail(format!(
            "cached `ccd#{ccd_id}` must include a non-empty {field} value.",
        )));
    }
}
/// Checks that the canonical `module` field and the `ccd/module:*` labels
/// mirror each other exactly: either both absent, or one label whose
/// suffix equals the module value.
fn validate_module_labels(item: &GitHubBacklogItem, diagnostics: &mut Vec<BacklogLintDiagnostic>) {
    let module_labels = item
        .labels
        .iter()
        .filter_map(|label| label.strip_prefix("ccd/module:"))
        .collect::<Vec<_>>();
    match (&item.module, module_labels.as_slice()) {
        // Nothing to mirror on either side.
        (None, []) => {}
        // Labels present but no canonical field.
        (None, labels) => diagnostics.push(fail(format!(
            "cached `ccd#{}` has module label(s) `{}` but no canonical `module` field.",
            item.ccd_id,
            labels.join(", ")
        ))),
        // Exactly one label matching the module: valid.
        (Some(module), [label]) if *label == module => {}
        // Canonical field present but no mirroring label.
        (Some(module), []) => diagnostics.push(fail(format!(
            "cached `ccd#{}` module `{}` must be mirrored by exactly one `ccd/module:*` label.",
            item.ccd_id, module
        ))),
        // Wrong label value or more than one label.
        (Some(module), labels) => diagnostics.push(fail(format!(
            "cached `ccd#{}` module `{}` does not match `ccd/module:*` label(s) `{}`.",
            item.ccd_id,
            module,
            labels.join(", ")
        ))),
    }
}
/// Flags duplicate `depends_on` entries on an active item; items without
/// a `ccd#` id are skipped.
fn validate_active_dependencies(
    item: &GitHubBacklogItem,
    diagnostics: &mut Vec<BacklogLintDiagnostic>,
) {
    if !item.has_ccd_id() {
        return;
    }
    let mut unique = BTreeSet::new();
    for dependency_id in item.depends_on.iter().copied() {
        if unique.insert(dependency_id) {
            continue;
        }
        diagnostics.push(fail(format!(
            "cached `ccd#{}` lists duplicate dependency `ccd#{}`.",
            item.ccd_id, dependency_id
        )));
    }
}
fn validate_dependency_graph_cycles(
cache: &GitHubBacklogCache,
diagnostics: &mut Vec<BacklogLintDiagnostic>,
) {
for message in backlog::dep_graph_errors(cache) {
diagnostics.push(fail(message));
}
}
/// Builds an error-severity (`fail`) diagnostic with the given message.
fn fail(message: String) -> BacklogLintDiagnostic {
    BacklogLintDiagnostic {
        message,
        status: "fail",
        severity: "error",
    }
}
/// Builds a warning-severity (`warn`) diagnostic with the given message.
fn warn(message: String) -> BacklogLintDiagnostic {
    BacklogLintDiagnostic {
        message,
        status: "warn",
        severity: "warning",
    }
}
/// Renders a one-line cache status: `missing`/`empty` pass through,
/// hosted providers show open/closed counts, and everything else shows a
/// plain item count.
fn render_cache_status_summary(cache: &GitHubBacklogCacheView, issue_count: usize) -> String {
    // A cache labeled "empty" that nonetheless carries items is loaded.
    let base_status = match cache.status {
        "empty" if issue_count > 0 => "loaded",
        status => status,
    };
    if base_status == "missing" || base_status == "empty" {
        return base_status.to_owned();
    }
    let hosted = cache
        .provider
        .as_deref()
        .map(|provider| is_github_provider(provider) || is_gitlab_provider(provider))
        .unwrap_or(false);
    if hosted {
        let open_issues = cache.queue_summary.open_issues;
        let closed_issues = issue_count.saturating_sub(open_issues);
        return if closed_issues > 0 {
            format!("{base_status}, {open_issues} open, {closed_issues} closed")
        } else {
            format!("{base_status}, {open_issues} open")
        };
    }
    format!("{base_status}, {issue_count} items")
}
/// Prints a remediation hint when a hosted backlog has open issues but
/// none are queue-scoped (i.e. `queue_scoped == 0`), including a
/// provider-appropriate CLI example. Silent for non-hosted providers or
/// when the condition does not hold.
fn render_zero_queue_scope_hint(cache: &GitHubBacklogCacheView) {
    let Some(provider) = cache.provider.as_deref() else {
        return;
    };
    // Only hosted providers with open-but-unscoped issues get the hint.
    if !(is_github_provider(provider) || is_gitlab_provider(provider))
        || cache.queue_summary.open_issues == 0
        || cache.queue_summary.queue_scoped != 0
    {
        return;
    }
    println!(
        "Hint: no open {} are queue-scoped because none have exactly one `ccd/priority:*` label.",
        if is_gitlab_provider(provider) {
            "GitLab work items"
        } else {
            "GitHub issues"
        }
    );
    println!(
        "Add one of: `ccd/priority:active-now`, `ccd/priority:next`, `ccd/priority:later`, `ccd/priority:parked`."
    );
    // Provider-specific example command (glab vs gh).
    if is_gitlab_provider(provider) {
        println!(
            "Example: `glab issue update <iid> -R <group/project> --label \"ccd/priority:active-now\"`."
        );
    } else {
        println!("Example: `gh issue edit <number> --add-label \"ccd/priority:active-now\"`.");
    }
}
/// Formats issue numbers as a comma-separated `#n` list (empty string for
/// an empty slice).
fn render_issue_number_list(issue_numbers: &[u64]) -> String {
    let mut rendered = Vec::with_capacity(issue_numbers.len());
    for issue_number in issue_numbers {
        rendered.push(format!("#{issue_number}"));
    }
    rendered.join(", ")
}
/// Human-readable noun for a hosted work item on the given provider.
fn hosted_issue_label(provider: &str) -> &'static str {
    match is_gitlab_provider(provider) {
        true => "GitLab work item",
        false => "GitHub issue",
    }
}
/// Builds the warning text for an item whose optional `ccd-backlog`
/// metadata failed to parse, including the recorded parse reason when one
/// exists.
fn invalid_metadata_warning(item: &GitHubBacklogItem, provider: &str) -> String {
    let native_ref = backlog::provider_native_issue_ref(provider, item.github_issue_number);
    // No recorded reason: generic message.
    let Some(reason) = item.metadata_error.as_deref() else {
        return format!(
            "{} `{}` has invalid optional `ccd-backlog` metadata; queue visibility is preserved but enrichment is unavailable.",
            hosted_issue_label(provider),
            native_ref
        );
    };
    // Collapse runs of whitespace/newlines so the reason stays on one line.
    let reason = reason.split_whitespace().collect::<Vec<_>>().join(" ");
    format!(
        "{} `{}` has invalid optional `ccd-backlog` metadata: {}; queue visibility is preserved but enrichment is unavailable.",
        hosted_issue_label(provider),
        native_ref,
        reason
    )
}
/// Parses a comma-separated list of issue numbers and inclusive ranges
/// (`12`, `3-7`) into a de-duplicated list preserving first-seen order.
/// Errors on malformed numbers, inverted ranges, or an empty selection.
fn parse_issue_selector(value: &str) -> Result<Vec<u64>> {
    let mut parsed = Vec::new();
    let mut seen = BTreeSet::new();
    for token in value.split(',').map(str::trim).filter(|token| !token.is_empty()) {
        match token.split_once('-') {
            Some((start, end)) => {
                let start = parse_issue_number(start)?;
                let end = parse_issue_number(end)?;
                if start > end {
                    bail!("invalid issue range `{token}`: start must be <= end");
                }
                for issue_number in start..=end {
                    if seen.insert(issue_number) {
                        parsed.push(issue_number);
                    }
                }
            }
            None => {
                let issue_number = parse_issue_number(token)?;
                if seen.insert(issue_number) {
                    parsed.push(issue_number);
                }
            }
        }
    }
    if parsed.is_empty() {
        bail!("--issues must include at least one issue number or range");
    }
    Ok(parsed)
}
/// Parses a single trimmed issue number, attaching the offending token to
/// the error.
fn parse_issue_number(value: &str) -> Result<u64> {
    let trimmed = value.trim();
    trimmed
        .parse::<u64>()
        .with_context(|| format!("invalid issue number `{trimmed}`"))
}
fn parse_priority_bucket(value: &str) -> Result<backlog::GitHubPriorityBucket> {
match value {
"active-now" => Ok(backlog::GitHubPriorityBucket::ActiveNow),
"next" => Ok(backlog::GitHubPriorityBucket::Next),
"later" => Ok(backlog::GitHubPriorityBucket::Later),
"parked" => Ok(backlog::GitHubPriorityBucket::Parked),
_ => bail!(
"unsupported priority `{value}`; expected one of: active-now, next, later, parked"
),
}
}
/// True only for the four canonical `ccd/priority:*` queue labels.
fn is_priority_label(label: &str) -> bool {
    label
        .strip_prefix("ccd/priority:")
        .map(|bucket| matches!(bucket, "active-now" | "next" | "later" | "parked"))
        .unwrap_or(false)
}
fn analyze_grooming_candidates(
cache: &GitHubBacklogCache,
) -> (
Vec<BacklogMergeCandidate>,
Vec<BacklogObsoleteCandidate>,
Vec<BacklogStaleDependencyCandidate>,
Vec<BacklogReorderCandidate>,
) {
let require_ccd_id =
is_github_provider(cache.provider.as_str()) || is_gitlab_provider(cache.provider.as_str());
let mut active = cache
.items
.iter()
.filter(|item| item.is_active())
.filter(|item| !require_ccd_id || item.has_ccd_id())
.collect::<Vec<_>>();
active.sort_by_key(|item| item.ccd_id);
let mut completed = cache
.items
.iter()
.filter(|item| !item.is_active() || item.status == "done")
.filter(|item| !require_ccd_id || item.has_ccd_id())
.collect::<Vec<_>>();
completed.sort_by_key(|item| item.ccd_id);
let indexed = cache
.items
.iter()
.filter(|item| !require_ccd_id || item.has_ccd_id())
.map(|item| (item.ccd_id, item))
.collect::<BTreeMap<_, _>>();
(
collect_merge_candidates(&active),
collect_obsolete_candidates(&active, &completed),
collect_stale_dependency_candidates(&active, &indexed),
collect_reorder_candidates(&active, &indexed),
)
}
/// Flags pairs of active items that share grooming context and enough
/// significant terms to be potential merge candidates.
fn collect_merge_candidates(active: &[&GitHubBacklogItem]) -> Vec<BacklogMergeCandidate> {
    let mut candidates = Vec::new();
    // Compare each unordered pair exactly once via the skip(index + 1) walk.
    for (index, left) in active.iter().enumerate() {
        for right in active.iter().skip(index + 1) {
            if !share_grooming_context(left, right) {
                continue;
            }
            // NOTE(review): `matching_significant_terms` presumably returns
            // None below some overlap threshold — confirm in its definition.
            let Some(shared_terms) = matching_significant_terms(left, right) else {
                continue;
            };
            candidates.push(BacklogMergeCandidate {
                items: vec![issue_summary(left), issue_summary(right)],
                reason: format!(
                    "`ccd#{}` and `ccd#{}` share significant terms ({}) in `{}`; review whether they should merge or be differentiated more clearly.",
                    left.ccd_id,
                    right.ccd_id,
                    render_terms(&shared_terms),
                    left.section
                ),
                shared_terms,
            });
        }
    }
    candidates
}
/// Flags active items that overlap (shared context + significant terms)
/// with a completed item, suggesting the active one may be obsolete.
fn collect_obsolete_candidates(
    active: &[&GitHubBacklogItem],
    completed: &[&GitHubBacklogItem],
) -> Vec<BacklogObsoleteCandidate> {
    let mut candidates = Vec::new();
    for active_item in active {
        for completed_item in completed {
            // The same item can appear in both views (active + done status);
            // never compare it with itself.
            if active_item.ccd_id == completed_item.ccd_id {
                continue;
            }
            if !share_grooming_context(active_item, completed_item) {
                continue;
            }
            let Some(shared_terms) = matching_significant_terms(active_item, completed_item) else {
                continue;
            };
            candidates.push(BacklogObsoleteCandidate {
                active: issue_summary(active_item),
                completed: issue_summary(completed_item),
                reason: format!(
                    "`ccd#{}` overlaps with completed `ccd#{}` on ({}) in `{}`; review whether the active item is obsolete, superseded, or needs a sharper scope boundary.",
                    active_item.ccd_id,
                    completed_item.ccd_id,
                    render_terms(&shared_terms),
                    active_item.section
                ),
                shared_terms,
            });
        }
    }
    candidates
}
/// Flags dependency edges on active items that need cleanup: duplicates,
/// self-references, missing targets, and blockers that are already done.
fn collect_stale_dependency_candidates(
    active: &[&GitHubBacklogItem],
    indexed: &BTreeMap<u64, &GitHubBacklogItem>,
) -> Vec<BacklogStaleDependencyCandidate> {
    let mut candidates = Vec::new();
    for item in active {
        let mut seen = BTreeSet::new();
        for dependency_id in &item.depends_on {
            // Duplicate edge: the same blocker listed more than once.
            if !seen.insert(*dependency_id) {
                candidates.push(BacklogStaleDependencyCandidate {
                    item: issue_summary(item),
                    dependency_ccd_id: *dependency_id,
                    dependency: None,
                    reason: format!(
                        "`ccd#{}` lists duplicate dependency `ccd#{}`; collapse repeated blockers before reordering the queue.",
                        item.ccd_id, dependency_id
                    ),
                });
                continue;
            }
            // Self-reference: an item can never block itself.
            if *dependency_id == item.ccd_id {
                candidates.push(BacklogStaleDependencyCandidate {
                    item: issue_summary(item),
                    dependency_ccd_id: *dependency_id,
                    dependency: None,
                    reason: format!(
                        "`ccd#{}` depends on itself; remove the self-reference from canonical backlog metadata.",
                        item.ccd_id
                    ),
                });
                continue;
            }
            // Target id not present in the cache index at all.
            let Some(dependency) = indexed.get(dependency_id) else {
                candidates.push(BacklogStaleDependencyCandidate {
                    item: issue_summary(item),
                    dependency_ccd_id: *dependency_id,
                    dependency: None,
                    reason: format!(
                        "`ccd#{}` still references missing `ccd#{}`; clean the dependency list before reprioritizing.",
                        item.ccd_id, dependency_id
                    ),
                });
                continue;
            };
            // Blocker exists but is closed or marked done.
            if !dependency.is_active() || dependency.status == "done" {
                candidates.push(BacklogStaleDependencyCandidate {
                    item: issue_summary(item),
                    dependency_ccd_id: *dependency_id,
                    dependency: Some(issue_summary(dependency)),
                    reason: format!(
                        "`ccd#{}` still depends on completed `ccd#{}`; remove closed blockers from the active dependency graph.",
                        item.ccd_id, dependency_id
                    ),
                });
            }
        }
    }
    candidates
}
/// Flags queue-order problems among active items: duplicate priority
/// ranks and items ranked ahead of their own active blockers. The result
/// is sorted deterministically by kind, then participating ids.
fn collect_reorder_candidates(
    active: &[&GitHubBacklogItem],
    indexed: &BTreeMap<u64, &GitHubBacklogItem>,
) -> Vec<BacklogReorderCandidate> {
    let mut candidates = Vec::new();
    // Group ranked items by their priority rank to find collisions.
    let mut by_rank = BTreeMap::<u64, Vec<&GitHubBacklogItem>>::new();
    for item in active {
        if let Some(rank) = item.priority_rank {
            by_rank.entry(rank).or_default().push(*item);
        }
    }
    for (rank, items) in by_rank {
        if items.len() < 2 {
            continue;
        }
        let mut items = items.into_iter().map(issue_summary).collect::<Vec<_>>();
        items.sort_by_key(|item| item.ccd_id);
        let rendered_ids = items
            .iter()
            .map(|item| format!("ccd#{}", item.ccd_id))
            .collect::<Vec<_>>()
            .join(", ");
        candidates.push(BacklogReorderCandidate {
            kind: "duplicate_priority_rank",
            reason: format!(
                "{rendered_ids} share priority rank `{rank}`; review the queue so the active order stays total and deterministic."
            ),
            items,
        });
    }
    // An item with a lower rank than its active blocker "outranks" it
    // (per the message text, lower rank = earlier in the queue).
    for item in active {
        let Some(item_rank) = item.priority_rank else {
            continue;
        };
        for dependency_id in &item.depends_on {
            let Some(dependency) = indexed.get(dependency_id) else {
                continue;
            };
            if !dependency.is_active() {
                continue;
            }
            let Some(dependency_rank) = dependency.priority_rank else {
                continue;
            };
            if item_rank >= dependency_rank {
                continue;
            }
            candidates.push(BacklogReorderCandidate {
                kind: "dependency_priority_conflict",
                reason: format!(
                    "`ccd#{}` outranks its active blocker `ccd#{}` (priority `{}` vs `{}`); move the blocker earlier or lower the blocked item.",
                    item.ccd_id, dependency.ccd_id, item_rank, dependency_rank
                ),
                items: vec![issue_summary(dependency), issue_summary(item)],
            });
        }
    }
    // Deterministic output ordering: by kind, then by the ids involved.
    candidates.sort_by(|left, right| {
        let left_ids = left
            .items
            .iter()
            .map(|item| item.ccd_id)
            .collect::<Vec<_>>();
        let right_ids = right
            .items
            .iter()
            .map(|item| item.ccd_id)
            .collect::<Vec<_>>();
        left.kind.cmp(right.kind).then(left_ids.cmp(&right_ids))
    });
    candidates
}
/// Projects a cached backlog item into the lightweight summary shape used by
/// lint/groom reports, cloning only the display-facing fields. The summary is
/// tagged as external adapter output for content-trust tracking.
fn issue_summary(item: &GitHubBacklogItem) -> BacklogIssueSummary {
    BacklogIssueSummary {
        ccd_id: item.ccd_id,
        github_issue_number: item.github_issue_number,
        content_trust: ContentTrust::ExternalAdapterOutput,
        title: backlog::display_title(&item.title),
        url: item.url.clone(),
        status: item.status.clone(),
        section: item.section.clone(),
        priority_rank: item.priority_rank,
        module: item.module.clone(),
    }
}
/// Two backlog items share grooming context when they live in the same
/// section, or when both declare a module and the modules match.
fn share_grooming_context(left: &GitHubBacklogItem, right: &GitHubBacklogItem) -> bool {
    if left.section == right.section {
        return true;
    }
    match (&left.module, &right.module) {
        (Some(left_module), Some(right_module)) => left_module == right_module,
        _ => false,
    }
}
/// Returns the significant terms shared by two items when the overlap is
/// strong enough to suggest the items are related; `None` otherwise.
fn matching_significant_terms(
    left: &GitHubBacklogItem,
    right: &GitHubBacklogItem,
) -> Option<Vec<String>> {
    let left_terms = significant_terms(left);
    let right_terms = significant_terms(right);
    // BTreeSet iteration is sorted, so the shared list comes out in
    // deterministic lexicographic order.
    let mut shared_terms = Vec::new();
    for term in &left_terms {
        if right_terms.contains(term) {
            shared_terms.push(term.clone());
        }
    }
    if shared_terms.len() < 2 {
        return None;
    }
    // Require at least half of the smaller term set to overlap.
    let min_terms = left_terms.len().min(right_terms.len());
    (min_terms >= 2 && shared_terms.len() * 2 >= min_terms).then_some(shared_terms)
}
/// Tokenizes an item's display title plus summary into a set of normalized
/// (lowercased) terms, dropping short noise words and the ubiquitous `ccd`
/// marker so overlap comparisons stay meaningful.
fn significant_terms(item: &GitHubBacklogItem) -> BTreeSet<String> {
    let title = backlog::display_title(&item.title);
    let combined = format!("{title} {}", item.summary);
    let mut terms = BTreeSet::new();
    for raw_term in combined.split(|ch: char| !ch.is_ascii_alphanumeric()) {
        let normalized = raw_term.trim().to_ascii_lowercase();
        if normalized.len() >= 4 && normalized != "ccd" {
            terms.insert(normalized);
        }
    }
    terms
}
/// Renders shared terms as a comma-separated list of backtick-quoted words,
/// e.g. `` `alpha`, `beta` ``.
fn render_terms(terms: &[String]) -> String {
    let mut rendered = Vec::with_capacity(terms.len());
    for term in terms {
        rendered.push(format!("`{term}`"));
    }
    rendered.join(", ")
}
/// Builds a fresh GitHub backlog cache by listing issues via the `gh` CLI and
/// stamping the snapshot with the current epoch time.
///
/// NOTE(review): unlike `fetch_gitlab_cache` and
/// `fetch_local_markdown_cache`, this path does not call
/// `refresh_dispatch_states()` after construction — presumably
/// `GitHubBacklogCache::new` handles GitHub dispatch state itself; confirm
/// before relying on it.
fn fetch_cache(repo_root: &Path, github_repo: &str) -> Result<GitHubBacklogCache> {
    let items = fetch_github_issues(repo_root, github_repo)?;
    let fetched_at_epoch_s = backlog::now_epoch_s()?;
    Ok(GitHubBacklogCache::new(
        github_repo,
        fetched_at_epoch_s,
        items,
    ))
}
/// Builds a backlog cache from GitLab issues via the `glab` CLI.
///
/// The cache type is shared with GitHub, so the provider tag is overridden to
/// `gitlab-issues` and dispatch states are recomputed for the fetched items.
fn fetch_gitlab_cache(repo_root: &Path, gitlab_project: &str) -> Result<GitHubBacklogCache> {
    let items = fetch_gitlab_issues(repo_root, gitlab_project)?;
    let fetched_at_epoch_s = backlog::now_epoch_s()?;
    let mut cache = GitHubBacklogCache::new(gitlab_project, fetched_at_epoch_s, items);
    cache.provider = "gitlab-issues".to_owned();
    cache.refresh_dispatch_states();
    Ok(cache)
}
/// Builds a backlog cache from a repo-local `backlog.md` file so the tool can
/// operate without a hosted issue tracker.
///
/// Each parsed markdown row becomes a `GitHubBacklogItem` (the cache schema
/// is shared across providers): the source file path stands in for issue
/// URLs, the ccd id doubles as the "issue number", and the row's table
/// position determines the priority rank.
fn fetch_local_markdown_cache(repo_root: &Path) -> Result<GitHubBacklogCache> {
    let backlog_path = repo_root.join("backlog.md");
    let source_items = parse_source_backlog(&backlog_path).with_context(|| {
        format!(
            "local-markdown backlog adapter expects {} to exist and contain markdown backlog tables",
            backlog_path.display()
        )
    })?;
    let fetched_at_epoch_s = backlog::now_epoch_s()?;
    let source_path = backlog_path.display().to_string();
    let items = source_items
        .into_iter()
        .enumerate()
        .map(|(index, item)| {
            // Cloned up front because `item.claimed_by` is moved into
            // `assignees` at the end of the struct literal.
            let claimed_by = item.claimed_by.clone();
            let mut cached_item = GitHubBacklogItem {
                backlog_ref: BacklogRef {
                    provider: "local-markdown".to_owned(),
                    kind: "item".to_owned(),
                    id: item.ccd_id.to_string(),
                    url: source_path.clone(),
                },
                ccd_id: item.ccd_id,
                // No hosted issue exists, so the ccd id is reused here.
                github_issue_number: item.ccd_id,
                github_state: if item.status == "done" {
                    "closed".to_owned()
                } else {
                    "open".to_owned()
                },
                title: item.title,
                url: source_path.clone(),
                kind: item.kind,
                section: item.section,
                status: item.status,
                effort: item.effort,
                impact: item.impact,
                module: None,
                // Rank by table order, spaced by 10 to leave room for manual
                // inserts between rows.
                priority_rank: Some(((index + 1) as u64) * 10),
                claimed_by: claimed_by.clone(),
                depends_on: item.depends_on,
                roadmap_epic: None,
                spec_refs: Vec::new(),
                summary: item.summary,
                acceptance_criteria: Vec::new(),
                related_specs: Vec::new(),
                operator_notes: Vec::new(),
                labels: Vec::new(),
                assignees: item.claimed_by.into_iter().collect(),
                queue_state: GitHubQueueState::QueueCandidate,
                priority_label: None,
                metadata_status: MetadataStatus::Enriched,
                metadata_error: None,
                upstream_claim: if claimed_by.is_some() {
                    UpstreamClaimState::Claimed
                } else {
                    UpstreamClaimState::Unclaimed
                },
                // Placeholder only; replaced immediately below once the item
                // is fully constructed.
                dispatch_state: GitHubDispatchState::NotReady,
            };
            cached_item.dispatch_state =
                backlog::persisted_dispatch_state_for_provider("local-markdown", &cached_item);
            cached_item
        })
        .collect();
    let mut cache = GitHubBacklogCache::new(source_path, fetched_at_epoch_s, items);
    cache.provider = "local-markdown".to_owned();
    cache.refresh_dispatch_states();
    Ok(cache)
}
/// Lists all GitHub issues for `github_repo` and classifies each one into a
/// backlog item, failing fast on the first classification error.
fn fetch_github_issues(repo_root: &Path, github_repo: &str) -> Result<Vec<GitHubBacklogItem>> {
    list_github_issues(repo_root, github_repo)?
        .into_iter()
        .map(|issue| {
            let labels = issue.labels.into_iter().map(|label| label.name).collect();
            let assignees = issue
                .assignees
                .into_iter()
                .map(|assignee| assignee.login)
                .collect();
            backlog::classify_github_issue(
                issue.number,
                &issue.title,
                &issue.body,
                &issue.url,
                &issue.state,
                labels,
                assignees,
            )
        })
        // Collecting into Result short-circuits on the first Err, matching
        // the behavior of an early-returning loop.
        .collect()
}
/// Lists all GitLab issues for `gitlab_project` and classifies each one into
/// a backlog item, failing fast on the first classification error.
fn fetch_gitlab_issues(repo_root: &Path, gitlab_project: &str) -> Result<Vec<GitHubBacklogItem>> {
    list_gitlab_issues(repo_root, gitlab_project)?
        .into_iter()
        .map(|issue| {
            let assignees = issue
                .assignees
                .into_iter()
                .map(|assignee| assignee.username)
                .collect();
            backlog::classify_gitlab_issue(backlog::GitlabIssueInput {
                number: issue.iid,
                issue_type: issue.issue_type.as_deref(),
                title: &issue.title,
                // GitLab descriptions are optional; classify an empty body.
                body: issue.description.as_deref().unwrap_or(""),
                url: &issue.web_url,
                state: &issue.state,
                labels: issue.labels,
                assignees,
            })
        })
        // Collecting into Result short-circuits on the first Err, matching
        // the behavior of an early-returning loop.
        .collect()
}
/// Fetches GitHub issues and keeps only those carrying a canonical ccd id,
/// validating along the way that no two issues claim the same id.
fn fetch_canonical_issues(repo_root: &Path, github_repo: &str) -> Result<Vec<GitHubBacklogItem>> {
    let mut items = fetch_github_issues(repo_root, github_repo)?;
    items.retain(|item| item.has_ccd_id());
    // The index itself is discarded; building it enforces id uniqueness.
    index_items_by_ccd_id(&items, github_repo)?;
    Ok(items)
}
/// Runs `gh issue list` over all issue states and deserializes the JSON
/// payload into raw issue records.
fn list_github_issues(repo_root: &Path, github_repo: &str) -> Result<Vec<GhIssueRecord>> {
    let args = [
        "issue",
        "list",
        "--repo",
        github_repo,
        "--state",
        "all",
        "--limit",
        GITHUB_ISSUE_LIST_LIMIT,
        "--json",
        "number,title,body,url,state,labels,assignees",
    ]
    .iter()
    .map(|arg| (*arg).to_owned())
    .collect::<Vec<_>>();
    let output = run_gh(repo_root, &args)?;
    serde_json::from_str(&output).context("failed to parse `gh issue list` JSON output")
}
/// Runs `glab issue list` over all issue states and deserializes the JSON
/// payload into raw issue records.
fn list_gitlab_issues(repo_root: &Path, gitlab_project: &str) -> Result<Vec<GlabIssueRecord>> {
    let args = ["issue", "list", "--all", "-R", gitlab_project, "-O", "json"]
        .iter()
        .map(|arg| (*arg).to_owned())
        .collect::<Vec<_>>();
    let output = run_glab(repo_root, &args)?;
    serde_json::from_str(&output).context("failed to parse `glab issue list` JSON output")
}
/// Creates the GitHub label for `priority` if the repo does not already have
/// it. Returns `true` when a label was created, `false` when it existed.
fn ensure_github_label_exists(
    repo_root: &Path,
    github_repo: &str,
    priority: backlog::GitHubPriorityBucket,
) -> Result<bool> {
    let target = priority.as_label();
    let already_present = list_github_labels(repo_root, github_repo)?
        .iter()
        .any(|label| label.name == target);
    if already_present {
        return Ok(false);
    }
    let (color, description) = priority_label_style(priority);
    let args = [
        "label",
        "create",
        target,
        "--repo",
        github_repo,
        "--color",
        color,
        "--description",
        description,
    ]
    .iter()
    .map(|arg| (*arg).to_owned())
    .collect::<Vec<_>>();
    run_gh(repo_root, &args)?;
    Ok(true)
}
/// Lists label names for `github_repo` via `gh label list` (JSON output,
/// capped at 200 labels).
fn list_github_labels(repo_root: &Path, github_repo: &str) -> Result<Vec<GhLabelRecord>> {
    let output = run_gh(
        repo_root,
        &[
            "label".to_owned(),
            "list".to_owned(),
            "--repo".to_owned(),
            github_repo.to_owned(),
            "--limit".to_owned(),
            "200".to_owned(),
            "--json".to_owned(),
            "name".to_owned(),
        ],
    )?;
    serde_json::from_str(&output).context("failed to parse `gh label list` JSON output")
}
/// Maps a priority bucket to the `(color, description)` pair used when
/// auto-creating queue labels. Colors are GitHub label hex codes without the
/// leading `#`.
fn priority_label_style(priority: backlog::GitHubPriorityBucket) -> (&'static str, &'static str) {
    match priority {
        backlog::GitHubPriorityBucket::ActiveNow => ("B60205", "CCD queue: active now"),
        backlog::GitHubPriorityBucket::Next => ("FBCA04", "CCD queue: next"),
        backlog::GitHubPriorityBucket::Later => ("0E8A16", "CCD queue: later"),
        backlog::GitHubPriorityBucket::Parked => ("6E7781", "CCD queue: parked"),
    }
}
/// Creates a GitHub issue for a backlog item parsed from `backlog.md`.
///
/// The issue body embeds a canonical `ccd-backlog` TOML metadata block
/// followed by templated Summary / Acceptance Criteria / Dependencies /
/// Related Specs / Operator Notes sections recording the migration source.
fn create_github_issue(
    repo_root: &Path,
    github_repo: &str,
    item: &SourceBacklogItem,
) -> Result<()> {
    let title = item.title.clone();
    // Fields absent from the markdown source (module, rank, epic, specs) are
    // left unset; enrichment happens later against the live issue.
    let metadata = CanonicalIssueMetadata {
        id: item.ccd_id,
        kind: item.kind.clone(),
        section: item.section.clone(),
        status: item.status.clone(),
        effort: item.effort.clone(),
        impact: item.impact.clone(),
        module: None,
        priority_rank: None,
        claimed_by: item.claimed_by.clone(),
        depends_on: item.depends_on.clone(),
        roadmap_epic: None,
        spec_refs: Vec::new(),
    };
    let metadata_block =
        toml::to_string(&metadata).context("failed to serialize backlog metadata")?;
    // Dependencies render as a bullet list; the trailing newline keeps the
    // following "## Related Specs" heading well-formed in the template.
    let dependencies = if item.depends_on.is_empty() {
        "- None.\n".to_owned()
    } else {
        item.depends_on
            .iter()
            .map(|id| format!("- `ccd#{id}`"))
            .collect::<Vec<_>>()
            .join("\n")
            + "\n"
    };
    let body = format!(
        "```ccd-backlog\n{metadata_block}```\n\n## Summary\n\n{}\n\n## Acceptance Criteria\n\n- Complete the work described in the summary.\n- Keep the canonical `ccd-backlog` metadata block aligned with the issue state.\n\n## Dependencies\n\n{}## Related Specs\n\n- None recorded in the source backlog item.\n\n## Operator Notes\n\n- Migrated from `backlog.md` section `{}`.\n- Source row metadata: effort=`{}`, impact=`{}`.\n",
        item.summary,
        dependencies,
        item.section,
        item.effort,
        item.impact
    );
    run_gh(
        repo_root,
        &[
            "issue".to_owned(),
            "create".to_owned(),
            "--repo".to_owned(),
            github_repo.to_owned(),
            "--title".to_owned(),
            title,
            "--body".to_owned(),
            body,
        ],
    )?;
    Ok(())
}
fn run_gh(repo_root: &Path, args: &[String]) -> Result<String> {
let output = Command::new("gh")
.args(args)
.current_dir(repo_root)
.env("GH_PAGER", "cat")
.env("NO_COLOR", "1")
.output()
.with_context(|| {
format!(
"failed to run `gh {}` in {}",
args.join(" "),
repo_root.display()
)
})?;
if !output.status.success() {
let stderr = String::from_utf8_lossy(&output.stderr);
let stdout = String::from_utf8_lossy(&output.stdout);
let detail = if !stderr.trim().is_empty() {
stderr.trim().to_owned()
} else if !stdout.trim().is_empty() {
stdout.trim().to_owned()
} else {
"GitHub CLI returned a non-zero exit status".to_owned()
};
bail!("`gh {}` failed: {detail}", args.join(" "));
}
String::from_utf8(output.stdout).context("GitHub CLI returned non-UTF-8 output")
}
fn run_glab(repo_root: &Path, args: &[String]) -> Result<String> {
let output = Command::new("glab")
.args(args)
.current_dir(repo_root)
.env("NO_COLOR", "1")
.output()
.with_context(|| {
format!(
"failed to run `glab {}` in {}",
args.join(" "),
repo_root.display()
)
})?;
if !output.status.success() {
let stderr = String::from_utf8_lossy(&output.stderr);
let stdout = String::from_utf8_lossy(&output.stdout);
let detail = if !stderr.trim().is_empty() {
stderr.trim().to_owned()
} else if !stdout.trim().is_empty() {
stdout.trim().to_owned()
} else {
"GitLab CLI returned a non-zero exit status".to_owned()
};
bail!("`glab {}` failed: {detail}", args.join(" "));
}
String::from_utf8(output.stdout).context("GitLab CLI returned non-UTF-8 output")
}
/// Parses `backlog.md` into source backlog items.
///
/// Walks the file line by line: `## ` headings set the current section, and
/// each markdown table (header row followed by a `---` separator) is consumed
/// row by row until the first non-`|` line. Struck-through (completed) rows
/// are skipped by the row parser. Fails if two active rows share an item id.
fn parse_source_backlog(path: &Path) -> Result<Vec<SourceBacklogItem>> {
    let contents =
        fs::read_to_string(path).with_context(|| format!("failed to read {}", path.display()))?;
    let lines = contents.lines().collect::<Vec<_>>();
    let mut items = Vec::new();
    let mut section = String::new();
    let mut index = 0usize;
    while index < lines.len() {
        let line = lines[index].trim();
        if let Some(heading) = line.strip_prefix("## ") {
            section = heading.trim().to_owned();
            index += 1;
            continue;
        }
        if is_table_header(lines.get(index), lines.get(index + 1)) {
            let headers = parse_markdown_row(lines[index]);
            // Skip the header row and the `---` separator row.
            index += 2;
            while index < lines.len() {
                let row_line = lines[index].trim();
                if !row_line.starts_with('|') {
                    break;
                }
                let row = parse_markdown_row(row_line);
                // Fix: `&section` had been mojibake-garbled to `§ion`
                // (a decoded `&sect;` HTML entity); restored the reference.
                if let Some(item) = parse_source_backlog_row(path, &section, &headers, &row)? {
                    items.push(item);
                }
                index += 1;
            }
            continue;
        }
        index += 1;
    }
    // Reject duplicate active item ids across all tables in the file.
    let mut seen = BTreeSet::new();
    for item in &items {
        if !seen.insert(item.ccd_id) {
            bail!(
                "backlog contains a duplicate active item id `#{}`",
                item.ccd_id
            );
        }
    }
    Ok(items)
}
/// A table header is a `|`-prefixed line immediately followed by a markdown
/// separator row; either line missing means "not a table".
fn is_table_header(current: Option<&&str>, next: Option<&&str>) -> bool {
    match (current, next) {
        (Some(header), Some(separator)) => {
            header.trim().starts_with('|') && is_table_separator(separator)
        }
        _ => false,
    }
}
/// A markdown separator row (e.g. `|---|:--:|`): starts with `|`, contains a
/// `---` run, and uses only pipes, dashes, colons, and spaces.
fn is_table_separator(line: &str) -> bool {
    let trimmed = line.trim();
    if !trimmed.starts_with('|') || !trimmed.contains("---") {
        return false;
    }
    trimmed.chars().all(|ch| "|-: ".contains(ch))
}
/// Converts one markdown table row into a `SourceBacklogItem`, using the
/// table's header names to locate columns.
///
/// Returns `Ok(None)` for rows that should be skipped (empty rows, rows
/// whose id cell is struck through, or rows with no id cell). Fails when the
/// table lacks the required `#` and `Item`/`Ticket` columns, when a row has
/// no summary text, or when a status cell is unrecognized.
fn parse_source_backlog_row(
    path: &Path,
    section: &str,
    headers: &[String],
    row: &[String],
) -> Result<Option<SourceBacklogItem>> {
    if row.is_empty() {
        return Ok(None);
    }
    // Map normalized header text -> column index for name-based lookups.
    let header_map = headers
        .iter()
        .enumerate()
        .map(|(index, header)| (normalize_header(header), index))
        .collect::<BTreeMap<_, _>>();
    let id_index = header_map
        .get("#")
        .copied()
        .ok_or_else(|| anyhow::anyhow!("table in {} is missing a `#` column", path.display()))?;
    // `Item` is the standard column; Radar tables use `Ticket` instead.
    let item_index = header_map
        .get("item")
        .copied()
        .or_else(|| header_map.get("ticket").copied())
        .ok_or_else(|| {
            anyhow::anyhow!(
                "table in {} is missing an `Item` or `Ticket` column",
                path.display()
            )
        })?;
    let Some(id_cell) = row.get(id_index) else {
        return Ok(None);
    };
    // `None` here means a struck-through (completed) row — skip it.
    let Some(ccd_id) = parse_active_ccd_id(id_cell)? else {
        return Ok(None);
    };
    let item_text = row
        .get(item_index)
        .map(|value| clean_markdown_text(value))
        .unwrap_or_default();
    if item_text.is_empty() {
        bail!(
            "backlog item `#{ccd_id}` in {} is missing summary text",
            path.display()
        );
    }
    let section_slug = backlog::slugify(section);
    // Optional columns default to "unknown" when missing or blank.
    let effort = header_map
        .get("effort")
        .and_then(|index| row.get(*index))
        .map(|value| backlog::slugify(value))
        .filter(|value| !value.is_empty())
        .unwrap_or_else(|| "unknown".to_owned());
    let impact = header_map
        .get("impact")
        .and_then(|index| row.get(*index))
        .map(|value| backlog::slugify(value))
        .filter(|value| !value.is_empty())
        .unwrap_or_else(|| "unknown".to_owned());
    // Several header spellings are accepted for the claimant column; a bare
    // `-` cell means unclaimed.
    let claimed_by = header_map
        .get("claimed by")
        .or_else(|| header_map.get("claimed_by"))
        .or_else(|| header_map.get("agent"))
        .or_else(|| header_map.get("owner"))
        .and_then(|index| row.get(*index))
        .map(|value| clean_markdown_text(value))
        .filter(|value| !value.is_empty() && value != "-");
    let depends_on = header_map
        .get("dependencies")
        .or_else(|| header_map.get("deps"))
        .and_then(|index| row.get(*index))
        .map(|value| parse_dependency_ids(value))
        .unwrap_or_default();
    // Radar-section rows and `Discovery:`-prefixed items are discoveries.
    let kind = if section.eq_ignore_ascii_case("Radar")
        || item_text.to_ascii_lowercase().starts_with("discovery:")
    {
        "discovery"
    } else {
        "item"
    };
    // Explicit status wins; otherwise Radar rows park and others are ready.
    let status = header_map
        .get("status")
        .and_then(|index| row.get(*index))
        .map(|value| parse_markdown_status(value))
        .transpose()?
        .unwrap_or_else(|| {
            if section.eq_ignore_ascii_case("Radar") {
                "parked".to_owned()
            } else {
                "ready".to_owned()
            }
        });
    Ok(Some(SourceBacklogItem {
        ccd_id,
        title: item_text.clone(),
        summary: item_text,
        kind: kind.to_owned(),
        section: section_slug,
        status: status.to_owned(),
        effort,
        impact,
        claimed_by,
        depends_on,
    }))
}
/// Splits a markdown table row into trimmed cell strings, dropping the
/// leading and trailing pipe delimiters.
fn parse_markdown_row(line: &str) -> Vec<String> {
    let inner = line.trim().trim_matches('|');
    let mut cells = Vec::new();
    for cell in inner.split('|') {
        cells.push(cell.trim().to_owned());
    }
    cells
}
/// Normalizes a table header cell for lookup: markup is stripped, and
/// everything except the literal `#` id column is lowercased.
fn normalize_header(value: &str) -> String {
    let cleaned = clean_markdown_text(value);
    match cleaned.as_str() {
        "#" => cleaned,
        _ => cleaned.to_ascii_lowercase(),
    }
}
/// Extracts the numeric item id from an id cell.
///
/// Returns `Ok(None)` for struck-through (`~~…~~`) cells, which mark
/// completed rows; errors when the cell contains no digits at all.
fn parse_active_ccd_id(cell: &str) -> Result<Option<u64>> {
    let trimmed = cell.trim();
    if trimmed.contains("~~") {
        return Ok(None);
    }
    let digits: String = trimmed.chars().filter(char::is_ascii_digit).collect();
    if digits.is_empty() {
        bail!("backlog row has an invalid item id cell `{trimmed}`");
    }
    let id = digits
        .parse()
        .with_context(|| format!("failed to parse backlog item id from `{trimmed}`"))?;
    Ok(Some(id))
}
/// Extracts every numeric id from a dependency cell such as `` `13`, `31` ``.
///
/// Runs of ASCII digits are treated as ids; all other characters act as
/// separators. Runs that do not fit in a `u64` are silently skipped,
/// preserving the original lenient handling of malformed cells. The manual
/// accumulator loop (with its duplicated end-of-input flush) is replaced by
/// an equivalent split/filter pipeline.
fn parse_dependency_ids(cell: &str) -> Vec<u64> {
    cell.split(|ch: char| !ch.is_ascii_digit())
        .filter(|run| !run.is_empty())
        .filter_map(|run| run.parse().ok())
        .collect()
}
/// Strips the markdown markup the backlog tables use (`**`, `~~`, backticks)
/// and collapses all whitespace runs into single spaces.
fn clean_markdown_text(value: &str) -> String {
    let without_markup = value.replace("**", "").replace("~~", "").replace('`', "");
    // split_whitespace already discards leading/trailing whitespace, so the
    // joined result never needs the extra `.trim().to_owned()` round-trip the
    // previous version performed.
    without_markup
        .split_whitespace()
        .collect::<Vec<_>>()
        .join(" ")
}
/// Maps a free-form status cell onto one of the canonical backlog statuses.
///
/// Empty or `-` cells default to `ready`; unrecognized values are an error
/// listing the accepted spellings.
fn parse_markdown_status(value: &str) -> Result<String> {
    let cleaned = clean_markdown_text(value);
    let normalized = cleaned.trim().to_ascii_lowercase();
    if normalized.is_empty() || normalized == "-" {
        return Ok("ready".to_owned());
    }
    let canonical = match normalized.as_str() {
        "ready" | "queued" | "queue" | "todo" | "open" => "ready",
        "active" | "in-progress" | "in progress" | "claimed" => "in-progress",
        "blocked" => "blocked",
        "parked" | "later" => "parked",
        "done" | "closed" | "complete" | "completed" => "done",
        _ => bail!(
            "backlog status cell `{cleaned}` is not recognized; use one of ready, active, blocked, parked, or done"
        ),
    };
    Ok(canonical.to_owned())
}
/// Indexes items by canonical ccd id, skipping items without one.
///
/// Fails if two items claim the same id, since the id is the cross-provider
/// identity for backlog entries.
fn index_items_by_ccd_id<'a>(
    items: &'a [GitHubBacklogItem],
    github_repo: &str,
) -> Result<BTreeMap<u64, &'a GitHubBacklogItem>> {
    let mut indexed = BTreeMap::new();
    for item in items.iter().filter(|item| item.has_ccd_id()) {
        let previous = indexed.insert(item.ccd_id, item);
        if previous.is_some() {
            bail!(
                "GitHub repo `{github_repo}` contains duplicate canonical issue ids for `ccd#{}`",
                item.ccd_id
            );
        }
    }
    Ok(indexed)
}
#[cfg(test)]
mod tests {
    use super::*;
    // Covers the happy path of `parse_source_backlog`: section slugs, the
    // `Claimed By` column, dependency ids, the struck-through-row skip, and
    // the Radar-section discovery/parked defaults.
    #[test]
    fn parses_active_backlog_rows_from_markdown_tables() {
        let temp = tempfile::tempdir().expect("tempdir");
        let backlog_path = temp.path().join("backlog.md");
        fs::write(
            &backlog_path,
            r#"# Backlog
## GitHub Integration
| # | Item | Claimed By | Effort | Impact | Dependencies |
|---|------|------------|--------|--------|-------------|
| 13 | **Bootstrap GitHub backlog** - export items. | `codex` | Medium | High | `31` |
| ~~14~~ | ~~Done item.~~ | ~~`codex`~~ | ~~Medium~~ | ~~High~~ | ~~`13`~~ |
## Radar
| # | Ticket | Trigger | Next review |
|---|--------|---------|-------------|
| 25 | Discovery: host adapters | Later | Later |
"#,
        )
        .expect("backlog");
        let items = parse_source_backlog(&backlog_path).expect("items parse");
        // Row 14 is struck through, so only two active items survive.
        assert_eq!(items.len(), 2);
        assert_eq!(items[0].ccd_id, 13);
        assert_eq!(items[0].section, "github-integration");
        assert_eq!(items[0].claimed_by.as_deref(), Some("codex"));
        assert_eq!(items[0].depends_on, vec![31]);
        assert_eq!(items[1].kind, "discovery");
        assert_eq!(items[1].status, "parked");
    }
    // Covers the alternate column spellings (`Owner`, `Deps`), an explicit
    // `Status` column, and the `-` placeholder meaning unclaimed / no deps.
    #[test]
    fn parses_extended_markdown_backlog_columns() {
        let temp = tempfile::tempdir().expect("tempdir");
        let backlog_path = temp.path().join("backlog.md");
        fs::write(
            &backlog_path,
            r#"# Backlog
## Shared Queue
| # | Item | Owner | Status | Effort | Impact | Deps |
|---|------|-------|--------|--------|--------|------|
| 17 | Agent input hardening | @alice | active | Low | Med-High | `9` |
| 18 | Queue follow-up | - | queued | Low | Medium | - |
"#,
        )
        .expect("backlog");
        let items = parse_source_backlog(&backlog_path).expect("items parse");
        assert_eq!(items.len(), 2);
        assert_eq!(items[0].claimed_by.as_deref(), Some("@alice"));
        // `active` normalizes to the canonical `in-progress` status.
        assert_eq!(items[0].status, "in-progress");
        assert_eq!(items[0].depends_on, vec![9]);
        // `queued` normalizes to `ready`.
        assert_eq!(items[1].status, "ready");
    }
}