use std::collections::{BTreeMap, BTreeSet};
use std::fs;
use std::path::Path;
use std::time::{SystemTime, UNIX_EPOCH};
#[cfg(any(feature = "extension-backlog", test))]
use anyhow::bail;
use anyhow::{Context, Result};
use serde::{Deserialize, Serialize};
use crate::content_trust::ContentTrust;
use crate::paths::state::StateLayout;
#[cfg(feature = "extension-backlog")]
use crate::paths::write;
// Format version stamped into newly written cache files; bump on schema changes.
#[cfg(any(feature = "extension-backlog", test))]
const CACHE_SCHEMA_VERSION: u32 = 2;
// A cache fetched longer ago than this (24h) is reported as "stale" by `is_stale`.
const STALE_AFTER_SECS: u64 = 24 * 60 * 60;
// Opening fence line that marks canonical metadata at the start of an issue body.
#[cfg(any(feature = "extension-backlog", test))]
const CANONICAL_METADATA_FENCE: &str = "```ccd-backlog";
// Provider assumed for caches written before the `provider` field existed
// (used as the serde default for `GitHubBacklogCache::provider`).
const DEFAULT_WORK_QUEUE_PROVIDER: &str = "github-issues";
// GitHub label strings that map onto `GitHubPriorityBucket` variants.
#[cfg(any(feature = "extension-backlog", test))]
const GITHUB_PRIORITY_ACTIVE_NOW: &str = "ccd/priority:active-now";
#[cfg(any(feature = "extension-backlog", test))]
const GITHUB_PRIORITY_NEXT: &str = "ccd/priority:next";
#[cfg(any(feature = "extension-backlog", test))]
const GITHUB_PRIORITY_LATER: &str = "ccd/priority:later";
#[cfg(any(feature = "extension-backlog", test))]
const GITHUB_PRIORITY_PARKED: &str = "ccd/priority:parked";
/// Canonical machine-readable metadata embedded at the top of an issue body
/// inside a `ccd-backlog` fence (see `extract_metadata_block`).
#[cfg(any(feature = "extension-backlog", test))]
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct CanonicalIssueMetadata {
    // Canonical ccd backlog id; cross-checked against the `[ccd#N]` title prefix
    // in `classify_github_issue`.
    pub id: u64,
    pub kind: String,
    pub section: String,
    pub status: String,
    pub effort: String,
    pub impact: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub module: Option<String>,
    // Lower rank dispatches earlier; `None` sorts last (see cache ordering).
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub priority_rank: Option<u64>,
    // `agent_owner` is accepted as a legacy alias for this key.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    #[serde(alias = "agent_owner")]
    pub claimed_by: Option<String>,
    // ccd ids this item is blocked on.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub depends_on: Vec<u64>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub roadmap_epic: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub spec_refs: Vec<String>,
}
/// Provider-qualified reference to an upstream backlog entity
/// (e.g. provider `github-issues`, kind `issue`, id = issue number as text).
#[derive(Debug, Clone, Default, Serialize, Deserialize, PartialEq, Eq)]
pub struct BacklogRef {
    pub provider: String,
    pub kind: String,
    pub id: String,
    pub url: String,
}
impl BacklogRef {
pub fn key(&self) -> String {
format!("{}:{}:{}", self.provider, self.kind, self.id)
}
#[cfg(any(feature = "extension-backlog", test))]
pub fn is_empty(&self) -> bool {
self.provider.is_empty() || self.kind.is_empty() || self.id.is_empty()
}
}
/// Whether an item participates in the native GitHub queue.
/// Serialized in snake_case; `QueueCandidate` is the serde default.
#[derive(Debug, Clone, Copy, Default, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "snake_case")]
pub enum GitHubQueueState {
    // No queue label at all: excluded from dispatch.
    OutOfQueue,
    // Exactly one valid priority label: eligible for dispatch.
    #[default]
    QueueCandidate,
    // Contradictory queue labels (e.g. multiple priorities): blocks selection.
    QueuePolicyConflict,
}
impl GitHubQueueState {
pub fn as_str(self) -> &'static str {
match self {
Self::OutOfQueue => "out_of_queue",
Self::QueueCandidate => "queue_candidate",
Self::QueuePolicyConflict => "queue_policy_conflict",
}
}
}
/// Quality of the canonical metadata extracted from an issue body.
/// Serialized in snake_case; `Enriched` is the serde default.
#[derive(Debug, Clone, Copy, Default, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "snake_case")]
pub enum MetadataStatus {
    // Metadata parsed and a Summary section was present.
    #[default]
    Enriched,
    // No metadata fence in the body at all.
    Absent,
    // Metadata parsed but the Summary section was empty.
    Partial,
    // Metadata present but malformed or inconsistent (see `metadata_error`).
    Invalid,
}
/// Whether the upstream issue already has assignees.
/// Serialized in snake_case; `Unclaimed` is the serde default.
#[derive(Debug, Clone, Copy, Default, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "snake_case")]
pub enum UpstreamClaimState {
    #[default]
    Unclaimed,
    Claimed,
}
/// Why an item can (or cannot) be auto-dispatched right now.
/// Serialized in snake_case; `Ready` is the serde default.
#[derive(Debug, Clone, Copy, Default, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "snake_case")]
pub enum GitHubDispatchState {
    #[default]
    Ready,
    // Assigned upstream (GitHub assignees present).
    UpstreamClaimed,
    // Claimed via caller-provided local claim sets.
    LocallyClaimed,
    // Depends on a still-active ccd item.
    DependencyBlocked,
    // Closed upstream, or status is not `ready`.
    NotReady,
    // Parked by priority label or status.
    Parked,
    QueuePolicyConflict,
    OutOfQueue,
}
impl GitHubDispatchState {
pub fn as_str(self) -> &'static str {
match self {
Self::Ready => "ready",
Self::UpstreamClaimed => "upstream_claimed",
Self::LocallyClaimed => "locally_claimed",
Self::DependencyBlocked => "dependency_blocked",
Self::NotReady => "not_ready",
Self::Parked => "parked",
Self::QueuePolicyConflict => "queue_policy_conflict",
Self::OutOfQueue => "out_of_queue",
}
}
}
/// Priority bucket encoded by the `ccd/priority:*` GitHub labels.
/// Serialized in kebab-case to match the label suffixes.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "kebab-case")]
pub enum GitHubPriorityBucket {
    ActiveNow,
    Next,
    Later,
    // Parked items are never auto-selected by dispatch.
    Parked,
}
impl GitHubPriorityBucket {
#[cfg(any(feature = "extension-backlog", test))]
pub fn as_label(self) -> &'static str {
match self {
Self::ActiveNow => GITHUB_PRIORITY_ACTIVE_NOW,
Self::Next => GITHUB_PRIORITY_NEXT,
Self::Later => GITHUB_PRIORITY_LATER,
Self::Parked => GITHUB_PRIORITY_PARKED,
}
}
pub fn short_name(self) -> &'static str {
match self {
Self::ActiveNow => "active-now",
Self::Next => "next",
Self::Later => "later",
Self::Parked => "parked",
}
}
}
/// Aggregate counters over the cached queue, surfaced in status views and the
/// rendered markdown. All counts are taken over "active" (not upstream-closed)
/// items; see `GitHubBacklogCache::queue_summary` for the exact semantics.
///
/// Derives `Default` so the all-zero summary (reported when no cache file
/// exists yet) does not have to be spelled out field by field.
#[derive(Debug, Clone, Default, Serialize, Deserialize, PartialEq, Eq)]
pub struct GitHubQueueSummary {
    pub open_issues: usize,
    pub queue_scoped: usize,
    pub queue_candidates: usize,
    pub policy_conflicts: usize,
    pub metadata_invalid: usize,
    pub upstream_claimed: usize,
    // Items the dispatcher would currently consider `ready`.
    pub auto_selectable: usize,
}
/// One cached work item: raw upstream facts (state, labels, assignees) plus
/// fields derived from the canonical metadata block and parsed body sections.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct GitHubBacklogItem {
    #[serde(default)]
    pub backlog_ref: BacklogRef,
    // 0 means "no canonical ccd id attached" (see `has_ccd_id`).
    pub ccd_id: u64,
    pub github_issue_number: u64,
    // Lowercased upstream state; anything other than "closed" counts as active.
    pub github_state: String,
    pub title: String,
    pub url: String,
    pub kind: String,
    pub section: String,
    // Normalized status; closed issues are forced to "done".
    pub status: String,
    pub effort: String,
    pub impact: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub module: Option<String>,
    // Lower rank dispatches earlier; `None` sorts last.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub priority_rank: Option<u64>,
    // First upstream assignee; `agent_owner` accepted as a legacy alias.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    #[serde(alias = "agent_owner")]
    pub claimed_by: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub depends_on: Vec<u64>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub roadmap_epic: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub spec_refs: Vec<String>,
    // Body-derived prose (falls back to the title when no Summary section).
    pub summary: String,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub acceptance_criteria: Vec<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub related_specs: Vec<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub operator_notes: Vec<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub labels: Vec<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub assignees: Vec<String>,
    // Derived classification fields; defaults keep older caches readable.
    #[serde(default)]
    pub queue_state: GitHubQueueState,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub priority_label: Option<GitHubPriorityBucket>,
    #[serde(default)]
    pub metadata_status: MetadataStatus,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub metadata_error: Option<String>,
    #[serde(default)]
    pub upstream_claim: UpstreamClaimState,
    // Persisted (claim-agnostic) dispatch state; recomputed with local claims at read time.
    #[serde(default)]
    pub dispatch_state: GitHubDispatchState,
}
impl GitHubBacklogItem {
    /// True while the upstream issue is not closed (case-insensitive).
    pub fn is_active(&self) -> bool {
        !self.github_state.eq_ignore_ascii_case("closed")
    }
    /// Active and carrying a `ready` status (case-insensitive).
    pub fn is_ready(&self) -> bool {
        self.is_active() && self.status.eq_ignore_ascii_case("ready")
    }
    /// Whether canonical metadata attached a ccd id (0 is the "none" sentinel).
    pub fn has_ccd_id(&self) -> bool {
        self.ccd_id > 0
    }
    /// In the queue at all — candidate or policy-conflicted.
    pub fn is_queue_scoped(&self) -> bool {
        self.queue_state != GitHubQueueState::OutOfQueue
    }
    /// Cleanly labeled for dispatch.
    pub fn is_queue_candidate(&self) -> bool {
        self.queue_state == GitHubQueueState::QueueCandidate
    }
    /// Already assigned upstream.
    pub fn is_upstream_claimed(&self) -> bool {
        self.upstream_claim == UpstreamClaimState::Claimed
    }
    /// Human-facing reference: prefers `ccd#N`, falls back to `GH#N`.
    #[cfg(any(feature = "extension-backlog", test))]
    pub fn display_ref(&self) -> String {
        if self.has_ccd_id() {
            format!("ccd#{}", self.ccd_id)
        } else {
            format!("GH#{}", self.github_issue_number)
        }
    }
}
/// On-disk cache of the fetched work queue (written as JSON by `write_cache`,
/// read back by `read_cache_file`).
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct GitHubBacklogCache {
    // Equals `CACHE_SCHEMA_VERSION` when written by this build.
    pub schema_version: u32,
    // Defaults to "github-issues" for caches written before this field existed.
    #[serde(default = "default_work_queue_provider")]
    pub provider: String,
    pub repo: String,
    // Unix seconds at fetch time; staleness is measured against this.
    pub fetched_at_epoch_s: u64,
    pub items: Vec<GitHubBacklogItem>,
}
impl GitHubBacklogCache {
    /// Builds a cache for the default `github-issues` provider, sorting items
    /// into deterministic dispatch order (priority bucket, then rank, then
    /// issue number, then ccd id) and precomputing each item's persisted
    /// dispatch state.
    #[cfg(any(feature = "extension-backlog", test))]
    pub fn new(
        repo: impl Into<String>,
        fetched_at_epoch_s: u64,
        items: Vec<GitHubBacklogItem>,
    ) -> Self {
        let mut items = items;
        items.sort_by_key(|item| {
            (
                // Missing bucket/rank sorts last (MAX sentinels).
                effective_priority_label_for_provider(DEFAULT_WORK_QUEUE_PROVIDER, item)
                    .map(priority_bucket_rank)
                    .unwrap_or(u8::MAX),
                item.priority_rank.unwrap_or(u64::MAX),
                item.github_issue_number,
                item.ccd_id,
            )
        });
        let mut cache = Self {
            schema_version: CACHE_SCHEMA_VERSION,
            provider: default_work_queue_provider(),
            repo: repo.into(),
            fetched_at_epoch_s,
            items,
        };
        cache.refresh_dispatch_states();
        cache
    }
    /// Recomputes `dispatch_state` for every item using empty claim sets, i.e.
    /// the claim-agnostic state that is persisted into the cache file.
    #[cfg(any(feature = "extension-backlog", test))]
    pub fn refresh_dispatch_states(&mut self) {
        let empty_ref_claims = BTreeSet::new();
        let empty_id_claims = BTreeSet::new();
        // Compute first, assign second: `dispatch_state_for_item` borrows `self`
        // immutably while the assignment loop needs `&mut self.items`.
        let dispatch_states = self
            .items
            .iter()
            .map(|item| dispatch_state_for_item(self, item, &empty_ref_claims, &empty_id_claims))
            .collect::<Vec<_>>();
        for (item, dispatch_state) in self.items.iter_mut().zip(dispatch_states) {
            item.dispatch_state = dispatch_state;
        }
    }
    /// Open items in dispatch order. For the github-issues provider the list is
    /// additionally restricted to queue-scoped items.
    pub fn active_items(&self) -> Vec<&GitHubBacklogItem> {
        let mut active = self
            .items
            .iter()
            .filter(|item| item.is_active())
            .filter(|item| {
                // Only the native provider enforces queue scoping.
                if self.provider.eq_ignore_ascii_case("github-issues") {
                    item.is_queue_scoped()
                } else {
                    true
                }
            })
            .collect::<Vec<_>>();
        // Same deterministic ordering as `new`.
        active.sort_by_key(|item| {
            (
                effective_priority_label(self, item)
                    .map(priority_bucket_rank)
                    .unwrap_or(u8::MAX),
                item.priority_rank.unwrap_or(u64::MAX),
                item.github_issue_number,
                item.ccd_id,
            )
        });
        active
    }
    /// Aggregate queue counters for status output.
    ///
    /// Non-github providers get a simplified summary: every active item counts
    /// as queue-scoped and as a candidate, claims come from `claimed_by` and
    /// `claimed_ids` (NOTE(review): `claimed_ref_keys` is not consulted on
    /// that path — confirm that is intentional), and label-based counters are 0.
    pub fn queue_summary(
        &self,
        claimed_ref_keys: &BTreeSet<String>,
        claimed_ids: &BTreeSet<u64>,
    ) -> GitHubQueueSummary {
        if !self.provider.eq_ignore_ascii_case("github-issues") {
            let auto_selectable = self
                .items
                .iter()
                .filter(|item| item.is_active())
                .filter(|item| item.is_ready())
                .filter(|item| item.claimed_by.is_none())
                .filter(|item| !claimed_ids.contains(&item.ccd_id))
                .count();
            return GitHubQueueSummary {
                open_issues: self.items.iter().filter(|item| item.is_active()).count(),
                queue_scoped: self.items.iter().filter(|item| item.is_active()).count(),
                queue_candidates: self.items.iter().filter(|item| item.is_active()).count(),
                policy_conflicts: 0,
                metadata_invalid: 0,
                upstream_claimed: self
                    .items
                    .iter()
                    .filter(|item| item.is_active() && item.claimed_by.is_some())
                    .count(),
                auto_selectable,
            };
        }
        let open_items = self
            .items
            .iter()
            .filter(|item| item.is_active())
            .collect::<Vec<_>>();
        GitHubQueueSummary {
            open_issues: open_items.len(),
            queue_scoped: open_items
                .iter()
                .filter(|item| item.is_queue_scoped())
                .count(),
            queue_candidates: open_items
                .iter()
                .filter(|item| item.is_queue_candidate())
                .count(),
            policy_conflicts: open_items
                .iter()
                .filter(|item| matches!(item.queue_state, GitHubQueueState::QueuePolicyConflict))
                .count(),
            metadata_invalid: open_items
                .iter()
                .filter(|item| matches!(item.metadata_status, MetadataStatus::Invalid))
                .count(),
            upstream_claimed: open_items
                .iter()
                .filter(|item| item.is_upstream_claimed())
                .count(),
            // "Auto-selectable" = the full dispatch check (claims included) says Ready.
            auto_selectable: open_items
                .iter()
                .filter(|item| {
                    matches!(
                        dispatch_state_for_item(self, item, claimed_ref_keys, claimed_ids),
                        GitHubDispatchState::Ready
                    )
                })
                .count(),
        }
    }
}
/// Serialize-only projection of a `GitHubBacklogItem` for views and summaries.
#[derive(Debug, Clone, Serialize)]
pub struct GitHubBacklogSummaryItem {
    pub ccd_id: u64,
    pub github_issue_number: u64,
    pub backlog_ref: BacklogRef,
    // Always `ExternalAdapterOutput` where constructed in this module.
    pub content_trust: ContentTrust,
    pub title: String,
    pub url: String,
    pub section: String,
    pub status: String,
    pub queue_state: GitHubQueueState,
    pub dispatch_state: GitHubDispatchState,
    pub metadata_status: MetadataStatus,
    pub upstream_claim: UpstreamClaimState,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub priority_label: Option<GitHubPriorityBucket>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub claimed_by: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub priority_rank: Option<u64>,
}
impl GitHubBacklogSummaryItem {
    /// Human-facing reference: prefers `ccd#N`, falling back to `GH#N` when no
    /// canonical id (0) is attached. Mirrors `GitHubBacklogItem::display_ref`.
    pub fn display_ref(&self) -> String {
        match self.ccd_id {
            0 => format!("GH#{}", self.github_issue_number),
            id => format!("ccd#{id}"),
        }
    }
}
/// Serialize-only dispatch decision (see `dispatch_summary_view`).
#[derive(Debug, Clone, Serialize)]
pub struct GitHubBacklogDispatchView {
    // e.g. "selected", "no_queue_candidates", "ambiguous_candidates".
    pub status: &'static str,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub priority_label: Option<GitHubPriorityBucket>,
    // Human-readable explanation when nothing was selected.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub reason: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub selected: Option<GitHubBacklogSummaryItem>,
}
/// Serialize-only snapshot of the whole cache for status output.
#[derive(Debug, Clone, Serialize)]
pub struct GitHubBacklogCacheView {
    pub path: String,
    pub rendered_path: String,
    // "missing" | "stale" | "empty" | "loaded".
    pub status: &'static str,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub content_trust: Option<ContentTrust>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub provider: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub repo: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub fetched_at_epoch_s: Option<u64>,
    // Staleness threshold in seconds (`STALE_AFTER_SECS`).
    pub stale_after_s: u64,
    pub queue_summary: GitHubQueueSummary,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub dispatch: Option<GitHubBacklogDispatchView>,
    // Truncated to the caller-provided limit.
    pub active_items: Vec<GitHubBacklogSummaryItem>,
}
/// Reads the cached work queue from the layout's canonical cache path.
/// Returns `Ok(None)` when no cache file exists yet.
pub fn load_cache(layout: &StateLayout) -> Result<Option<GitHubBacklogCache>> {
    let cache_path = layout.work_queue_cache_path();
    read_cache_file(&cache_path)
}
/// Loads the cache (if any) and renders it into a view truncated to `limit` items.
pub fn load_cache_view(layout: &StateLayout, limit: usize) -> Result<GitHubBacklogCacheView> {
    load_cache_view_from(layout, load_cache(layout)?, limit)
}
/// Renders a view from an already-loaded (owned) cache, if any.
pub fn load_cache_view_from(
    layout: &StateLayout,
    cache: Option<GitHubBacklogCache>,
    limit: usize,
) -> Result<GitHubBacklogCacheView> {
    let cache_ref = cache.as_ref();
    load_cache_view_from_ref(layout, cache_ref, limit)
}
/// Builds the serializable cache view from a borrowed cache (or `None` when the
/// cache file is missing), truncating the active-item list to `limit`.
pub fn load_cache_view_from_ref(
    layout: &StateLayout,
    cache: Option<&GitHubBacklogCache>,
    limit: usize,
) -> Result<GitHubBacklogCacheView> {
    let path = layout.work_queue_cache_path();
    let rendered_path = layout.work_queue_view_path();
    // No cache on disk yet: report `missing` with an all-zero summary.
    let Some(cache) = cache else {
        return Ok(GitHubBacklogCacheView {
            path: path.display().to_string(),
            rendered_path: rendered_path.display().to_string(),
            status: "missing",
            content_trust: None,
            provider: None,
            repo: None,
            fetched_at_epoch_s: None,
            stale_after_s: STALE_AFTER_SECS,
            queue_summary: GitHubQueueSummary {
                open_issues: 0,
                queue_scoped: 0,
                queue_candidates: 0,
                policy_conflicts: 0,
                metadata_invalid: 0,
                upstream_claimed: 0,
                auto_selectable: 0,
            },
            dispatch: None,
            active_items: Vec::new(),
        });
    };
    // Views are claim-agnostic: dispatch states use empty claim sets here.
    let empty_ref_claims = BTreeSet::new();
    let empty_id_claims = BTreeSet::new();
    let queue_summary = cache.queue_summary(&empty_ref_claims, &empty_id_claims);
    let active_items = cache
        .active_items()
        .into_iter()
        .take(limit)
        .map(|item| GitHubBacklogSummaryItem {
            ccd_id: item.ccd_id,
            github_issue_number: item.github_issue_number,
            backlog_ref: item.backlog_ref.clone(),
            content_trust: ContentTrust::ExternalAdapterOutput,
            title: display_title(&item.title),
            url: item.url.clone(),
            section: item.section.clone(),
            status: item.status.clone(),
            queue_state: item.queue_state,
            dispatch_state: dispatch_state_for_item(
                cache,
                item,
                &empty_ref_claims,
                &empty_id_claims,
            ),
            metadata_status: item.metadata_status,
            upstream_claim: item.upstream_claim,
            priority_label: item.priority_label,
            claimed_by: item.claimed_by.clone(),
            priority_rank: item.priority_rank,
        })
        .collect::<Vec<_>>();
    // Staleness takes precedence over emptiness when labeling the view.
    let status = if is_stale(cache, now_epoch_s()?) {
        "stale"
    } else if active_items.is_empty() {
        "empty"
    } else {
        "loaded"
    };
    Ok(GitHubBacklogCacheView {
        path: path.display().to_string(),
        rendered_path: rendered_path.display().to_string(),
        status,
        content_trust: Some(ContentTrust::ExternalAdapterOutput),
        provider: Some(cache.provider.clone()),
        repo: Some(cache.repo.clone()),
        fetched_at_epoch_s: Some(cache.fetched_at_epoch_s),
        stale_after_s: STALE_AFTER_SECS,
        queue_summary,
        dispatch: dispatch_summary_view(cache, &empty_ref_claims, &empty_id_claims),
        active_items,
    })
}
/// Persists the cache as pretty JSON and writes the human-readable markdown
/// rendering alongside it. Ensures the clone profile root directory exists first.
#[cfg(feature = "extension-backlog")]
pub fn write_cache(layout: &StateLayout, cache: &GitHubBacklogCache) -> Result<()> {
    let clone_profile_root = layout.clone_profile_root();
    fs::create_dir_all(&clone_profile_root).with_context(|| {
        format!(
            "failed to create directory {}",
            clone_profile_root.display()
        )
    })?;
    let cache_json = serde_json::to_string_pretty(cache)?;
    let rendered = render_cache_markdown(cache);
    let cache_path = layout.work_queue_cache_path();
    write::replace_text(&cache_path, &cache_json, None)
        .with_context(|| format!("failed to write {}", cache_path.display()))?;
    let view_path = layout.work_queue_view_path();
    write::replace_text(&view_path, &rendered, None)
        .with_context(|| format!("failed to write {}", view_path.display()))?;
    Ok(())
}
/// A cache is stale once more than `STALE_AFTER_SECS` have elapsed since fetch.
/// Saturating subtraction tolerates a clock that is behind the fetch time.
pub fn is_stale(cache: &GitHubBacklogCache, now_epoch_s: u64) -> bool {
    let age_s = now_epoch_s.saturating_sub(cache.fetched_at_epoch_s);
    age_s > STALE_AFTER_SECS
}
/// Only the default (`github-issues`) provider is revalidated upstream on refresh.
pub fn should_revalidate_on_refresh(cache: &GitHubBacklogCache) -> bool {
    let provider = cache.provider.as_str();
    provider.eq_ignore_ascii_case(DEFAULT_WORK_QUEUE_PROVIDER)
}
/// Outcome of `dispatch_view`: a status string, the winning priority bucket
/// (when one was determined), the selected item (borrowed from the cache), and
/// a human-readable reason when nothing was selected.
#[derive(Debug, Clone)]
pub struct DispatchDecision<'a> {
    pub status: &'static str,
    pub priority_label: Option<GitHubPriorityBucket>,
    pub selected: Option<&'a GitHubBacklogItem>,
    pub reason: Option<String>,
}
/// Deterministic dispatch decision over the cached queue.
///
/// Returns `None` when the cache holds no items at all. For non-github
/// providers the first ready item in dispatch order is selected. For the
/// github-issues provider: restrict to open queue candidates, pick the best
/// non-parked priority bucket, then require a unique best `priority_rank`
/// among the ready items in that bucket — a tie yields `ambiguous_candidates`
/// rather than an arbitrary pick.
pub fn dispatch_view<'a>(
    cache: &'a GitHubBacklogCache,
    claimed_ref_keys: &BTreeSet<String>,
    claimed_ids: &BTreeSet<u64>,
) -> Option<DispatchDecision<'a>> {
    if cache.items.is_empty() {
        return None;
    }
    // Non-native providers: no bucket policy, just the first Ready item.
    if !cache.provider.eq_ignore_ascii_case("github-issues") {
        let selected = cache.active_items().into_iter().find(|item| {
            matches!(
                dispatch_state_for_item(cache, item, claimed_ref_keys, claimed_ids),
                GitHubDispatchState::Ready
            )
        });
        return Some(DispatchDecision {
            status: if selected.is_some() {
                "selected"
            } else {
                "no_queue_candidates"
            },
            priority_label: None,
            selected,
            reason: if selected.is_some() {
                None
            } else {
                Some("no ready unclaimed backlog item is available".to_owned())
            },
        });
    }
    let open_items = cache
        .items
        .iter()
        .filter(|item| item.is_active())
        .collect::<Vec<_>>();
    let queue_candidates = open_items
        .iter()
        .copied()
        .filter(|item| item.is_queue_candidate())
        .collect::<Vec<_>>();
    // No clean candidates: distinguish "nothing in queue" from "labels conflict".
    if queue_candidates.is_empty() {
        let has_conflicts = open_items
            .iter()
            .any(|item| matches!(item.queue_state, GitHubQueueState::QueuePolicyConflict));
        return Some(DispatchDecision {
            status: if has_conflicts {
                "queue_policy_conflict"
            } else {
                "no_queue_candidates"
            },
            priority_label: None,
            selected: None,
            reason: Some(if has_conflicts {
                "queue-scoped GitHub issues exist, but malformed native queue labels prevent deterministic selection".to_owned()
            } else {
                "no queue-scoped GitHub issues are currently available".to_owned()
            }),
        });
    }
    // Best non-parked bucket among the candidates (parked is never auto-selected).
    let dispatch_buckets = queue_candidates
        .iter()
        .filter_map(|item| effective_priority_label(cache, item))
        .filter(|bucket| *bucket != GitHubPriorityBucket::Parked)
        .collect::<Vec<_>>();
    let top_bucket = dispatch_buckets
        .into_iter()
        .min_by_key(|bucket| priority_bucket_rank(*bucket));
    let Some(top_bucket) = top_bucket else {
        return Some(DispatchDecision {
            status: "no_auto_selectable_candidates",
            priority_label: Some(GitHubPriorityBucket::Parked),
            selected: None,
            reason: Some("all queue candidates are parked; none are auto-selectable".to_owned()),
        });
    };
    let top_bucket_items = queue_candidates
        .into_iter()
        .filter(|item| effective_priority_label(cache, item) == Some(top_bucket))
        .collect::<Vec<_>>();
    let ready_items = top_bucket_items
        .iter()
        .copied()
        .filter(|item| {
            matches!(
                dispatch_state_for_item(cache, item, claimed_ref_keys, claimed_ids),
                GitHubDispatchState::Ready
            )
        })
        .collect::<Vec<_>>();
    // Nothing Ready in the top bucket: explain why, with per-state counts.
    if ready_items.is_empty() {
        let mut reason_counts = BTreeMap::<&'static str, usize>::new();
        for item in &top_bucket_items {
            let key = match dispatch_state_for_item(cache, item, claimed_ref_keys, claimed_ids) {
                GitHubDispatchState::UpstreamClaimed => "upstream claimed",
                GitHubDispatchState::LocallyClaimed => "locally claimed",
                GitHubDispatchState::DependencyBlocked => "dependency blocked",
                GitHubDispatchState::NotReady => "not ready",
                GitHubDispatchState::Parked => "parked",
                GitHubDispatchState::QueuePolicyConflict => "queue policy conflict",
                GitHubDispatchState::OutOfQueue => "out of queue",
                GitHubDispatchState::Ready => "ready",
            };
            *reason_counts.entry(key).or_insert(0) += 1;
        }
        let reason = reason_counts
            .into_iter()
            .map(|(label, count)| format!("{count} {label}"))
            .collect::<Vec<_>>()
            .join(", ");
        return Some(DispatchDecision {
            status: "no_auto_selectable_candidates",
            priority_label: Some(top_bucket),
            selected: None,
            reason: Some(format!(
                "top priority bucket `{}` has no auto-selectable candidates ({reason})",
                top_bucket.short_name()
            )),
        });
    }
    // Deterministic tie-break order: rank, then issue number, then ccd id.
    let mut ready_items = ready_items;
    ready_items.sort_by_key(|item| {
        (
            item.priority_rank.unwrap_or(u64::MAX),
            item.github_issue_number,
            item.ccd_id,
        )
    });
    if ready_items.len() > 1 {
        // Selection requires a UNIQUE best rank; otherwise ask the actor.
        let best_rank = ready_items[0].priority_rank.unwrap_or(u64::MAX);
        let best_rank_count = ready_items
            .iter()
            .take_while(|item| item.priority_rank.unwrap_or(u64::MAX) == best_rank)
            .count();
        if best_rank_count == 1 {
            return Some(DispatchDecision {
                status: "selected",
                priority_label: Some(top_bucket),
                selected: ready_items.into_iter().next(),
                reason: None,
            });
        }
        return Some(DispatchDecision {
            status: "ambiguous_candidates",
            priority_label: Some(top_bucket),
            selected: None,
            reason: Some(format!(
                "top priority bucket `{}` has {} plausible candidates; actor input is required",
                top_bucket.short_name(),
                best_rank_count
            )),
        });
    }
    Some(DispatchDecision {
        status: "selected",
        priority_label: Some(top_bucket),
        selected: ready_items.into_iter().next(),
        reason: None,
    })
}
/// Convenience wrapper over [`dispatch_view`]: just the selected item, if any.
#[cfg(any(feature = "extension-backlog", test))]
pub fn next_dispatch_item<'a>(
    cache: &'a GitHubBacklogCache,
    claimed_ref_keys: &BTreeSet<String>,
    claimed_ids: &BTreeSet<u64>,
) -> Option<&'a GitHubBacklogItem> {
    let decision = dispatch_view(cache, claimed_ref_keys, claimed_ids)?;
    decision.selected
}
/// Serializable form of [`dispatch_view`] for status output.
///
/// Returns `None` when the cache has no items at all (mirroring `dispatch_view`).
pub fn dispatch_summary_view(
    cache: &GitHubBacklogCache,
    claimed_ref_keys: &BTreeSet<String>,
    claimed_ids: &BTreeSet<u64>,
) -> Option<GitHubBacklogDispatchView> {
    let view = dispatch_view(cache, claimed_ref_keys, claimed_ids)?;
    // Recompute the state for the selected item so the summary reflects the
    // caller-provided claim sets rather than the persisted state.
    let dispatch_state = view
        .selected
        .map(|item| dispatch_state_for_item(cache, item, claimed_ref_keys, claimed_ids))
        .unwrap_or(GitHubDispatchState::NotReady);
    Some(GitHubBacklogDispatchView {
        status: view.status,
        priority_label: view.priority_label,
        // `view` is owned here, so the reason string can be moved instead of cloned.
        reason: view.reason,
        selected: view
            .selected
            .map(|item| summary_item_for(item, dispatch_state)),
    })
}
// NOTE(review): `allow(dead_code)` suggests no current in-tree callers — confirm
// before removing this helper.
#[allow(dead_code)]
#[cfg(any(feature = "extension-backlog", test))]
pub fn has_dispatchable_work(
    cache: &GitHubBacklogCache,
    claimed_ref_keys: &BTreeSet<String>,
    claimed_ids: &BTreeSet<u64>,
) -> bool {
    next_dispatch_item(cache, claimed_ref_keys, claimed_ids).is_some()
}
/// Sortable rank for a bucket: lower dispatches first
/// (active-now < next < later < parked).
fn priority_bucket_rank(bucket: GitHubPriorityBucket) -> u8 {
    match bucket {
        GitHubPriorityBucket::ActiveNow => 0,
        GitHubPriorityBucket::Next => 1,
        GitHubPriorityBucket::Later => 2,
        GitHubPriorityBucket::Parked => 3,
    }
}
/// Effective bucket for an item under this cache's configured provider.
fn effective_priority_label(
    cache: &GitHubBacklogCache,
    item: &GitHubBacklogItem,
) -> Option<GitHubPriorityBucket> {
    let provider = cache.provider.as_str();
    effective_priority_label_for_provider(provider, item)
}
/// Explicit priority label when present; otherwise github-issues queue
/// candidates that carry a canonical ccd id default to the `Next` bucket.
fn effective_priority_label_for_provider(
    provider: &str,
    item: &GitHubBacklogItem,
) -> Option<GitHubPriorityBucket> {
    if let Some(explicit) = item.priority_label {
        return Some(explicit);
    }
    let implied_next = provider.eq_ignore_ascii_case("github-issues")
        && item.has_ccd_id()
        && item.is_queue_candidate();
    implied_next.then_some(GitHubPriorityBucket::Next)
}
/// Dispatch state derived from the item alone (no local claim sets): the state
/// suitable for persisting into the cache file.
///
/// Precedence: closed -> NotReady; queue state (github-issues only) ->
/// OutOfQueue / QueuePolicyConflict; parked (label or status) -> Parked;
/// upstream assignee -> UpstreamClaimed; non-ready status -> NotReady;
/// otherwise Ready.
pub(crate) fn persisted_dispatch_state_for_provider(
    provider: &str,
    item: &GitHubBacklogItem,
) -> GitHubDispatchState {
    if !item.is_active() {
        return GitHubDispatchState::NotReady;
    }
    // Queue-state gating only applies to the native github-issues provider.
    if provider.eq_ignore_ascii_case("github-issues") {
        match item.queue_state {
            GitHubQueueState::OutOfQueue => return GitHubDispatchState::OutOfQueue,
            GitHubQueueState::QueuePolicyConflict => {
                return GitHubDispatchState::QueuePolicyConflict
            }
            GitHubQueueState::QueueCandidate => {}
        }
    }
    if effective_priority_label_for_provider(provider, item) == Some(GitHubPriorityBucket::Parked)
        || item.status.eq_ignore_ascii_case("parked")
    {
        return GitHubDispatchState::Parked;
    }
    if item.is_upstream_claimed() {
        return GitHubDispatchState::UpstreamClaimed;
    }
    if !item.is_ready() {
        return GitHubDispatchState::NotReady;
    }
    GitHubDispatchState::Ready
}
/// Full dispatch state, including caller-local claims and dependency blocking.
///
/// Starts from the persisted state; a Ready item is then downgraded if it is
/// locally claimed (by backlog-ref key or ccd id) or depends on a still-active
/// ccd item in this cache.
fn dispatch_state_for_item(
    cache: &GitHubBacklogCache,
    item: &GitHubBacklogItem,
    claimed_ref_keys: &BTreeSet<String>,
    claimed_ids: &BTreeSet<u64>,
) -> GitHubDispatchState {
    let base_dispatch_state = persisted_dispatch_state_for_provider(cache.provider.as_str(), item);
    if !matches!(base_dispatch_state, GitHubDispatchState::Ready) {
        return base_dispatch_state;
    }
    if claimed_ref_keys.contains(&item.backlog_ref.key())
        || (item.has_ccd_id() && claimed_ids.contains(&item.ccd_id))
    {
        return GitHubDispatchState::LocallyClaimed;
    }
    // NOTE(review): this set is rebuilt on every call, making whole-cache scans
    // (refresh_dispatch_states, queue_summary) O(n^2); consider precomputing it
    // once per scan if caches grow large.
    let active_ids = cache
        .items
        .iter()
        .filter(|candidate| candidate.is_active() && candidate.has_ccd_id())
        .map(|candidate| candidate.ccd_id)
        .collect::<BTreeSet<_>>();
    // A dependency blocks only while the depended-on item is still open.
    if item.depends_on.iter().any(|id| active_ids.contains(id)) {
        return GitHubDispatchState::DependencyBlocked;
    }
    GitHubDispatchState::Ready
}
/// Projects a cached item into its summary form with an explicit dispatch state.
fn summary_item_for(
    item: &GitHubBacklogItem,
    dispatch_state: GitHubDispatchState,
) -> GitHubBacklogSummaryItem {
    GitHubBacklogSummaryItem {
        content_trust: ContentTrust::ExternalAdapterOutput,
        ccd_id: item.ccd_id,
        github_issue_number: item.github_issue_number,
        backlog_ref: item.backlog_ref.clone(),
        title: display_title(&item.title),
        url: item.url.clone(),
        section: item.section.clone(),
        status: item.status.clone(),
        queue_state: item.queue_state,
        metadata_status: item.metadata_status,
        upstream_claim: item.upstream_claim,
        priority_label: item.priority_label,
        claimed_by: item.claimed_by.clone(),
        priority_rank: item.priority_rank,
        dispatch_state,
    }
}
/// Validates the active dependency graph: self-dependencies, references to
/// missing or completed items, and cycles. Returns human-readable error strings.
#[cfg(any(feature = "extension-backlog", test))]
pub fn dep_graph_errors(cache: &GitHubBacklogCache) -> Vec<String> {
    // Index every item carrying a canonical ccd id — including closed ones, so
    // deps on closed items can be distinguished from deps on missing items.
    let indexed: BTreeMap<u64, &GitHubBacklogItem> = cache
        .items
        .iter()
        .filter(|item| item.has_ccd_id())
        .map(|item| (item.ccd_id, item))
        .collect();
    let mut errors = Vec::new();
    for item in cache
        .items
        .iter()
        .filter(|i| i.is_active() && i.has_ccd_id())
    {
        // Deduplicate repeated dependency ids so each problem is reported once.
        let mut checked = BTreeSet::new();
        for &dep_id in &item.depends_on {
            if !checked.insert(dep_id) {
                continue;
            }
            if dep_id == item.ccd_id {
                errors.push(format!("`ccd#{}` cannot depend on itself.", item.ccd_id));
            } else if let Some(dep) = indexed.get(&dep_id) {
                // Case-insensitive, consistent with every other status check in
                // this module (`is_ready`, the parked check, etc.).
                if !dep.is_active() || dep.status.eq_ignore_ascii_case("done") {
                    errors.push(format!(
                        "`ccd#{}` depends on closed or completed `ccd#{}`; remove completed blockers from the active dependency graph.",
                        item.ccd_id, dep_id
                    ));
                }
            } else {
                errors.push(format!(
                    "`ccd#{}` depends on missing `ccd#{}`.",
                    item.ccd_id, dep_id
                ));
            }
        }
    }
    // Cycle pass: DFS from each not-yet-visited active node.
    let mut visited = BTreeSet::new();
    for item in cache
        .items
        .iter()
        .filter(|i| i.is_active() && i.has_ccd_id())
    {
        if !visited.contains(&item.ccd_id) {
            detect_cycle(
                item.ccd_id,
                &indexed,
                &mut BTreeSet::new(),
                &mut visited,
                &mut errors,
            );
        }
    }
    errors
}
/// DFS helper for `dep_graph_errors`: reports a cycle when `id` re-enters the
/// current DFS `path`. Fully-explored nodes go into `visited` and are never
/// re-entered, so each cycle is reported only once.
#[cfg(any(feature = "extension-backlog", test))]
fn detect_cycle(
    id: u64,
    indexed: &BTreeMap<u64, &GitHubBacklogItem>,
    path: &mut BTreeSet<u64>,
    visited: &mut BTreeSet<u64>,
    errors: &mut Vec<String>,
) {
    // `insert` returning false means `id` is already on the current path: cycle.
    if !path.insert(id) {
        errors.push(format!("dependency cycle detected involving `ccd#{id}`."));
        return;
    }
    if let Some(item) = indexed.get(&id) {
        for &dep_id in &item.depends_on {
            // Self-loops are reported separately by the caller; skip them here.
            if dep_id != id && !visited.contains(&dep_id) {
                detect_cycle(dep_id, indexed, path, visited, errors);
            }
        }
    }
    path.remove(&id);
    visited.insert(id);
}
/// Current Unix time in whole seconds; errors if the system clock reads before
/// the epoch.
pub fn now_epoch_s() -> Result<u64> {
    let since_epoch = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .context("system clock is before UNIX_EPOCH")?;
    Ok(since_epoch.as_secs())
}
/// Renders the cache as a human-readable markdown report (header counters plus
/// one bullet per active item). Uses empty claim sets for the summary.
#[cfg(feature = "extension-backlog")]
pub fn render_cache_markdown(cache: &GitHubBacklogCache) -> String {
    let empty_ref_claims = BTreeSet::new();
    let empty_id_claims = BTreeSet::new();
    let summary = cache.queue_summary(&empty_ref_claims, &empty_id_claims);
    let mut rendered = String::new();
    rendered.push_str("# Cached Work Queue\n\n");
    rendered.push_str(&format!("- Provider: `{}`\n", cache.provider));
    rendered.push_str(&format!("- Source: `{}`\n", cache.repo));
    rendered.push_str(&format!(
        "- Fetched at epoch: `{}`\n",
        cache.fetched_at_epoch_s
    ));
    rendered.push_str(&format!("- Open issues: `{}`\n", summary.open_issues));
    rendered.push_str(&format!("- Queue scoped: `{}`\n", summary.queue_scoped));
    rendered.push_str(&format!(
        "- Queue candidates: `{}`\n",
        summary.queue_candidates
    ));
    rendered.push_str(&format!(
        "- Queue policy conflicts: `{}`\n",
        summary.policy_conflicts
    ));
    rendered.push_str(&format!(
        "- Auto-selectable: `{}`\n\n",
        summary.auto_selectable
    ));
    rendered.push_str("## Active Items\n\n");
    let active_items = cache.active_items();
    if active_items.is_empty() {
        rendered.push_str("- No active queue-scoped work items are cached.\n");
        return rendered;
    }
    for item in active_items {
        // local-markdown items have no GitHub issue number worth showing.
        if cache.provider.eq_ignore_ascii_case("local-markdown") {
            rendered.push_str(&format!(
                "- [ccd#{}] {} ({}, {})\n",
                item.ccd_id,
                display_title(&item.title),
                item.status,
                item.section
            ));
        } else {
            rendered.push_str(&format!(
                "- [{}] {} (#{}, {}, {})\n",
                item.display_ref(),
                display_title(&item.title),
                item.github_issue_number,
                item.status,
                item.section
            ));
        }
        rendered.push_str(&format!("  - {}\n", item.url));
    }
    rendered
}
/// Serde default for `GitHubBacklogCache::provider`: caches written before the
/// field existed are treated as `github-issues`.
fn default_work_queue_provider() -> String {
    String::from(DEFAULT_WORK_QUEUE_PROVIDER)
}
fn read_cache_file(path: &Path) -> Result<Option<GitHubBacklogCache>> {
match fs::read_to_string(path) {
Ok(contents) => {
let cache: GitHubBacklogCache = serde_json::from_str(&contents)
.with_context(|| format!("failed to parse {}", path.display()))?;
Ok(Some(cache))
}
Err(error) if error.kind() == std::io::ErrorKind::NotFound => Ok(None),
Err(error) => Err(error).with_context(|| format!("failed to read {}", path.display())),
}
}
/// Classifies one raw GitHub issue into a cached `GitHubBacklogItem`:
/// parses the canonical metadata fence and title prefix, derives queue /
/// priority / metadata / claim states from labels and assignees, and extracts
/// body sections (Summary, Acceptance Criteria, ...).
#[cfg(any(feature = "extension-backlog", test))]
pub fn classify_github_issue(
    number: u64,
    title: &str,
    body: &str,
    url: &str,
    state: &str,
    labels: Vec<String>,
    assignees: Vec<String>,
) -> Result<GitHubBacklogItem> {
    let title_id = parse_title_ccd_id(title);
    let metadata_parse = extract_metadata_block(body);
    // Resolve metadata + its quality, and decide which text the section parser sees
    // (the fence remainder when metadata is valid, the whole body otherwise).
    let (metadata, metadata_status, metadata_error, parse_sections_from) = match metadata_parse {
        Ok(Some((metadata, remainder))) => {
            // Title prefix and fence id must agree, else the metadata is rejected.
            if title_id.is_some() && title_id != Some(metadata.id) {
                (
                    None,
                    MetadataStatus::Invalid,
                    Some(format!(
                        "title prefix `ccd#{}` does not match metadata id `ccd#{}`",
                        title_id.unwrap_or_default(),
                        metadata.id
                    )),
                    body.to_owned(),
                )
            } else {
                // Enriched requires a non-empty Summary section; else Partial.
                let summary_sections = parse_sections(&remainder);
                let status = if section_text(&summary_sections, "Summary").is_empty() {
                    MetadataStatus::Partial
                } else {
                    MetadataStatus::Enriched
                };
                (Some(metadata), status, None, remainder)
            }
        }
        Ok(None) => {
            // A `[ccd#N]` title without a metadata fence is invalid, not merely absent.
            if title_id.is_some() {
                (
                    None,
                    MetadataStatus::Invalid,
                    Some(format!(
                        "title prefix `ccd#{}` requires a matching `ccd-backlog` fence at the start of the issue body",
                        title_id.unwrap_or_default()
                    )),
                    body.to_owned(),
                )
            } else {
                (None, MetadataStatus::Absent, None, body.to_owned())
            }
        }
        Err(error) => (
            None,
            MetadataStatus::Invalid,
            Some(format!("{error:#}")),
            body.to_owned(),
        ),
    };
    let sections = parse_sections(&parse_sections_from);
    let priority_labels = parse_priority_labels(&labels);
    let status_labels = parse_status_labels(&labels);
    // Exactly one priority label is required for an unambiguous bucket.
    let priority_label = if priority_labels.len() == 1 {
        priority_labels.first().copied()
    } else {
        None
    };
    // Multiple priority or status labels is a policy conflict; none means out of queue.
    let queue_state = if priority_labels.len() > 1 || status_labels.len() > 1 {
        GitHubQueueState::QueuePolicyConflict
    } else if !priority_labels.is_empty() {
        GitHubQueueState::QueueCandidate
    } else {
        GitHubQueueState::OutOfQueue
    };
    // Status precedence: closed -> "done"; single status label; metadata; default "ready".
    let status = if state.eq_ignore_ascii_case("closed") {
        "done".to_owned()
    } else if status_labels.len() == 1 {
        status_labels[0].to_owned()
    } else if let Some(metadata) = metadata.as_ref() {
        metadata.status.clone()
    } else {
        "ready".to_owned()
    };
    let claimed_by = assignees.first().cloned();
    let summary = section_text(&sections, "Summary");
    // Fall back to the (display) title when the body has no Summary section.
    let summary = if summary.is_empty() {
        display_title(title)
    } else {
        summary
    };
    let acceptance_criteria = section_list(&sections, "Acceptance Criteria");
    let related_specs = section_list(&sections, "Related Specs");
    let operator_notes = section_list(&sections, "Operator Notes");
    let upstream_claim = if assignees.is_empty() {
        UpstreamClaimState::Unclaimed
    } else {
        UpstreamClaimState::Claimed
    };
    let mut item = GitHubBacklogItem {
        backlog_ref: BacklogRef {
            provider: "github-issues".to_owned(),
            kind: "issue".to_owned(),
            id: number.to_string(),
            url: url.to_owned(),
        },
        // 0 is the "no canonical id" sentinel.
        ccd_id: metadata.as_ref().map(|value| value.id).unwrap_or(0),
        github_issue_number: number,
        github_state: state.to_ascii_lowercase(),
        title: title.to_owned(),
        url: url.to_owned(),
        kind: metadata
            .as_ref()
            .map(|value| value.kind.clone())
            .unwrap_or_else(|| "item".to_owned()),
        section: metadata
            .as_ref()
            .map(|value| value.section.clone())
            .unwrap_or_else(|| "github-issues".to_owned()),
        status,
        effort: metadata
            .as_ref()
            .map(|value| value.effort.clone())
            .unwrap_or_default(),
        impact: metadata
            .as_ref()
            .map(|value| value.impact.clone())
            .unwrap_or_default(),
        module: metadata.as_ref().and_then(|value| value.module.clone()),
        priority_rank: metadata.as_ref().and_then(|value| value.priority_rank),
        claimed_by,
        depends_on: metadata
            .as_ref()
            .map(|value| value.depends_on.clone())
            .unwrap_or_default(),
        roadmap_epic: metadata
            .as_ref()
            .and_then(|value| value.roadmap_epic.clone()),
        spec_refs: metadata
            .as_ref()
            .map(|value| value.spec_refs.clone())
            .unwrap_or_default(),
        summary,
        acceptance_criteria,
        related_specs,
        operator_notes,
        labels,
        assignees,
        queue_state,
        priority_label,
        metadata_status,
        metadata_error,
        upstream_claim,
        // Placeholder; replaced immediately below.
        dispatch_state: GitHubDispatchState::NotReady,
    };
    item.dispatch_state = persisted_dispatch_state_for_provider("github-issues", &item);
    Ok(item)
}
/// Extracts the numeric id from a `[ccd#<id>]` title prefix.
///
/// Returns `None` when the trimmed title does not begin with `[ccd#`,
/// when the marker is never closed by `]`, or when the enclosed text is
/// not a valid `u64`.
pub fn parse_title_ccd_id(title: &str) -> Option<u64> {
    let after_marker = title.trim().strip_prefix("[ccd#")?;
    let (digits, _) = after_marker.split_once(']')?;
    digits.parse().ok()
}
#[cfg(any(feature = "extension-backlog", test))]
/// Splits an issue body into its leading `ccd-backlog` TOML fence and the
/// remaining markdown.
///
/// Returns `Ok(None)` when the body does not start with the canonical fence,
/// an error when the fence is never closed or the TOML fails to parse, and
/// otherwise the parsed metadata plus the trimmed remainder. Content placed
/// inline after the closing ``` is preserved as the first remainder line.
fn extract_metadata_block(body: &str) -> Result<Option<(CanonicalIssueMetadata, String)>> {
    let mut lines = body.lines();
    match lines.next() {
        Some(first) if first.trim() == CANONICAL_METADATA_FENCE => {}
        _ => return Ok(None),
    }
    let mut metadata = Vec::new();
    let mut remainder = Vec::new();
    let mut fence_closed = false;
    for line in lines {
        if fence_closed {
            remainder.push(line.to_owned());
        } else if let Some(inline) = line.trim_start().strip_prefix("```") {
            // Closing fence; anything after the backticks belongs to the body.
            fence_closed = true;
            let inline = inline.trim_start();
            if !inline.is_empty() {
                remainder.push(inline.to_owned());
            }
        } else {
            metadata.push(line);
        }
    }
    if !fence_closed {
        bail!("canonical backlog metadata fence is not closed");
    }
    let parsed = toml::from_str::<CanonicalIssueMetadata>(&metadata.join("\n"))
        .context("failed to parse canonical backlog metadata")?;
    Ok(Some((parsed, remainder.join("\n").trim().to_owned())))
}
#[cfg(any(feature = "extension-backlog", test))]
/// Maps every recognized `ccd/priority:*` label to its priority bucket,
/// preserving the input order; unrecognized labels are ignored.
fn parse_priority_labels(labels: &[String]) -> Vec<GitHubPriorityBucket> {
    let mut buckets = Vec::new();
    for label in labels {
        let bucket = match label.as_str() {
            GITHUB_PRIORITY_ACTIVE_NOW => GitHubPriorityBucket::ActiveNow,
            GITHUB_PRIORITY_NEXT => GitHubPriorityBucket::Next,
            GITHUB_PRIORITY_LATER => GitHubPriorityBucket::Later,
            GITHUB_PRIORITY_PARKED => GitHubPriorityBucket::Parked,
            _ => continue,
        };
        buckets.push(bucket);
    }
    buckets
}
#[cfg(any(feature = "extension-backlog", test))]
/// Maps every recognized `ccd/status:*` label to its canonical status
/// string, preserving the input order; unrecognized labels are ignored.
fn parse_status_labels(labels: &[String]) -> Vec<&str> {
    // Label → status lookup table; kept in one place so adding a status is
    // a single-row change.
    const STATUS_LABELS: [(&str, &str); 5] = [
        ("ccd/status:ready", "ready"),
        ("ccd/status:in-progress", "in-progress"),
        ("ccd/status:blocked", "blocked"),
        ("ccd/status:parked", "parked"),
        ("ccd/status:done", "done"),
    ];
    labels
        .iter()
        .filter_map(|label| {
            STATUS_LABELS
                .iter()
                .find(|(raw, _)| *raw == label.as_str())
                .map(|&(_, status)| status)
        })
        .collect()
}
#[cfg(any(feature = "extension-backlog", test))]
/// Splits a markdown body into `## `-headed sections.
///
/// Each `## Heading` line opens a (possibly empty) section; subsequent lines
/// are appended verbatim to the most recent section. Lines before the first
/// heading are dropped.
fn parse_sections(body: &str) -> BTreeMap<String, Vec<String>> {
    let mut sections: BTreeMap<String, Vec<String>> = BTreeMap::new();
    let mut active: Option<String> = None;
    for line in body.lines() {
        match line.strip_prefix("## ") {
            Some(heading) => {
                let name = heading.trim().to_owned();
                // Register the section even if it ends up with no body lines.
                sections.entry(name.clone()).or_default();
                active = Some(name);
            }
            None => {
                if let Some(name) = active.as_ref() {
                    sections.entry(name.clone()).or_default().push(line.to_owned());
                }
            }
        }
    }
    sections
}
#[cfg(any(feature = "extension-backlog", test))]
/// Joins a section's non-empty, trimmed lines with newlines.
///
/// Returns the empty string when the section is missing or contains only
/// blank lines.
fn section_text(sections: &BTreeMap<String, Vec<String>>, name: &str) -> String {
    let Some(lines) = sections.get(name) else {
        return String::new();
    };
    let mut parts: Vec<&str> = Vec::new();
    for line in lines {
        let trimmed = line.trim();
        if !trimmed.is_empty() {
            parts.push(trimmed);
        }
    }
    parts.join("\n")
}
#[cfg(any(feature = "extension-backlog", test))]
/// Extracts the bullet entries (`- ` or `* `) from a section.
///
/// Non-bullet lines and bullets whose content trims to empty are skipped;
/// a missing section yields an empty list.
fn section_list(sections: &BTreeMap<String, Vec<String>>, name: &str) -> Vec<String> {
    let mut entries = Vec::new();
    let Some(lines) = sections.get(name) else {
        return entries;
    };
    for line in lines {
        let trimmed = line.trim();
        let bullet = trimmed
            .strip_prefix("- ")
            .or_else(|| trimmed.strip_prefix("* "));
        if let Some(entry) = bullet {
            let entry = entry.trim();
            if !entry.is_empty() {
                entries.push(entry.to_owned());
            }
        }
    }
    entries
}
#[cfg(any(feature = "extension-backlog", test))]
/// Converts arbitrary text into a lowercase ASCII slug.
///
/// Alphanumeric characters are lowercased; every run of other characters
/// collapses into a single `-`. Leading separators are never emitted and a
/// trailing `-` is trimmed, so `"GitHub Integration!"` → `"github-integration"`.
pub fn slugify(value: &str) -> String {
    let mut slug = String::with_capacity(value.len());
    for ch in value.chars() {
        if ch.is_ascii_alphanumeric() {
            slug.push(ch.to_ascii_lowercase());
        } else if !slug.is_empty() && !slug.ends_with('-') {
            // Collapse separator runs: only emit a dash after a non-dash char.
            slug.push('-');
        }
    }
    slug.trim_matches('-').to_owned()
}
/// Strips a leading `[ccd#<id>] ` prefix from an issue title, if present.
///
/// Titles without a parseable prefix (or where the prefix text does not
/// exactly match the canonical `[ccd#<id>] ` rendering) are returned as-is.
pub(crate) fn display_title(title: &str) -> String {
    match parse_title_ccd_id(title) {
        Some(ccd_id) => title
            .strip_prefix(&format!("[ccd#{ccd_id}] "))
            .unwrap_or(title)
            .to_owned(),
        None => title.to_owned(),
    }
}
#[cfg(any(feature = "extension-backlog", test))]
/// Grouping dimension used by `promote_next_batch` when selecting a
/// cohesive batch of ready items.
#[derive(Debug, Clone)]
pub enum BatchGroupKey {
    /// Items grouped by a shared `roadmap_epic` value (highest preference).
    RoadmapEpic,
    /// Items grouped by a shared first `spec_refs` entry.
    SpecRef,
    /// Fallback grouping by the item's backlog `section`.
    Section,
}
#[cfg(any(feature = "extension-backlog", test))]
impl BatchGroupKey {
    /// Returns the stable, machine-readable name for this grouping kind
    /// (used in reports and assertions).
    pub fn kind_str(&self) -> &'static str {
        match self {
            Self::RoadmapEpic => "roadmap_epic",
            Self::SpecRef => "spec_ref",
            Self::Section => "section",
        }
    }
}
#[cfg(any(feature = "extension-backlog", test))]
/// A candidate item that `promote_next_batch` considered but did not select.
#[derive(Debug, Clone)]
pub struct PromoteNextExcluded {
    /// Canonical backlog id (`ccd#<id>`) of the excluded item.
    pub ccd_id: u64,
    /// Issue title as stored on the backlog item.
    pub title: String,
    /// Static exclusion reason: "batch full" (same group, over the batch
    /// size) or "different group" (lost the group election).
    pub reason: &'static str,
}
#[cfg(any(feature = "extension-backlog", test))]
/// Result of a `promote_next_batch` selection pass.
#[derive(Debug, Clone)]
pub struct PromoteNextBatch {
    /// Items chosen for promotion, sorted by `(priority_rank, ccd_id)`.
    pub selected: Vec<GitHubBacklogItem>,
    /// Which grouping dimension produced the winning group.
    pub group_key: BatchGroupKey,
    /// The winning group's value (epic name, spec path, or section);
    /// empty when there were no candidates.
    pub group_reason: String,
    /// Candidates that were eligible but not selected, with reasons.
    pub excluded: Vec<PromoteNextExcluded>,
}
#[cfg(any(feature = "extension-backlog", test))]
/// Selects the next cohesive batch of ready backlog items to promote.
///
/// Selection pipeline:
/// 1. Exclude items already in flight (status `in-progress`, or claimed).
/// 2. Exclude ready items whose `depends_on` references any still-active item.
/// 3. Group remaining candidates by, in preference order: `roadmap_epic`,
///    first `spec_refs` entry, then `section` (encoded as tier 0/1/2).
/// 4. Pick the largest group; on equal sizes the group containing the best
///    (lowest) `(priority_rank, ccd_id)` pair wins.
/// 5. Sort the winning group by `(priority_rank, ccd_id)` and take up to
///    `batch_size` items; all other candidates are reported as excluded.
pub fn promote_next_batch(cache: &GitHubBacklogCache, batch_size: usize) -> PromoteNextBatch {
    // Items already in flight: status says in-progress or someone claimed them.
    let active_batch_ids: BTreeSet<u64> = cache
        .items
        .iter()
        .filter(|item| item.status.eq_ignore_ascii_case("in-progress") || item.claimed_by.is_some())
        .map(|item| item.ccd_id)
        .collect();
    // Every still-active issue id; used to gate candidates on open dependencies.
    let open_issue_ids: BTreeSet<u64> = cache
        .items
        .iter()
        .filter(|item| item.is_active())
        .map(|item| item.ccd_id)
        .collect();
    // Eligible candidates: ready, not in flight, and no dependency still open.
    let candidates: Vec<&GitHubBacklogItem> = cache
        .active_items()
        .into_iter()
        .filter(|item| {
            item.is_ready()
                && !active_batch_ids.contains(&item.ccd_id)
                && !item.depends_on.iter().any(|id| open_issue_ids.contains(id))
        })
        .collect();
    if candidates.is_empty() {
        // Nothing promotable: return an empty batch with a neutral group key.
        return PromoteNextBatch {
            selected: vec![],
            group_key: BatchGroupKey::Section,
            group_reason: String::new(),
            excluded: vec![],
        };
    }
    // Group by (tier, value): tier 0 = roadmap epic, 1 = first spec ref,
    // 2 = section. The tier byte keeps the three namespaces disjoint.
    let mut groups: BTreeMap<(u8, String), Vec<&GitHubBacklogItem>> = BTreeMap::new();
    for item in &candidates {
        let key = if let Some(epic) = &item.roadmap_epic {
            (0u8, epic.clone())
        } else if let Some(spec) = item.spec_refs.first() {
            (1u8, spec.clone())
        } else {
            (2u8, item.section.clone())
        };
        groups.entry(key).or_default().push(item);
    }
    // Elect the winning group: larger wins; on equal size, the group holding
    // the best (lowest) (priority_rank, ccd_id) pair wins — note the
    // comparison is inverted (`b_best.cmp(&a_best)`) so the lower best key
    // compares as greater for `max_by`. NOTE(review): on a complete tie
    // `max_by` keeps the LAST maximal entry in BTreeMap key order.
    let winning_key = groups
        .iter()
        .max_by(|(_, a), (_, b)| {
            let a_len = a.len();
            let b_len = b.len();
            match a_len.cmp(&b_len) {
                std::cmp::Ordering::Equal => {
                    let a_best = a
                        .iter()
                        .map(|item| (item.priority_rank.unwrap_or(u64::MAX), item.ccd_id))
                        .min()
                        .unwrap_or((u64::MAX, u64::MAX));
                    let b_best = b
                        .iter()
                        .map(|item| (item.priority_rank.unwrap_or(u64::MAX), item.ccd_id))
                        .min()
                        .unwrap_or((u64::MAX, u64::MAX));
                    b_best.cmp(&a_best)
                }
                other => other,
            }
        })
        .map(|(k, _)| k.clone())
        .expect("groups is non-empty (checked candidates.is_empty() above)");
    let winning_group_items = &groups[&winning_key];
    // Order the winning group deterministically, then cap at batch_size.
    let mut sorted_group: Vec<&GitHubBacklogItem> = winning_group_items.to_vec();
    sorted_group.sort_by_key(|item| (item.priority_rank.unwrap_or(u64::MAX), item.ccd_id));
    let selected: Vec<GitHubBacklogItem> = sorted_group
        .iter()
        .take(batch_size)
        .map(|&item| item.clone())
        .collect();
    let selected_ids: BTreeSet<u64> = selected.iter().map(|i| i.ccd_id).collect();
    // Report every non-selected candidate with why it missed the batch.
    let excluded: Vec<PromoteNextExcluded> = candidates
        .iter()
        .filter(|item| !selected_ids.contains(&item.ccd_id))
        .map(|item| {
            let reason = if winning_group_items.iter().any(|g| g.ccd_id == item.ccd_id) {
                "batch full"
            } else {
                "different group"
            };
            PromoteNextExcluded {
                ccd_id: item.ccd_id,
                title: item.title.clone(),
                reason,
            }
        })
        .collect();
    // Decode the tier byte back into the public group-key enum.
    let group_key = match winning_key.0 {
        0 => BatchGroupKey::RoadmapEpic,
        1 => BatchGroupKey::SpecRef,
        _ => BatchGroupKey::Section,
    };
    PromoteNextBatch {
        selected,
        group_key,
        group_reason: winning_key.1,
        excluded,
    }
}
#[cfg(test)]
mod tests {
    use std::fs;
    use std::path::PathBuf;
    use crate::paths::state::StateLayout;
    use crate::profile::ProfileName;
    use super::*;
    /// Test helper mirroring the legacy "canonical parse" entry point:
    /// classifies the issue, then yields it only when it has usable
    /// metadata (`Enriched`/`Partial`) AND a non-zero ccd id.
    fn parse_canonical_issue(
        number: u64,
        title: &str,
        body: &str,
        url: &str,
        state: &str,
        labels: Vec<String>,
    ) -> Result<Option<GitHubBacklogItem>> {
        let item = classify_github_issue(number, title, body, url, state, labels, Vec::new())?;
        if !matches!(
            item.metadata_status,
            MetadataStatus::Enriched | MetadataStatus::Partial
        ) || !item.has_ccd_id()
        {
            return Ok(None);
        }
        Ok(Some(item))
    }
#[test]
fn parses_canonical_issue_body() {
let issue = parse_canonical_issue(
13,
"Bootstrap backlog export",
r#"```ccd-backlog
id = 13
kind = "item"
section = "github-integration"
status = "ready"
effort = "medium"
impact = "high"
depends_on = [31]
spec_refs = ["docs/specs/cli/2026-03-11-ccd-github-backlog-cutover-plan.md"]
```
## Summary
Export the markdown backlog into canonical GitHub Issues.
## Acceptance Criteria
- Bootstrap the issue set.
## Dependencies
- `ccd#31`
## Related Specs
- `docs/specs/cli/2026-03-11-ccd-github-backlog-cutover-plan.md`
## Operator Notes
- Migrated from `backlog.md`.
"#,
"https://github.com/example/repo/issues/13",
"OPEN",
vec!["ccd/type:item".to_owned()],
)
.expect("issue should parse")
.expect("canonical issue");
assert_eq!(issue.ccd_id, 13);
assert_eq!(issue.status, "ready");
assert_eq!(issue.depends_on, vec![31]);
assert_eq!(issue.related_specs.len(), 1);
}
#[test]
fn parses_prefixed_canonical_issue_title_for_compatibility() {
let issue = parse_canonical_issue(
13,
"[ccd#13] Bootstrap backlog export",
r#"```ccd-backlog
id = 13
kind = "item"
section = "github-integration"
status = "ready"
effort = "medium"
impact = "high"
```
## Summary
Export the markdown backlog into canonical GitHub Issues.
"#,
"https://github.com/example/repo/issues/13",
"OPEN",
vec!["ccd/type:item".to_owned()],
)
.expect("issue should parse")
.expect("canonical issue");
assert_eq!(display_title(&issue.title), "Bootstrap backlog export");
}
#[test]
fn ignores_legacy_agent_owner_metadata_for_github_claim_state() {
let issue = parse_canonical_issue(
13,
"[ccd#13] Bootstrap backlog export",
r#"```ccd-backlog
id = 13
kind = "item"
section = "github-integration"
status = "ready"
effort = "medium"
impact = "high"
agent_owner = "codex"
```
## Summary
Export the markdown backlog into canonical GitHub Issues.
"#,
"https://github.com/example/repo/issues/13",
"OPEN",
vec!["ccd/type:item".to_owned()],
)
.expect("issue should parse")
.expect("canonical issue");
assert_eq!(issue.claimed_by, None);
assert_eq!(issue.upstream_claim, UpstreamClaimState::Unclaimed);
}
#[test]
fn canonical_parse_skips_title_only_issue_without_metadata() {
let issue = parse_canonical_issue(
13,
"[ccd#13] Bootstrap backlog export",
"## Summary\n\nMissing metadata.\n",
"https://github.com/example/repo/issues/13",
"OPEN",
Vec::new(),
)
.expect("parse should succeed");
assert!(issue.is_none());
}
#[test]
fn classify_github_issue_treats_label_native_queue_membership_as_metadata_optional() {
let issue = classify_github_issue(
175,
"Provide a supported backlog-extension activation path",
"## Summary\n\nActivate GitHub backlog without manual config edits.\n",
"https://github.com/example/repo/issues/175",
"OPEN",
vec![
"ccd/module:backlog".to_owned(),
"ccd/module:commands".to_owned(),
"ccd/priority:active-now".to_owned(),
],
Vec::new(),
)
.expect("issue should classify");
assert_eq!(issue.ccd_id, 0);
assert_eq!(issue.status, "ready");
assert_eq!(issue.metadata_status, MetadataStatus::Absent);
assert_eq!(issue.queue_state, GitHubQueueState::QueueCandidate);
assert_eq!(issue.priority_label, Some(GitHubPriorityBucket::ActiveNow));
}
#[test]
fn classify_github_issue_marks_prefixed_title_without_metadata_as_invalid() {
let issue = classify_github_issue(
101,
"[ccd#13] Bootstrap backlog export",
"## Summary\n\nUse labels as the canonical discovery surface.\n",
"https://github.com/example/repo/issues/101",
"OPEN",
vec!["ccd/priority:next".to_owned()],
Vec::new(),
)
.expect("issue should classify");
assert_eq!(issue.ccd_id, 0);
assert_eq!(issue.metadata_status, MetadataStatus::Invalid);
assert_eq!(issue.queue_state, GitHubQueueState::QueueCandidate);
assert_eq!(issue.priority_label, Some(GitHubPriorityBucket::Next));
assert_eq!(
issue.metadata_error.as_deref(),
Some(
"title prefix `ccd#13` requires a matching `ccd-backlog` fence at the start of the issue body"
)
);
}
#[test]
fn classify_github_issue_accepts_inline_content_after_closing_metadata_fence() {
let issue = classify_github_issue(
146,
"[ccd#146] Inline fence close",
r#"```ccd-backlog
id = 146
kind = "item"
section = "feature"
status = "ready"
effort = "medium"
impact = "high"
```## Summary
Inline closing fence content should still parse.
"#,
"https://github.com/example/repo/issues/146",
"OPEN",
vec!["ccd/priority:next".to_owned()],
Vec::new(),
)
.expect("issue should classify");
assert_eq!(issue.ccd_id, 146);
assert_eq!(issue.metadata_status, MetadataStatus::Enriched);
assert_eq!(issue.metadata_error, None);
assert_eq!(
issue.summary,
"Inline closing fence content should still parse."
);
}
#[test]
fn slugify_normalizes_mixed_case_labels() {
assert_eq!(slugify("Medium-High"), "medium-high");
assert_eq!(slugify("GitHub Integration"), "github-integration");
}
#[test]
fn load_cache_reads_work_queue_path() {
let temp = tempfile::tempdir().expect("tempdir");
let layout = StateLayout::new(
temp.path().join(".ccd"),
temp.path().join(".git/ccd"),
ProfileName::new("main").expect("profile"),
);
fs::create_dir_all(layout.clone_profile_root()).expect("clone profile root");
let cache = GitHubBacklogCache::new(
"example/repo",
123,
vec![GitHubBacklogItem {
backlog_ref: BacklogRef {
provider: "github-issues".to_owned(),
kind: "issue".to_owned(),
id: "5".to_owned(),
url: "https://github.com/example/repo/issues/5".to_owned(),
},
ccd_id: 10,
github_issue_number: 5,
github_state: "open".to_owned(),
title: "[ccd#10] Example".to_owned(),
url: "https://github.com/example/repo/issues/5".to_owned(),
kind: "item".to_owned(),
section: "workflow-surfaces".to_owned(),
status: "ready".to_owned(),
effort: "medium".to_owned(),
impact: "high".to_owned(),
module: None,
priority_rank: None,
claimed_by: None,
depends_on: Vec::new(),
roadmap_epic: None,
spec_refs: Vec::new(),
summary: "Example".to_owned(),
acceptance_criteria: Vec::new(),
related_specs: Vec::new(),
operator_notes: Vec::new(),
labels: Vec::new(),
assignees: Vec::new(),
queue_state: GitHubQueueState::QueueCandidate,
priority_label: Some(GitHubPriorityBucket::Next),
metadata_status: MetadataStatus::Enriched,
metadata_error: None,
upstream_claim: UpstreamClaimState::Unclaimed,
dispatch_state: GitHubDispatchState::Ready,
}],
);
fs::write(
layout.work_queue_cache_path(),
serde_json::to_string_pretty(&cache).expect("serialize cache"),
)
.expect("write work queue cache");
let loaded = load_cache(&layout)
.expect("load cache")
.expect("cache exists");
let view = load_cache_view(&layout, 12).expect("load cache view");
assert_eq!(loaded.items[0].ccd_id, 10);
assert_eq!(PathBuf::from(view.path), layout.work_queue_cache_path());
}
    /// Builds a fully-populated test item. `agent_owner` doubles as both the
    /// `claimed_by` value and the single assignee, and drives the derived
    /// `upstream_claim` state.
    #[allow(clippy::too_many_arguments)]
    fn make_item(
        ccd_id: u64,
        status: &str,
        section: &str,
        priority_rank: Option<u64>,
        roadmap_epic: Option<&str>,
        spec_refs: Vec<&str>,
        depends_on: Vec<u64>,
        agent_owner: Option<&str>,
    ) -> GitHubBacklogItem {
        GitHubBacklogItem {
            backlog_ref: BacklogRef {
                provider: "github-issues".to_owned(),
                kind: "issue".to_owned(),
                id: (ccd_id * 10).to_string(),
                url: format!("https://example.com/issues/{ccd_id}"),
            },
            ccd_id,
            github_issue_number: ccd_id * 10,
            github_state: "open".to_owned(),
            title: format!("[ccd#{ccd_id}] test item {ccd_id}"),
            url: format!("https://example.com/issues/{ccd_id}"),
            kind: "item".to_owned(),
            section: section.to_owned(),
            status: status.to_owned(),
            effort: "medium".to_owned(),
            impact: "high".to_owned(),
            module: None,
            priority_rank,
            claimed_by: agent_owner.map(str::to_owned),
            depends_on,
            roadmap_epic: roadmap_epic.map(str::to_owned),
            spec_refs: spec_refs.iter().map(|&s| s.to_owned()).collect(),
            summary: format!("test item {ccd_id}"),
            acceptance_criteria: vec![],
            related_specs: vec![],
            operator_notes: vec![],
            labels: vec![],
            assignees: agent_owner
                .map(|owner| owner.to_owned())
                .into_iter()
                .collect(),
            queue_state: GitHubQueueState::QueueCandidate,
            priority_label: Some(GitHubPriorityBucket::Next),
            metadata_status: MetadataStatus::Enriched,
            metadata_error: None,
            upstream_claim: if agent_owner.is_some() {
                UpstreamClaimState::Claimed
            } else {
                UpstreamClaimState::Unclaimed
            },
            dispatch_state: GitHubDispatchState::Ready,
        }
    }
    /// Wraps items in a cache with a fixed repo slug and zero timestamp.
    fn make_cache(items: Vec<GitHubBacklogItem>) -> GitHubBacklogCache {
        GitHubBacklogCache::new("test/repo", 0, items)
    }
#[test]
fn classify_github_issue_seeds_persisted_dispatch_state_for_out_of_queue_items() {
let issue = classify_github_issue(
17,
"Plain GitHub issue",
"No CCD metadata here.",
"https://github.com/example/repo/issues/17",
"OPEN",
Vec::new(),
Vec::new(),
)
.expect("classification should succeed");
assert_eq!(issue.queue_state, GitHubQueueState::OutOfQueue);
assert_eq!(issue.dispatch_state, GitHubDispatchState::OutOfQueue);
}
#[test]
fn refresh_dispatch_states_recomputes_local_markdown_items_after_provider_override() {
let mut cache = make_cache(vec![make_item(
42,
"ready",
"workflow-surfaces",
Some(10),
None,
Vec::new(),
Vec::new(),
Some("alice"),
)]);
cache.provider = "local-markdown".to_owned();
cache.refresh_dispatch_states();
assert_eq!(
cache.items[0].dispatch_state,
GitHubDispatchState::UpstreamClaimed
);
}
#[test]
fn next_dispatch_item_skips_upstream_claimed_items() {
let cache = make_cache(vec![
make_item(
1,
"ready",
"sec-a",
Some(10),
None,
vec![],
vec![],
Some("@alice"),
),
make_item(2, "ready", "sec-a", Some(20), None, vec![], vec![], None),
]);
let next =
next_dispatch_item(&cache, &BTreeSet::new(), &BTreeSet::new()).expect("dispatch item");
assert_eq!(next.ccd_id, 2);
}
#[test]
fn promote_next_selects_cohesive_roadmap_epic_group() {
let cache = make_cache(vec![
make_item(
1,
"ready",
"sec-a",
Some(100),
Some("R1: Epic One"),
vec![],
vec![],
None,
),
make_item(
2,
"ready",
"sec-a",
Some(110),
Some("R1: Epic One"),
vec![],
vec![],
None,
),
make_item(
10,
"ready",
"sec-b",
Some(200),
Some("R4: Epic Four"),
vec![],
vec![],
None,
),
make_item(
20,
"ready",
"sec-b",
Some(210),
Some("R4: Epic Four"),
vec![],
vec![],
None,
),
make_item(
30,
"ready",
"sec-b",
Some(220),
Some("R4: Epic Four"),
vec![],
vec![],
None,
),
]);
let batch = promote_next_batch(&cache, 5);
assert_eq!(batch.selected.len(), 3);
assert!(
batch
.selected
.iter()
.all(|i| i.roadmap_epic.as_deref() == Some("R4: Epic Four")),
"all selected items must be from R4"
);
assert_eq!(batch.group_key.kind_str(), "roadmap_epic");
assert_eq!(batch.group_reason, "R4: Epic Four");
assert_eq!(batch.excluded.len(), 2);
assert!(batch.excluded.iter().all(|e| e.reason == "different group"));
}
#[test]
fn promote_next_falls_back_to_section_when_no_epic() {
let cache = make_cache(vec![
make_item(
1,
"ready",
"section-a",
Some(100),
None,
vec![],
vec![],
None,
),
make_item(
2,
"ready",
"section-a",
Some(110),
None,
vec![],
vec![],
None,
),
make_item(
3,
"ready",
"section-a",
Some(120),
None,
vec![],
vec![],
None,
),
make_item(
10,
"ready",
"section-b",
Some(200),
None,
vec![],
vec![],
None,
),
make_item(
20,
"ready",
"section-b",
Some(210),
None,
vec![],
vec![],
None,
),
]);
let batch = promote_next_batch(&cache, 5);
assert_eq!(batch.selected.len(), 3);
assert!(batch.selected.iter().all(|i| i.section == "section-a"));
assert_eq!(batch.group_key.kind_str(), "section");
assert_eq!(batch.group_reason, "section-a");
}
#[test]
fn promote_next_excludes_items_with_open_deps() {
let cache = make_cache(vec![
make_item(
1,
"in-progress",
"sec-a",
Some(100),
None,
vec![],
vec![],
None,
),
make_item(2, "ready", "sec-a", Some(110), None, vec![], vec![1], None),
make_item(3, "ready", "sec-b", Some(200), None, vec![], vec![], None),
]);
let batch = promote_next_batch(&cache, 5);
assert!(
!batch.selected.iter().any(|i| i.ccd_id == 2),
"ccd#2 depends on open ccd#1 and must not be selected"
);
assert_eq!(batch.selected.len(), 1);
assert_eq!(batch.selected[0].ccd_id, 3);
}
#[test]
fn promote_next_excludes_in_progress_items() {
let cache = make_cache(vec![
make_item(
1,
"in-progress",
"sec-a",
Some(100),
None,
vec![],
vec![],
None,
),
make_item(2, "ready", "sec-a", Some(110), None, vec![], vec![], None),
]);
let batch = promote_next_batch(&cache, 5);
assert!(!batch.selected.iter().any(|i| i.ccd_id == 1));
assert_eq!(batch.selected.len(), 1);
assert_eq!(batch.selected[0].ccd_id, 2);
}
#[test]
fn promote_next_respects_batch_size() {
let cache = make_cache(vec![
make_item(
1,
"ready",
"sec-a",
Some(100),
Some("R1"),
vec![],
vec![],
None,
),
make_item(
2,
"ready",
"sec-a",
Some(110),
Some("R1"),
vec![],
vec![],
None,
),
make_item(
3,
"ready",
"sec-a",
Some(120),
Some("R1"),
vec![],
vec![],
None,
),
make_item(
4,
"ready",
"sec-a",
Some(130),
Some("R1"),
vec![],
vec![],
None,
),
make_item(
5,
"ready",
"sec-a",
Some(140),
Some("R1"),
vec![],
vec![],
None,
),
]);
let batch = promote_next_batch(&cache, 3);
assert_eq!(batch.selected.len(), 3);
assert_eq!(batch.selected[0].ccd_id, 1);
assert_eq!(batch.selected[1].ccd_id, 2);
assert_eq!(batch.selected[2].ccd_id, 3);
assert_eq!(batch.excluded.len(), 2);
assert!(batch.excluded.iter().all(|e| e.reason == "batch full"));
assert_eq!(batch.excluded[0].ccd_id, 4);
assert_eq!(batch.excluded[0].title, "[ccd#4] test item 4");
assert_eq!(batch.excluded[1].ccd_id, 5);
assert_eq!(batch.excluded[1].title, "[ccd#5] test item 5");
}
#[test]
fn promote_next_returns_empty_when_no_candidates() {
let cache = make_cache(vec![
make_item(
1,
"in-progress",
"sec-a",
Some(100),
None,
vec![],
vec![],
None,
),
make_item(2, "ready", "sec-a", Some(110), None, vec![], vec![1], None),
]);
let batch = promote_next_batch(&cache, 5);
assert!(batch.selected.is_empty());
assert!(batch.excluded.is_empty());
}
#[test]
fn promote_next_breaks_ties_by_ccd_id() {
let cache = make_cache(vec![
make_item(
1,
"ready",
"sec-a",
Some(100),
Some("R1"),
vec![],
vec![],
None,
),
make_item(
2,
"ready",
"sec-a",
Some(100),
Some("R1"),
vec![],
vec![],
None,
),
make_item(
10,
"ready",
"sec-b",
Some(100),
Some("R2"),
vec![],
vec![],
None,
),
make_item(
20,
"ready",
"sec-b",
Some(100),
Some("R2"),
vec![],
vec![],
None,
),
]);
let batch = promote_next_batch(&cache, 5);
assert_eq!(batch.group_reason, "R1");
assert_eq!(batch.selected[0].ccd_id, 1);
assert_eq!(batch.selected[1].ccd_id, 2);
}
#[test]
fn promote_next_groups_by_spec_ref_when_no_epic() {
let cache = make_cache(vec![
make_item(
1,
"ready",
"sec-a",
Some(100),
None,
vec!["docs/specs/foo.md"],
vec![],
None,
),
make_item(
2,
"ready",
"sec-a",
Some(110),
None,
vec!["docs/specs/foo.md"],
vec![],
None,
),
make_item(
10,
"ready",
"sec-b",
Some(200),
None,
vec!["docs/specs/bar.md"],
vec![],
None,
),
]);
let batch = promote_next_batch(&cache, 5);
assert_eq!(batch.selected.len(), 2);
assert_eq!(batch.group_key.kind_str(), "spec_ref");
assert_eq!(batch.group_reason, "docs/specs/foo.md");
assert!(batch
.selected
.iter()
.all(|i| i.spec_refs.first().map(|s| s.as_str()) == Some("docs/specs/foo.md")));
}
    // A ready, unclaimed item means dispatchable work exists.
    #[test]
    fn has_dispatchable_work_true_when_ready_unclaimed_item_exists() {
        let cache = make_cache(vec![make_item(
            1,
            "ready",
            "sec-a",
            Some(10),
            None,
            vec![],
            vec![],
            None,
        )]);
        assert!(has_dispatchable_work(
            &cache,
            &BTreeSet::new(),
            &BTreeSet::new()
        ));
    }
    // Upstream-claimed items are not dispatchable.
    #[test]
    fn has_dispatchable_work_false_when_all_claimed() {
        let cache = make_cache(vec![make_item(
            1,
            "ready",
            "sec-a",
            Some(10),
            None,
            vec![],
            vec![],
            Some("@alice"),
        )]);
        assert!(!has_dispatchable_work(
            &cache,
            &BTreeSet::new(),
            &BTreeSet::new()
        ));
    }
    // An empty cache trivially has no work.
    #[test]
    fn has_dispatchable_work_false_for_empty_cache() {
        let cache = make_cache(vec![]);
        assert!(!has_dispatchable_work(
            &cache,
            &BTreeSet::new(),
            &BTreeSet::new()
        ));
    }
    // Items in the local-claims set (third argument) are also excluded.
    #[test]
    fn has_dispatchable_work_false_when_locally_claimed() {
        let cache = make_cache(vec![make_item(
            1,
            "ready",
            "sec-a",
            Some(10),
            None,
            vec![],
            vec![],
            None,
        )]);
        assert!(!has_dispatchable_work(
            &cache,
            &BTreeSet::new(),
            &BTreeSet::from([1])
        ));
    }
}