# yarli 0.2.0
#
# Configuration for the CLI, stream-mode renderer, interactive TUI,
# scheduler, store, and API documentation.
[core]
# Durable-by-default backend for write commands.
backend = "postgres"
safe_mode = "execute"

[postgres]
# Replace with your deployment DSN, or set DATABASE_URL in the runtime environment.
# For Kubernetes, mount credentials as a file and set this instead:
# database_url_file = "/run/secrets/yarli-postgres-url"
database_url = "postgres://postgres:postgres@localhost:5432/yarli"

[cli]
# Optional backend alias (codex | claude | gemini | custom).
# backend = "codex"
# arg | stdin
prompt_mode = "arg"
command = "codex"
args = ["exec", "--json"]
# Unset selected variables before launching the CLI command.
# env_unset = ["CLAUDECODE"]

[features]
# Parallel execution is enabled by default.
parallel = true
# Use git worktrees instead of full directory copies for parallel workspaces.
# Falls back to copy mode when git worktrees are unavailable.
# parallel_worktree = true

[queue]
claim_batch_size = 4
lease_ttl_seconds = 60
heartbeat_interval_seconds = 5
reclaim_interval_seconds = 10
reclaim_grace_seconds = 15
per_run_cap = 8
io_cap = 4
cpu_cap = 2
git_cap = 2
tool_cap = 2

[execution]
# native | overwatch
runner = "native"
# `~` and `$ENV_VAR` tokens are expanded for execution paths.
working_dir = "."
# Required when [features].parallel = true.
# Leave unset to force an explicit per-project choice before `yarli run`.
# Task workspaces are created under this root and auto-merged on RunCompleted.
# worktree_root = ".yarl/workspaces"
# Directory names or paths to exclude from per-task workspace copies.
# Helps avoid copying large/generated trees in parallel mode.
worktree_exclude_paths = [".yarl/workspaces", ".yarli", "target", "node_modules", ".venv", "venv", "__pycache__"]
command_timeout_seconds = 900
tick_interval_ms = 200

[execution.overwatch]
# Used only when execution.runner = "overwatch"
service_url = "http://127.0.0.1:8089"
# profile = "default"
# soft_timeout_seconds = 900
# silent_timeout_seconds = 300
# max_log_bytes = 131072

[run]
# Optional default prompt file for `yarli run`.
# Resolution precedence: --prompt-file > run.prompt_file > PROMPT.md fallback.
# prompt_file = "PROMPT.md"
# Optional default objective when no prompt override is present.
# objective = "verify workspace"
continue_wait_timeout_seconds = 0
# Legacy compatibility toggle; prefer run.auto_advance_policy.
allow_stable_auto_advance = false
# improving-only | stable-ok | always
auto_advance_policy = "stable-ok"
# Optional task-health actions keyed by trend (improving / stable / deteriorating).
# Allowed actions: checkpoint-now | force-pivot | stop-and-summarize | continue.
[run.task_health]
improving = "continue"
stable = "continue"
deteriorating = "continue"
# Soft cap ratio of [budgets].max_run_total_tokens that triggers checkpoint-now.
# Set to 0 to disable.
# NOTE(review): this key and the ones below are parsed under [run.task_health]
# because of the header above, even though their comments describe run-level
# behavior — confirm the consumer reads them here and not under [run].
soft_token_cap_ratio = 0.9
# 0 = unlimited auto-advance tranches in one invocation.
max_auto_advance_tranches = 0
# Group adjacent open plan entries with matching `tranche_group=<name>` metadata.
enable_plan_tranche_grouping = false
# 0 = unlimited tasks per grouped tranche.
max_grouped_tasks_per_tranche = 0
# Surface `allowed_paths=...` plan metadata as explicit per-tranche scope constraints.
enforce_plan_tranche_allowed_paths = false
# Merge strategy for parallel workspace patch conflicts: fail | manual | auto-repair
# merge_conflict_resolution = "fail"
# Auto-commit YARLI state files after every N tranches (0 = disabled).
# auto_commit_interval = 1
# Template for auto-commit messages.
# auto_commit_message = "chore(state): checkpoint runtime state"
# Optional run-spec task catalog (project-level verification/work commands).
# [[run.tasks]]
# key = "lint"
# cmd = "cargo clippy --workspace -- -D warnings"
# class = "cpu"
#
# [[run.tasks]]
# key = "test"
# cmd = "cargo test --workspace"
# class = "io"
#
# Optional explicit tranche ordering for run-spec execution.
# [[run.tranches]]
# key = "verify"
# objective = "verification tranche"
# task_keys = ["lint", "test"]
#
# Optional plan guard for run-spec execution.
# [run.plan_guard]
# target = "CARD-R8-01"
# mode = "implement"

[budgets]
# Optional hard limits; unset values are unlimited.
# Keys prefixed max_task_ apply per task; keys prefixed max_run_ apply per run.
# Token accounting currently uses deterministic char-count estimates.
max_task_total_tokens = 25000
max_run_total_tokens = 250000
max_task_rss_bytes = 1073741824      # 1 GiB
max_run_peak_rss_bytes = 2147483648  # 2 GiB

[policy]
enforce_policies = true
audit_decisions = true

[memory]
# Enable memory integrations and choose a named provider from [memory.providers.<name>].
# enabled = true
# project_id = "my-project"
# provider = "default"

# Generic provider registry (adapter pattern).
# Configure one or more providers and select with [memory].provider.
[memory.providers.default]
type = "cli"
enabled = false
command = "memory-backend"
# project_dir = "."
query_limit = 8
inject_on_run_start = true
inject_on_failure = true

# Example alternative plugin provider:
# [memory.providers.kafka]
# type = "cli"
# enabled = false
# command = "memory-kafka-adapter"
# query_limit = 8
# inject_on_run_start = true
# inject_on_failure = true

# Legacy fallback (still supported):
# [memory.backend]
# enabled = true
# command = "memory-backend"
# query_limit = 8

[observability]
audit_file = ".yarl/audit.jsonl"

[ui]
# auto | stream | tui
mode = "auto"
# Stream command output to terminal scrollback when true.
verbose_output = false
# Attach additional diagnostic context to cancellation provenance when true.
cancellation_diagnostics = false

# Explicit ephemeral override for local throwaway usage only.
# Uncomment to allow write commands with core.backend = "in-memory".
#
# [core]
# allow_in_memory_writes = true