//! HTTP API surface for `tandem_server` (`http.rs`): route submodules,
//! request/response DTOs, and the `serve` entry point that runs the axum
//! server together with the engine's long-lived background tasks.
1use std::collections::HashMap;
2use std::net::SocketAddr;
3use std::path::Path as FsPath;
4use std::path::PathBuf;
5use std::sync::Arc;
6use std::time::Duration;
7
8use async_trait::async_trait;
9use axum::extract::Extension;
10use axum::extract::{Path, Query, State};
11use axum::http::header::{self, HeaderValue};
12use axum::http::{HeaderMap, StatusCode};
13use axum::response::sse::{Event, KeepAlive, Sse};
14use axum::response::IntoResponse;
15use axum::response::Response;
16use axum::{Json, Router};
17use futures::Stream;
18use regex::Regex;
19use serde::{Deserialize, Serialize};
20use serde_json::{json, Value};
21use sha2::{Digest, Sha256};
22use tandem_memory::types::GlobalMemoryRecord;
23use tandem_memory::{
24    db::MemoryDatabase, MemoryCapabilities, MemoryCapabilityToken, MemoryPromoteRequest,
25    MemoryPromoteResponse, MemoryPutRequest, MemoryPutResponse, MemorySearchRequest,
26    MemorySearchResponse, ScrubReport, ScrubStatus,
27};
28use tandem_skills::{SkillBundleArtifacts, SkillLocation, SkillService, SkillsConflictPolicy};
29use tokio_stream::wrappers::BroadcastStream;
30use tokio_stream::StreamExt;
31use uuid::Uuid;
32
33use tandem_tools::Tool;
34use tandem_types::{
35    CreateSessionRequest, EngineEvent, Message, MessagePart, MessagePartInput, MessageRole,
36    SendMessageRequest, Session, TenantContext, TodoItem, ToolResult, ToolSchema,
37};
38use tandem_wire::{WireSession, WireSessionMessage};
39
40use crate::ResourceStoreError;
41use crate::{
42    capability_resolver::{
43        classify_missing_required, providers_for_capability, CapabilityBindingsFile,
44        CapabilityBlockingIssue, CapabilityReadinessInput, CapabilityReadinessOutput,
45        CapabilityResolveInput,
46    },
47    mcp_catalog,
48    pack_manager::{PackExportRequest, PackInstallRequest, PackUninstallRequest},
49    ActiveRun, AppState, DiscordConfigFile, SlackConfigFile, TelegramConfigFile,
50};
51
52mod approvals;
53mod automation_projection_runtime;
54pub(crate) mod bug_monitor;
55mod capabilities;
56pub(crate) mod channel_automation_drafts;
57mod channels_api;
58mod coder;
59pub(crate) mod config_providers;
60pub(crate) mod context_packs;
61mod context_run_ledger;
62mod context_run_mutation_checkpoints;
63pub(crate) mod context_runs;
64pub(crate) mod context_types;
65mod discord_interactions;
66mod enterprise;
67mod external_actions;
68mod global;
69pub(crate) mod governance;
70mod marketplace;
71pub(crate) mod mcp;
72pub(crate) mod mcp_discovery;
73mod middleware;
74mod mission_builder;
75mod mission_builder_host;
76mod mission_builder_runtime;
77mod missions_teams;
78mod optimizations;
79mod pack_builder;
80mod packs;
81mod permissions_questions;
82mod presets;
83mod resources;
84mod router;
85mod routes_approvals;
86mod routes_bug_monitor;
87mod routes_capabilities;
88mod routes_channel_automation_drafts;
89mod routes_coder;
90mod routes_config_providers;
91mod routes_context;
92mod routes_external_actions;
93mod routes_global;
94mod routes_governance;
95mod routes_marketplace;
96mod routes_mcp;
97mod routes_mission_builder;
98mod routes_missions_teams;
99mod routes_optimizations;
100mod routes_pack_builder;
101mod routes_packs;
102mod routes_permissions_questions;
103mod routes_presets;
104mod routes_resources;
105mod routes_routines_automations;
106mod routes_sessions;
107mod routes_setup_understanding;
108mod routes_skills_memory;
109mod routes_system_api;
110mod routes_task_intake;
111mod routes_workflow_planner;
112mod routes_workflows;
113pub(crate) mod routines_automations;
114mod session_kb_grounding;
115mod sessions;
116mod setup_understanding;
117mod skills_memory;
118mod slack_interactions;
119mod system_api;
120mod task_intake;
121mod telegram_interactions;
122pub(crate) mod workflow_planner;
123mod workflow_planner_host;
124mod workflow_planner_policy;
125pub(crate) mod workflow_planner_runtime;
126mod workflow_planner_transport;
127mod workflows;
128
129use capabilities::*;
130use context_runs::*;
131use context_types::*;
132use marketplace::*;
133use mcp::*;
134use pack_builder::*;
135use packs::*;
136use permissions_questions::*;
137use presets::*;
138use resources::*;
139use sessions::*;
140use setup_understanding::*;
141use skills_memory::*;
142use system_api::*;
143
144#[cfg(test)]
145pub(crate) use context_runs::session_context_run_id;
146pub(crate) use context_runs::sync_workflow_run_blackboard;
147#[cfg(test)]
148pub(crate) use context_runs::workflow_context_run_id;
149pub(crate) use workflow_planner_runtime::compile_plan_to_automation_v2;
150
/// Query parameters for the session-list endpoint.
#[derive(Debug, Deserialize)]
struct ListSessionsQuery {
    // Optional free-text filter.
    q: Option<String>,
    // Pagination; defaults are applied by the handler — TODO confirm values.
    page: Option<usize>,
    page_size: Option<usize>,
    // When set, restrict to archived (true) or non-archived (false) sessions.
    archived: Option<bool>,
    scope: Option<SessionScope>,
    // Restrict to a single workspace — presumably an id/path; verify in handler.
    workspace: Option<String>,
}
160
/// Optional filters for the engine event stream. Wire names use camelCase
/// (`sessionID` / `runID`) to match the event payload field names published
/// elsewhere in this module (see the `session.run.finished` payload in `serve`).
#[derive(Debug, Deserialize, Default)]
struct EventFilterQuery {
    #[serde(rename = "sessionID")]
    session_id: Option<String>,
    #[serde(rename = "runID")]
    run_id: Option<String>,
}
168
/// Windowing controls for reading a run's JSONL event log
/// (consumed via `load_run_events_jsonl`).
#[derive(Debug, Deserialize, Default, Clone, Copy)]
struct RunEventsQuery {
    // Only return events with `seq` strictly greater than this value.
    since_seq: Option<u64>,
    // After filtering/sorting, keep only the last `tail` events.
    tail: Option<usize>,
}
174
/// Query parameters for async prompt submission.
#[derive(Debug, Deserialize, Default)]
struct PromptAsyncQuery {
    // Raw identifier: the wire parameter is literally `return`, a Rust keyword.
    r#return: Option<String>,
}
179
/// Request body for acquiring an engine lease.
#[derive(Debug, Deserialize)]
struct EngineLeaseAcquireInput {
    client_id: Option<String>,
    client_type: Option<String>,
    // Requested lease time-to-live in milliseconds; handler default applies when absent.
    ttl_ms: Option<u64>,
}
186
/// Request body for renewing an existing engine lease.
#[derive(Debug, Deserialize)]
struct EngineLeaseRenewInput {
    lease_id: String,
}
191
/// Request body for releasing an engine lease.
#[derive(Debug, Deserialize)]
struct EngineLeaseReleaseInput {
    lease_id: String,
}
196
/// Request body for the storage-repair endpoint.
#[derive(Debug, Deserialize, Default)]
struct StorageRepairInput {
    // When true, presumably forces repair even if checks pass — confirm in handler.
    force: Option<bool>,
}
201
/// Query parameters for listing storage files.
#[derive(Debug, Deserialize, Default)]
struct StorageFilesQuery {
    // Optional subpath to list under; root when absent — TODO confirm.
    path: Option<String>,
    // Maximum number of entries to return.
    limit: Option<usize>,
}
207
/// Partial-update body for a session; only provided fields are changed.
#[derive(Debug, Deserialize, Default)]
struct UpdateSessionInput {
    title: Option<String>,
    archived: Option<bool>,
    // Permission entries are passed through as opaque JSON; the handler
    // interprets their schema.
    permission: Option<Vec<serde_json::Value>>,
}
214
/// Request body for attaching a session to another workspace.
#[derive(Debug, Deserialize)]
struct AttachSessionInput {
    target_workspace: String,
    // Optional audit tag explaining why the session was moved — presumably; confirm.
    reason_tag: Option<String>,
}
220
/// Request body for a temporary workspace override.
#[derive(Debug, Deserialize)]
struct WorkspaceOverrideInput {
    // Override lifetime in seconds; handler default applies when absent.
    ttl_seconds: Option<u64>,
}
225
/// Request body shared by the worktree endpoints. Every field is optional
/// at the wire level; each handler decides which ones it requires.
#[derive(Debug, Deserialize, Default)]
struct WorktreeInput {
    repo_root: Option<String>,
    path: Option<String>,
    branch: Option<String>,
    // Base ref the branch is created from — presumably; verify in handler.
    base: Option<String>,
    task_id: Option<String>,
    owner_run_id: Option<String>,
    lease_id: Option<String>,
    // Whether the worktree is engine-managed — TODO confirm implications.
    managed: Option<bool>,
    // On removal, also delete the associated branch — TODO confirm.
    cleanup_branch: Option<bool>,
}
238
/// Query parameters for listing worktrees.
#[derive(Debug, Deserialize, Default)]
struct WorktreeListQuery {
    repo_root: Option<String>,
    // When true, restrict results to engine-managed worktrees.
    managed_only: Option<bool>,
}
244
/// Client-submitted log entry.
#[derive(Debug, Deserialize, Default)]
struct LogInput {
    // Severity label (e.g. "info"/"warn") — interpretation is up to the handler.
    level: Option<String>,
    message: Option<String>,
    // Arbitrary structured context attached to the entry.
    context: Option<Value>,
}
251
/// Standard JSON error body returned by handlers in this module.
#[derive(Debug, Serialize)]
struct ErrorEnvelope {
    // Human-readable error description.
    error: String,
    // Optional machine-readable code; omitted from the JSON when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    code: Option<String>,
}
258
/// Runs the HTTP server on `addr` until shutdown.
///
/// Spawns every long-lived background task the engine needs (run reaper,
/// persisters, schedulers/executors, archivers, hygiene and health checks),
/// builds the axum router, then serves it. On ctrl-c the automation
/// schedulers are flagged to stop, still-running automation runs are marked
/// failed after a grace period, and all spawned tasks are aborted before the
/// serve result is propagated.
///
/// # Errors
/// Returns an error if binding the TCP listener fails or the axum server
/// itself errors.
pub async fn serve(addr: SocketAddr, state: AppState) -> anyhow::Result<()> {
    // Each background task receives its own clone of the shared AppState handle.
    let reaper_state = state.clone();
    let session_part_persister_state = state.clone();
    let session_context_run_journaler_state = state.clone();
    let status_indexer_state = state.clone();
    let routine_scheduler_state = state.clone();
    let routine_executor_state = state.clone();
    let usage_aggregator_state = state.clone();
    let automation_v2_scheduler_state = state.clone();
    let automation_v2_executor_state = state.clone();
    let optimization_scheduler_state = state.clone();
    let workflow_dispatcher_state = state.clone();
    let agent_team_supervisor_state = state.clone();
    let global_memory_ingestor_state = state.clone();
    let bug_monitor_state = state.clone();
    let governance_health_state = state.clone();
    let mcp_bootstrap_state = state.clone();
    // MCP servers bootstrap in the background; this task is detached (never aborted).
    tokio::spawn(async move {
        bootstrap_mcp_servers_when_ready(mcp_bootstrap_state).await;
    });
    let app = app_router(state.clone());
    // Run reaper: once startup is ready, every 5s cancels stale runs, closes
    // their browser sessions, and publishes a timeout-finished event.
    let reaper = tokio::spawn(async move {
        if !reaper_state.wait_until_ready_or_failed(120, 250).await {
            let startup = reaper_state.startup_snapshot().await;
            tracing::warn!(
                component = "run_reaper",
                startup_status = ?startup.status,
                startup_phase = %startup.phase,
                attempt_id = %startup.attempt_id,
                "run reaper exiting before runtime access because startup did not become ready"
            );
            return;
        }
        loop {
            tokio::time::sleep(Duration::from_secs(5)).await;
            let stale = reaper_state
                .run_registry
                .reap_stale(reaper_state.run_stale_ms)
                .await;
            for (session_id, run) in stale {
                // Best-effort cleanup: failures to cancel/close are ignored.
                let _ = reaper_state.cancellations.cancel(&session_id).await;
                let _ = reaper_state
                    .close_browser_sessions_for_owner(&session_id)
                    .await;
                reaper_state.event_bus.publish(EngineEvent::new(
                    "session.run.finished",
                    json!({
                        "sessionID": session_id,
                        "runID": run.run_id,
                        "finishedAtMs": crate::now_ms(),
                        "status": "timeout",
                    }),
                ));
            }
        }
    });
    let session_part_persister = tokio::spawn(crate::run_session_part_persister(
        session_part_persister_state,
    ));
    let session_context_run_journaler = tokio::spawn(crate::run_session_context_run_journaler(
        session_context_run_journaler_state,
    ));
    let status_indexer = tokio::spawn(crate::run_status_indexer(status_indexer_state));
    let routine_scheduler = tokio::spawn(crate::run_routine_scheduler(routine_scheduler_state));
    let routine_executor = tokio::spawn(crate::run_routine_executor(routine_executor_state));
    let usage_aggregator = tokio::spawn(crate::run_usage_aggregator(usage_aggregator_state));
    let automation_v2_scheduler = tokio::spawn(crate::run_automation_v2_scheduler(
        automation_v2_scheduler_state,
    ));
    let automation_v2_executor = tokio::spawn(crate::run_automation_v2_executor(
        automation_v2_executor_state,
    ));
    let optimization_scheduler = tokio::spawn(crate::run_optimization_scheduler(
        optimization_scheduler_state,
    ));
    let workflow_dispatcher =
        tokio::spawn(crate::run_workflow_dispatcher(workflow_dispatcher_state));
    let agent_team_supervisor = tokio::spawn(crate::run_agent_team_supervisor(
        agent_team_supervisor_state,
    ));
    let bug_monitor = tokio::spawn(crate::run_bug_monitor(bug_monitor_state));
    let global_memory_ingestor =
        tokio::spawn(run_global_memory_ingestor(global_memory_ingestor_state));
    let shutdown_state = state.clone();
    let shutdown_timeout_secs = crate::config::env::resolve_scheduler_shutdown_timeout_secs();

    // --- Memory hygiene background task (runs every 12 hours) ---
    // Opens a fresh connection to memory.sqlite each cycle — safe because WAL
    // mode allows concurrent readers alongside the main engine connection.
    let hygiene_task = tokio::spawn(async move {
        // Initial delay so startup is not impacted.
        tokio::time::sleep(Duration::from_secs(60)).await;
        loop {
            // Retention window is re-read each cycle so the env var can be
            // changed without a restart; 0 disables hygiene for that cycle.
            let retention_days: u32 = std::env::var("TANDEM_MEMORY_RETENTION_DAYS")
                .ok()
                .and_then(|v| v.parse().ok())
                .unwrap_or(30);
            if retention_days > 0 {
                match tandem_core::resolve_shared_paths() {
                    Ok(paths) => {
                        match tandem_memory::db::MemoryDatabase::new(&paths.memory_db_path).await {
                            Ok(db) => {
                                if let Err(e) = db.run_hygiene(retention_days).await {
                                    tracing::warn!("memory hygiene failed: {}", e);
                                }
                            }
                            Err(e) => tracing::warn!("memory hygiene: could not open DB: {}", e),
                        }
                    }
                    Err(e) => tracing::warn!("memory hygiene: could not resolve paths: {}", e),
                }
            }
            tokio::time::sleep(Duration::from_secs(12 * 60 * 60)).await;
        }
    });

    // --- Automation v2 runs archiver (runs at startup, then every 24h) ---
    // Moves terminal (completed/failed/blocked/cancelled) runs older than
    // TANDEM_AUTOMATION_V2_RUNS_RETENTION_DAYS (default 7) from the hot runs
    // file to a sidecar archive file. The hot file is rewritten on every run
    // status change, so keeping it small is critical for persistence
    // throughput. Without this, the file grows unbounded and state writes
    // slow to the point that in-memory state lags on-disk state by minutes.
    let archiver_state = state.clone();
    let _automation_v2_archiver = tokio::spawn(async move {
        // Wait for startup to reach Ready so runtime-backed state is safe.
        loop {
            if archiver_state.is_automation_scheduler_stopping() {
                return;
            }
            let startup = archiver_state.startup_snapshot().await;
            if matches!(startup.status, crate::app::startup::StartupStatus::Ready) {
                break;
            }
            if matches!(startup.status, crate::app::startup::StartupStatus::Failed) {
                return;
            }
            tokio::time::sleep(Duration::from_millis(250)).await;
        }
        loop {
            let retention_days: u64 = std::env::var("TANDEM_AUTOMATION_V2_RUNS_RETENTION_DAYS")
                .ok()
                .and_then(|v| v.trim().parse().ok())
                .unwrap_or(7);
            if retention_days > 0 {
                match archiver_state
                    .archive_stale_automation_v2_runs(retention_days)
                    .await
                {
                    Ok(n) if n > 0 => {
                        tracing::info!(
                            archived = n,
                            retention_days,
                            "automation v2 archiver: pruned stale terminal runs"
                        );
                    }
                    Ok(_) => {}
                    Err(e) => {
                        tracing::warn!(error = %e, "automation v2 archiver: archive failed");
                    }
                }
            }
            tokio::time::sleep(Duration::from_secs(24 * 60 * 60)).await;
        }
    });

    // Governance health checker: same startup-wait pattern as the archiver,
    // then runs on the governance-configured interval (clamped to >= 60s).
    let automation_governance_health_checker = tokio::spawn(async move {
        loop {
            if governance_health_state.is_automation_scheduler_stopping() {
                return;
            }
            let startup = governance_health_state.startup_snapshot().await;
            if matches!(startup.status, crate::app::startup::StartupStatus::Ready) {
                break;
            }
            if matches!(startup.status, crate::app::startup::StartupStatus::Failed) {
                return;
            }
            tokio::time::sleep(Duration::from_millis(250)).await;
        }
        loop {
            let interval_ms = governance_health_state
                .automation_governance
                .read()
                .await
                .limits
                .health_check_interval_ms
                .max(60 * 1000);
            match governance_health_state
                .run_automation_governance_health_check()
                .await
            {
                Ok(count) if count > 0 => {
                    tracing::info!(
                        finding_count = count,
                        "automation governance health check recorded findings"
                    );
                }
                Ok(_) => {}
                Err(error) => {
                    tracing::warn!(error = %error, "automation governance health check failed");
                }
            }
            tokio::time::sleep(Duration::from_millis(interval_ms)).await;
        }
    });

    // Channel listeners are started during runtime initialization
    // (`initialize_runtime()` in `engine/src/main.rs`) so `serve()` only owns
    // the HTTP server lifecycle.
    let listener = tokio::net::TcpListener::bind(addr).await?;
    let result = axum::serve(listener, app)
        .with_graceful_shutdown(async move {
            // If ctrl-c registration fails, pend forever so shutdown is never
            // triggered spuriously from this branch.
            if tokio::signal::ctrl_c().await.is_err() {
                futures::future::pending::<()>().await;
            }
            shutdown_state.set_automation_scheduler_stopping(true);
            // Grace period for schedulers to wind down before force-failing runs.
            tokio::time::sleep(Duration::from_secs(shutdown_timeout_secs)).await;
            let failed = shutdown_state
                .fail_running_automation_runs_for_shutdown()
                .await;
            if failed > 0 {
                tracing::warn!(
                    failed_runs = failed,
                    "automation runs marked failed during scheduler shutdown"
                );
            }
        })
        .await;
    // Server has exited; tear down all background tasks before returning.
    reaper.abort();
    session_part_persister.abort();
    session_context_run_journaler.abort();
    status_indexer.abort();
    routine_scheduler.abort();
    routine_executor.abort();
    usage_aggregator.abort();
    automation_v2_scheduler.abort();
    automation_v2_executor.abort();
    optimization_scheduler.abort();
    workflow_dispatcher.abort();
    agent_team_supervisor.abort();
    bug_monitor.abort();
    global_memory_ingestor.abort();
    hygiene_task.abort();
    automation_governance_health_checker.abort();
    result?;
    Ok(())
}
507
/// Builds the full axum `Router`; route wiring lives in `router::build_router`.
fn app_router(state: AppState) -> Router {
    router::build_router(state)
}
511fn load_run_events_jsonl(path: &FsPath, since_seq: Option<u64>, tail: Option<usize>) -> Vec<Value> {
512    let content = match std::fs::read_to_string(path) {
513        Ok(value) => value,
514        Err(_) => return Vec::new(),
515    };
516    let mut rows: Vec<Value> = content
517        .lines()
518        .filter_map(|line| serde_json::from_str::<Value>(line).ok())
519        .filter(|row| {
520            if let Some(since) = since_seq {
521                return row.get("seq").and_then(|value| value.as_u64()).unwrap_or(0) > since;
522            }
523            true
524        })
525        .collect();
526    rows.sort_by_key(|row| row.get("seq").and_then(|value| value.as_u64()).unwrap_or(0));
527    if let Some(tail_count) = tail {
528        if rows.len() > tail_count {
529            rows = rows.split_off(rows.len().saturating_sub(tail_count));
530        }
531    }
532    rows
533}
534
535pub(super) fn truncate_for_stream(input: &str, max_len: usize) -> String {
536    if input.len() <= max_len {
537        return input.to_string();
538    }
539    let mut end = 0usize;
540    for (idx, ch) in input.char_indices() {
541        let next = idx + ch.len_utf8();
542        if next > max_len {
543            break;
544        }
545        end = next;
546    }
547    let mut out = input[..end].to_string();
548    out.push_str("...<truncated>");
549    out
550}
551
552#[cfg(test)]
553mod tests;