pub mod endpoint;
use std::time::Duration;

use metrics::{
    counter, describe_counter, describe_gauge, describe_histogram, gauge, histogram, Unit,
};
use metrics_exporter_prometheus::{PrometheusBuilder, PrometheusHandle};
// Metric name constants. Keeping them in one place guarantees that the
// describe_* registrations in `init_metrics` and the emit helpers below
// always agree on the exported series names.

// Gauge: outbound queue depth, labeled by session_id.
const QUEUE_DEPTH: &str = "swimmers_session_queue_depth";
// Gauge: outbound queue size in bytes, labeled by session_id.
const QUEUE_BYTES: &str = "swimmers_session_queue_bytes";
// Gauge: number of live session actors (no labels).
const ACTIVE_SESSIONS: &str = "swimmers_active_sessions";
// Counter: overload events, labeled by session_id.
const OVERLOAD_EVENTS: &str = "swimmers_overload_events_total";
// Gauge: one series per (session_id, state); exactly one state is 1.0 at a time.
const THOUGHT_LIFECYCLE_STATE: &str = "swimmers_thought_lifecycle_state";
// Counter: model call outcomes, labeled by session_id/path/tier/outcome.
const THOUGHT_MODEL_CALLS: &str = "swimmers_thought_model_calls_total";
// Counter: suppressed thoughts, labeled by session_id/reason/tier.
const THOUGHT_SUPPRESSIONS: &str = "swimmers_thought_suppressions_total";
// Histogram: generation latency in seconds, labeled by session_id/path/tier.
const THOUGHT_GENERATION_LATENCY: &str = "swimmers_thought_generation_seconds";
/// Installs the global Prometheus recorder and registers descriptions and
/// units for every metric this module emits.
///
/// Must be called once at startup, before any of the emit helpers below run,
/// so the recorder captures all samples.
///
/// Returns the [`PrometheusHandle`] used to render the scrape payload.
///
/// # Panics
/// Panics if a global metrics recorder has already been installed
/// (i.e. if this function is called twice).
pub fn init_metrics() -> PrometheusHandle {
    let handle = PrometheusBuilder::new()
        .install_recorder()
        .expect("failed to install Prometheus metrics recorder");

    // Register units alongside descriptions so Prometheus consumers can
    // interpret the series without consulting the source.
    describe_gauge!(
        QUEUE_DEPTH,
        Unit::Count,
        "Current outbound queue depth per session actor"
    );
    describe_gauge!(
        QUEUE_BYTES,
        Unit::Bytes,
        "Current outbound queue byte size per session actor"
    );
    describe_gauge!(
        ACTIVE_SESSIONS,
        Unit::Count,
        "Number of active session actors"
    );
    describe_counter!(
        OVERLOAD_EVENTS,
        Unit::Count,
        "Total overload events emitted"
    );
    // Lifecycle state is a 0/1 indicator gauge; a unit would be misleading.
    describe_gauge!(
        THOUGHT_LIFECYCLE_STATE,
        "Per-session thought lifecycle state (labels: session_id, state)"
    );
    describe_counter!(
        THOUGHT_MODEL_CALLS,
        Unit::Count,
        "Thought model call outcomes by path/tier/outcome"
    );
    describe_counter!(
        THOUGHT_SUPPRESSIONS,
        Unit::Count,
        "Thought suppressions by reason and cadence tier"
    );
    describe_histogram!(
        THOUGHT_GENERATION_LATENCY,
        Unit::Seconds,
        "Thought generation latency by path and cadence tier"
    );

    handle
}
/// Sets the outbound-queue-depth gauge for one session actor.
pub fn record_queue_depth(session_id: &str, depth: usize) {
    let g = gauge!(QUEUE_DEPTH, "session_id" => session_id.to_owned());
    g.set(depth as f64);
}
/// Sets the outbound-queue-bytes gauge for one session actor.
#[allow(dead_code)]
pub fn record_queue_bytes(session_id: &str, bytes: usize) {
    let g = gauge!(QUEUE_BYTES, "session_id" => session_id.to_owned());
    g.set(bytes as f64);
}
/// Records how many session actors are currently alive.
pub fn set_active_sessions(count: usize) {
    let value = count as f64;
    gauge!(ACTIVE_SESSIONS).set(value);
}
/// Counts one overload event for the given session actor.
pub fn increment_overload(session_id: &str) {
    let c = counter!(OVERLOAD_EVENTS, "session_id" => session_id.to_owned());
    c.increment(1);
}
/// Publishes the thought lifecycle state for a session as a one-hot set of
/// gauges: the series matching `state` is set to 1.0, the others to 0.0, so
/// the previously active state is always cleared.
#[allow(dead_code)]
pub fn set_thought_lifecycle_state(session_id: &str, state: &str) {
    const STATES: [&str; 3] = ["active", "holding", "sleeping"];
    for &name in STATES.iter() {
        let g = gauge!(
            THOUGHT_LIFECYCLE_STATE,
            "session_id" => session_id.to_owned(),
            "state" => name.to_string()
        );
        g.set(if name == state { 1.0 } else { 0.0 });
    }
}
/// Counts one thought-model call, labeled by session, call path, cadence
/// tier, and outcome.
#[allow(dead_code)]
pub fn increment_thought_model_call(session_id: &str, path: &str, tier: &str, outcome: &str) {
    let c = counter!(
        THOUGHT_MODEL_CALLS,
        "session_id" => session_id.to_owned(),
        "path" => path.to_owned(),
        "tier" => tier.to_owned(),
        "outcome" => outcome.to_owned()
    );
    c.increment(1);
}
/// Counts one suppressed thought, labeled by session, suppression reason,
/// and cadence tier.
#[allow(dead_code)]
pub fn increment_thought_suppression(session_id: &str, reason: &str, tier: &str) {
    let c = counter!(
        THOUGHT_SUPPRESSIONS,
        "session_id" => session_id.to_owned(),
        "reason" => reason.to_owned(),
        "tier" => tier.to_owned()
    );
    c.increment(1);
}
/// Records one thought-generation latency sample (in seconds), labeled by
/// session, call path, and cadence tier.
#[allow(dead_code)]
pub fn record_thought_generation_latency(
    session_id: &str,
    path: &str,
    tier: &str,
    duration: Duration,
) {
    let seconds = duration.as_secs_f64();
    let h = histogram!(
        THOUGHT_GENERATION_LATENCY,
        "session_id" => session_id.to_owned(),
        "path" => path.to_owned(),
        "tier" => tier.to_owned()
    );
    h.record(seconds);
}