use std::collections::BTreeMap;
use crate::cases::{header, Case, EnvelopeFormat};
use crate::evidence::Evidence;
use crate::report::{self, Diagnosis, Report};
use hmac::{Hmac, Mac};
use sha2::{Digest, Sha256};
use url::Url;
/// One log line, lazily interpreted either as a JSON object or as plain
/// text with whitespace-separated `key=value` tokens.
struct LogLine<'a> {
    // Original unparsed line text, borrowed from the loaded log.
    raw: &'a str,
    // `Some` only when the line starts with '{' and parses as JSON.
    json: Option<serde_json::Value>,
}
impl<'a> LogLine<'a> {
    /// Parses a raw log line. Lines that look like a JSON object (first
    /// non-space char is '{') are decoded with serde_json; anything else
    /// is kept as plain text for `key=value` token scanning.
    fn parse(raw: &'a str) -> Self {
        let json = if raw.trim_start().starts_with('{') {
            serde_json::from_str(raw).ok()
        } else {
            None
        };
        Self { raw, json }
    }
    /// Returns the unparsed line text.
    fn raw(&self) -> &'a str {
        self.raw
    }
    /// Extracts a named field: prefers the top-level JSON key when the
    /// line parsed as JSON (numbers/bools stringified), otherwise scans
    /// whitespace-separated `key=value` tokens, stripping surrounding
    /// double quotes from the value.
    fn field(&self, key: &str) -> Option<String> {
        if let Some(v) = self.json.as_ref().and_then(|j| j.get(key)) {
            return Some(match v {
                serde_json::Value::String(s) => s.clone(),
                serde_json::Value::Number(n) => n.to_string(),
                serde_json::Value::Bool(b) => b.to_string(),
                _ => v.to_string(),
            });
        }
        let prefix = format!("{key}=");
        self.raw
            .split_whitespace()
            .find_map(|t| t.strip_prefix(&prefix))
            .map(|s| s.trim_matches('"').to_string())
    }
    /// Case-insensitive containment test; `needle_lc` must already be
    /// lowercase. For JSON-object lines, matches against top-level keys
    /// and string values; for plain lines, against the whole raw text.
    /// A JSON line that is not an object (e.g. an array) never matches.
    fn contains_ci(&self, needle_lc: &str) -> bool {
        if let Some(obj) = self.json.as_ref().and_then(|v| v.as_object()) {
            // One traversal over the entries replaces the previous two
            // separate passes (all values, then all keys); the boolean
            // result is identical.
            return obj.iter().any(|(key, value)| {
                key.to_ascii_lowercase().contains(needle_lc)
                    || value
                        .as_str()
                        .map_or(false, |s| s.to_ascii_lowercase().contains(needle_lc))
            });
        }
        if self.json.is_some() {
            // Parsed as JSON but not an object: original semantics are
            // "no match", not a fall-through to raw-text search.
            return false;
        }
        self.raw.to_ascii_lowercase().contains(needle_lc)
    }
}
/// Extracts a `YYYY-MM-DDTHH:MM:SS[.fff]Z`-style timestamp embedded in
/// `line` and converts it to milliseconds since the Unix epoch.
///
/// Splits on the first 'T' and the following 'Z'; fractional seconds are
/// right-padded/truncated to millisecond precision. Returns `None` when
/// any component fails to parse or the month/day is out of range.
fn parse_timestamp_ms(line: &str) -> Option<i64> {
    let (date_str, rest) = line.split_once('T')?;
    let (time_str, _) = rest.split_once('Z')?;
    let mut date_parts = date_str.split('-');
    let year: i64 = date_parts.next()?.parse().ok()?;
    let month: u32 = date_parts.next()?.parse().ok()?;
    let day: u32 = date_parts.next()?.parse().ok()?;
    if !(1..=12).contains(&month) || !(1..=31).contains(&day) {
        return None;
    }
    let mut time_parts = time_str.split(':');
    let hours: i64 = time_parts.next()?.parse().ok()?;
    let minutes: i64 = time_parts.next()?.parse().ok()?;
    let seconds_token = time_parts.next()?;
    // "12.5" -> ("12", "5"); a missing fraction counts as zero millis.
    let (whole, frac) = seconds_token
        .split_once('.')
        .unwrap_or((seconds_token, "0"));
    let seconds: i64 = whole.parse().ok()?;
    // Left-align and zero-pad the fraction to at least three digits,
    // then keep exactly the first three (milliseconds).
    let frac_padded = format!("{frac:0<3}");
    let millis: i64 = frac_padded.get(..3)?.parse().ok()?;
    let days = days_from_civil(year, month, day);
    let ms_of_day = ((hours * 60 + minutes) * 60 + seconds) * 1000 + millis;
    Some(days * 86_400_000 + ms_of_day)
}
/// Converts a proleptic Gregorian date to a signed count of days since
/// 1970-01-01 (Howard Hinnant's `days_from_civil` algorithm). Negative
/// before the epoch; correct across the 100- and 400-year leap rules.
fn days_from_civil(y: i64, m: u32, d: u32) -> i64 {
    // Shift the year so it starts in March: Jan/Feb belong to the
    // previous shifted year, which places the leap day last.
    let shifted_year = if m <= 2 { y - 1 } else { y };
    let era = shifted_year.div_euclid(400);
    let year_of_era = (shifted_year - era * 400) as u32;
    let shifted_month = if m > 2 { m - 3 } else { m + 9 };
    let day_of_year = (153 * shifted_month + 2) / 5 + d - 1;
    let day_of_era = year_of_era * 365 + year_of_era / 4 - year_of_era / 100 + day_of_year;
    // 146_097 days per 400-year era; 719_468 days from 0000-03-01 to epoch.
    era * 146_097 + i64::from(day_of_era) - 719_468
}
/// Parsed webhook signature envelope: the embedded timestamp (when the
/// format carries one), the candidate signature strings, and the raw
/// header value preserved for evidence output.
struct WebhookEnvelope {
    // Unix seconds embedded in the envelope; `None` for formats that do
    // not sign a timestamp (e.g. the GitHub HMAC format).
    timestamp: Option<i64>,
    // Candidate hex signatures; may be several (e.g. Stripe v1 and v0).
    signatures: Vec<String>,
    // Original signature header value, verbatim, for display.
    label: String,
}
/// Decodes the provider-specific signature envelope from the signature
/// header (and, for Raw/Slack, the separate timestamp header).
///
/// - `Raw`: strips an optional `sha256=` prefix; timestamp from its own header.
/// - `StripeV1`: `t=<unix>,v1=<hex>[,v0=<hex>…]`, all inside one header.
/// - `SlackV0`: strips a `v0=` prefix; timestamp from its own header.
/// - `GithubHmac`: strips `sha256=`; carries no timestamp at all.
fn parse_envelope(
    format: EnvelopeFormat,
    sig_header_value: &str,
    ts_header_value: &str,
) -> WebhookEnvelope {
    // The raw header value is always preserved verbatim for evidence.
    let label = sig_header_value.to_string();
    match format {
        EnvelopeFormat::StripeV1 => {
            let mut timestamp: Option<i64> = None;
            let mut signatures: Vec<String> = Vec::new();
            for item in sig_header_value.split(',') {
                match item.trim().split_once('=') {
                    Some(("t", v)) => timestamp = v.trim().parse().ok(),
                    Some(("v1", v)) | Some(("v0", v)) => signatures.push(v.trim().to_string()),
                    _ => {}
                }
            }
            WebhookEnvelope {
                timestamp,
                signatures,
                label,
            }
        }
        EnvelopeFormat::GithubHmac => WebhookEnvelope {
            timestamp: None,
            signatures: vec![sig_header_value
                .trim()
                .trim_start_matches("sha256=")
                .to_string()],
            label,
        },
        EnvelopeFormat::Raw | EnvelopeFormat::SlackV0 => {
            let prefix = if matches!(format, EnvelopeFormat::SlackV0) {
                "v0="
            } else {
                "sha256="
            };
            WebhookEnvelope {
                timestamp: ts_header_value.trim().parse().ok(),
                signatures: vec![sig_header_value
                    .trim()
                    .trim_start_matches(prefix)
                    .to_string()],
                label,
            }
        }
    }
}
/// A diagnostic heuristic. Each rule inspects a [`Case`] and, when its
/// pattern matches, returns a [`Diagnosis`] with evidence and confidence.
pub trait Rule: Send + Sync {
    /// Stable machine-readable identifier (also used to break confidence ties).
    fn id(&self) -> &str;
    /// Returns `Some(diagnosis)` when the rule matches the case, else `None`.
    fn evaluate(&self, case: &Case) -> Option<Diagnosis>;
}
/// Static registry of every rule; evaluated in this order by [`diagnose_traced`].
static RULES: &[&dyn Rule] = &[
    &AuthMissing,
    &BadJsonPayload,
    &RateLimited,
    &WebhookSignatureMismatch,
    &WebhookTimestampStale,
    &TimeoutRetry,
    &ConfigDnsError,
    &IdempotencyCollision,
];
/// Returns the full static rule registry.
pub fn all_rules() -> &'static [&'static dyn Rule] {
    RULES
}
/// Per-rule execution record captured by [`diagnose_traced`].
#[derive(Debug, Clone)]
pub struct RuleTrace {
    /// Identifier of the rule that ran.
    pub rule_id: String,
    /// Wall-clock time spent inside the rule's `evaluate`.
    pub duration: std::time::Duration,
    /// Confidence of the produced diagnosis; `None` if the rule did not fire.
    pub confidence: Option<f32>,
}
/// Runs every rule against `case` and returns only the report,
/// discarding the per-rule timing traces.
pub fn diagnose(case: &Case) -> Report {
    diagnose_traced(case).0
}
/// Runs every registered rule against `case`, recording a timing trace
/// per rule. Matching diagnoses are ranked by descending confidence
/// (ties broken by ascending rule id); the top one becomes the report's
/// primary diagnosis and the rest go into `also_considered`.
pub fn diagnose_traced(case: &Case) -> (Report, Vec<RuleTrace>) {
    let rules = all_rules();
    let mut traces: Vec<RuleTrace> = Vec::with_capacity(rules.len());
    let mut hits: Vec<Diagnosis> = Vec::with_capacity(rules.len());
    for rule in rules {
        let started = std::time::Instant::now();
        let diagnosis = rule.evaluate(case);
        traces.push(RuleTrace {
            rule_id: rule.id().to_string(),
            duration: started.elapsed(),
            confidence: diagnosis.as_ref().map(|d| d.confidence),
        });
        // `extend` over the Option appends the diagnosis iff it fired.
        hits.extend(diagnosis);
    }
    hits.sort_by(|a, b| {
        // Highest confidence first; NaN comparisons fall back to Equal.
        b.confidence
            .partial_cmp(&a.confidence)
            .unwrap_or(std::cmp::Ordering::Equal)
            .then_with(|| a.rule_id.cmp(&b.rule_id))
    });
    let mut ranked = hits.into_iter();
    let primary = ranked.next();
    let report = Report {
        case_name: case.name.clone(),
        severity: case.severity,
        primary,
        also_considered: ranked.collect(),
        reproduction: report::reproduction(case),
    };
    (report, traces)
}
/// Rule: an auth-required endpoint was called without an Authorization
/// header. A 401 response raises confidence.
struct AuthMissing;
impl Rule for AuthMissing {
    fn id(&self) -> &str {
        "auth_missing"
    }
    fn evaluate(&self, case: &Case) -> Option<Diagnosis> {
        // Only relevant when the endpoint requires auth and the header
        // is genuinely absent.
        if !case.context.auth_required
            || header(&case.request.headers, "authorization").is_some()
        {
            return None;
        }
        let mut evidence = vec![
            Evidence::with(
                "Authorization header absent in request",
                "request.headers.authorization",
            ),
            Evidence::with(
                format!(
                    "Endpoint {} {} flagged auth_required=true",
                    case.request.method, case.request.url
                ),
                "case.context.auth_required",
            ),
        ];
        let got_401 = case.response.as_ref().map(|r| r.status) == Some(401);
        let confidence = if got_401 {
            evidence.push(Evidence::with(
                "Response status 401 Unauthorized",
                "response.status",
            ));
            0.95
        } else {
            0.60
        };
        Some(Diagnosis {
            rule_id: self.id().into(),
            likely_cause: "Missing Authorization header".into(),
            confidence,
            evidence,
            next_steps: vec![
                "Add an Authorization: Bearer <token> header to the request.".into(),
                "Confirm the token has not expired.".into(),
                "Verify the token's scope covers the requested operation.".into(),
            ],
            escalation: "Customer request failed because the Authorization header was \
                absent. The API rejected the request before payload processing. \
                Ask the customer to retry with a valid bearer token and confirm \
                the token's scope."
                .into(),
        })
    }
}
/// Rule: a request declared `application/json` carries a body that does
/// not parse as JSON. A 400/422 response raises confidence.
struct BadJsonPayload;
impl Rule for BadJsonPayload {
    fn id(&self) -> &str {
        "bad_json_payload"
    }
    fn evaluate(&self, case: &Case) -> Option<Diagnosis> {
        let body = case.request.body.as_deref()?;
        let ct = header(&case.request.headers, "content-type").unwrap_or("");
        if !ct.contains("application/json") {
            return None;
        }
        // Keep the parse error: its line/column feed the evidence below.
        let parse_err = serde_json::from_str::<serde_json::Value>(body).err()?;
        let status = case.response.as_ref().map_or(0, |r| r.status);
        let mut evidence = vec![
            Evidence::with(
                format!(
                    "serde_json parse error at line {} column {}: {}",
                    parse_err.line(),
                    parse_err.column(),
                    parse_err
                ),
                "request.body",
            ),
            Evidence::with(
                format!("Content-Type was {ct}; body could not be parsed"),
                "request.headers.content-type",
            ),
        ];
        let server_rejected = matches!(status, 400 | 422);
        let confidence = if server_rejected {
            evidence.push(Evidence::with(
                format!("Response status {status} confirms server rejected payload"),
                "response.status",
            ));
            0.95
        } else {
            0.70
        };
        Some(Diagnosis {
            rule_id: self.id().into(),
            likely_cause: "Invalid JSON payload".into(),
            confidence,
            evidence,
            next_steps: vec![
                "Validate the payload against the documented request schema.".into(),
                "Re-emit the body using a JSON serialiser (avoid hand-built strings).".into(),
                "If the issue persists, log the raw request bytes before send.".into(),
            ],
            escalation: "The request body could not be parsed as JSON. The server \
                rejected the request before any business logic ran. Ask the \
                customer to share the exact bytes they sent and the producer \
                that built them."
                .into(),
        })
    }
}
/// Rule: a 429 response. Rate-limit headers, when present, both add
/// evidence and raise confidence.
struct RateLimited;
impl Rule for RateLimited {
    fn id(&self) -> &str {
        "rate_limited"
    }
    fn evaluate(&self, case: &Case) -> Option<Diagnosis> {
        // Guard: only fires on an actual 429 response.
        let resp = case.response.as_ref().filter(|r| r.status == 429)?;
        let mut evidence = vec![Evidence::with(
            "Response status 429 Too Many Requests",
            "response.status",
        )];
        let mut confidence: f32 = 0.70;
        if let Some(remaining) = header(&resp.headers, "x-ratelimit-remaining") {
            evidence.push(Evidence::with(
                format!("X-RateLimit-Remaining: {remaining}"),
                "response.headers.x-ratelimit-remaining",
            ));
            let quota_exhausted = remaining.trim() == "0";
            confidence = confidence.max(if quota_exhausted { 0.95 } else { 0.85 });
        }
        if let Some(retry_after) = header(&resp.headers, "retry-after") {
            evidence.push(Evidence::with(
                format!("Retry-After: {retry_after} seconds"),
                "response.headers.retry-after",
            ));
            confidence = confidence.max(0.95);
        }
        if let Some(reset) = header(&resp.headers, "x-ratelimit-reset") {
            evidence.push(Evidence::with(
                format!("X-RateLimit-Reset (epoch): {reset}"),
                "response.headers.x-ratelimit-reset",
            ));
        }
        Some(Diagnosis {
            rule_id: self.id().into(),
            likely_cause: "Rate limit exceeded".into(),
            confidence,
            evidence,
            next_steps: vec![
                "Honour the Retry-After header before resending.".into(),
                "Implement client-side exponential backoff with jitter.".into(),
                "Reduce request frequency or request a higher quota.".into(),
            ],
            escalation: "Customer is hitting the documented rate limit. Confirm whether \
                the spike is intentional (campaign / migration) or a runaway \
                loop, and whether a temporary quota bump is appropriate."
                .into(),
        })
    }
}
/// Rule: the webhook signature provided in the request does not match
/// the HMAC-SHA256 recomputed over the provider-specific signing input.
struct WebhookSignatureMismatch;
impl Rule for WebhookSignatureMismatch {
    fn id(&self) -> &str {
        "webhook_signature_mismatch"
    }
    fn evaluate(&self, case: &Case) -> Option<Diagnosis> {
        let webhook = case.context.webhook.as_ref()?;
        let secret = case.load_secret()?;
        let provided_raw = header(&case.request.headers, &webhook.signature_header)?;
        let ts_raw = header(&case.request.headers, &webhook.timestamp_header).unwrap_or("");
        let env = parse_envelope(webhook.envelope_format, provided_raw, ts_raw);
        if env.signatures.is_empty() {
            return None;
        }
        let timestamp = env.timestamp.map(|t| t.to_string()).unwrap_or_default();
        let body = case.request.body.as_deref().unwrap_or("");
        // Both the signing input and its human-readable description depend
        // on the envelope format. (Previously the evidence always claimed
        // '{timestamp}.{body}', which misdescribed the Slack scheme
        // ('v0:{timestamp}:{body}') and the GitHub scheme (body only).)
        let (signing_input, scheme_desc) = match webhook.envelope_format {
            EnvelopeFormat::Raw | EnvelopeFormat::StripeV1 => {
                (format!("{timestamp}.{body}"), "'{timestamp}.{body}'")
            }
            EnvelopeFormat::SlackV0 => {
                (format!("v0:{timestamp}:{body}"), "'v0:{timestamp}:{body}'")
            }
            EnvelopeFormat::GithubHmac => (body.to_string(), "'{body}'"),
        };
        let mut mac = <Hmac<Sha256> as Mac>::new_from_slice(&secret).ok()?;
        mac.update(signing_input.as_bytes());
        let expected = hex::encode(mac.finalize().into_bytes());
        // Case-insensitive hex match against any provided candidate means
        // the webhook verifies, so there is nothing to diagnose.
        if env
            .signatures
            .iter()
            .any(|s| s.eq_ignore_ascii_case(&expected))
        {
            return None;
        }
        let envelope_label = match webhook.envelope_format {
            EnvelopeFormat::Raw => "raw",
            EnvelopeFormat::StripeV1 => "stripe_v1",
            EnvelopeFormat::SlackV0 => "slack_v0",
            EnvelopeFormat::GithubHmac => "github_hmac",
        };
        let evidence = vec![
            Evidence::with(
                format!(
                    "Provided {} ({envelope_label}): {}",
                    webhook.signature_header, env.label
                ),
                format!(
                    "request.headers.{}",
                    webhook.signature_header.to_lowercase()
                ),
            ),
            Evidence::with(
                format!("Expected (HMAC-SHA256 over {scheme_desc}): {expected}"),
                "computed",
            ),
            Evidence::with(
                format!("Signing input length: {} bytes", signing_input.len()),
                "computed",
            ),
        ];
        Some(Diagnosis {
            rule_id: self.id().into(),
            likely_cause: "Webhook signature does not match recomputed HMAC".into(),
            confidence: 0.92,
            evidence,
            next_steps: vec![
                "Confirm the active signing secret matches the one used by the sender.".into(),
                "Verify the receiver hashes the raw request body (not a re-serialised copy)."
                    .into(),
                "Inspect any proxy / middleware that may rewrite the body before validation."
                    .into(),
            ],
            escalation: "Recomputed HMAC differs from the provided signature. The most \
                common causes are a rotated-but-not-deployed secret, a body \
                being re-serialised (whitespace / key order changes), or a \
                proxy mutating the request. Confirm with the customer which \
                secret revision is active on their side."
                .into(),
        })
    }
}
/// Rule: the webhook's signed timestamp drifts outside the configured
/// tolerance window relative to the case's reference clock.
struct WebhookTimestampStale;
impl Rule for WebhookTimestampStale {
    fn id(&self) -> &str {
        "webhook_timestamp_stale"
    }
    /// Returns `None` when there is no webhook context, no reference clock
    /// (`now_unix`), or the envelope carries no parseable timestamp — the
    /// GitHub format never does, so it cannot fire for that format.
    fn evaluate(&self, case: &Case) -> Option<Diagnosis> {
        let webhook = case.context.webhook.as_ref()?;
        let now = case.context.now_unix?;
        // The signature header is needed here too: Stripe-style envelopes
        // carry the timestamp inside the signature header (t=...), not in
        // a separate timestamp header.
        let provided_sig = header(&case.request.headers, &webhook.signature_header).unwrap_or("");
        let ts_raw = header(&case.request.headers, &webhook.timestamp_header).unwrap_or("");
        let env = parse_envelope(webhook.envelope_format, provided_sig, ts_raw);
        let ts = env.timestamp?;
        // Positive drift means the signed timestamp is older than "now".
        let drift = now - ts;
        if drift.abs() <= webhook.tolerance_seconds {
            return None;
        }
        let direction = if drift >= 0 { "behind" } else { "ahead of" };
        // Pick the evidence label/pointer for wherever the timestamp came from.
        let (source_label, source_pointer) = match webhook.envelope_format {
            EnvelopeFormat::Raw | EnvelopeFormat::SlackV0 => (
                webhook.timestamp_header.clone(),
                format!(
                    "request.headers.{}",
                    webhook.timestamp_header.to_lowercase()
                ),
            ),
            EnvelopeFormat::StripeV1 => (
                format!("{} (stripe_v1 t=)", webhook.signature_header),
                format!(
                    "request.headers.{}",
                    webhook.signature_header.to_lowercase()
                ),
            ),
            // NOTE(review): unreachable in practice — GithubHmac envelopes
            // have no timestamp, so `env.timestamp?` returns earlier.
            EnvelopeFormat::GithubHmac => (
                "github_hmac (no timestamp)".to_string(),
                format!(
                    "request.headers.{}",
                    webhook.signature_header.to_lowercase()
                ),
            ),
        };
        let evidence = vec![
            Evidence::with(
                format!(
                    "{}: {} ({} {} reference now)",
                    source_label,
                    ts,
                    drift.abs(),
                    direction
                ),
                source_pointer,
            ),
            Evidence::with(
                format!(
                    "Tolerance is {} seconds; observed drift {} seconds",
                    webhook.tolerance_seconds,
                    drift.abs()
                ),
                "case.context.webhook.tolerance_seconds",
            ),
        ];
        // A drift more than 10x the tolerance is almost certainly real skew
        // rather than borderline jitter.
        let confidence = if drift.abs() > webhook.tolerance_seconds * 10 {
            0.90
        } else {
            0.85
        };
        Some(Diagnosis {
            rule_id: self.id().into(),
            likely_cause: "Webhook timestamp outside tolerance window".into(),
            confidence,
            evidence,
            next_steps: vec![
                "Check NTP / clock skew between sender and receiver.".into(),
                "Confirm the timestamp header reflects the time the payload was signed, \
                not the time it was forwarded."
                    .into(),
                "If retries are stored on disk before delivery, refresh the signature \
                immediately before the actual send."
                    .into(),
            ],
            escalation: "Webhook timestamp is outside the configured tolerance. This \
                often indicates clock skew, queued retries that re-send a \
                long-stored payload, or a misconfigured replay window."
                .into(),
        })
    }
}
/// Rule: repeated upstream timeouts, grouped per `request_id`, with an
/// elapsed span derived from log timestamps and compared against the
/// documented client deadline when one is configured.
struct TimeoutRetry;
impl Rule for TimeoutRetry {
    fn id(&self) -> &str {
        "timeout_retry"
    }
    fn evaluate(&self, case: &Case) -> Option<Diagnosis> {
        let log = case.load_log()?;
        // Per-request_id accumulation of timeout entries.
        struct Stream<'a> {
            request_id: String,
            timeouts: Vec<(u32, LogLine<'a>)>,
            max_attempt: u32,
            elapsed_ms: Option<u64>,
        }
        let mut streams: BTreeMap<String, Stream<'_>> = BTreeMap::new();
        let mut unknown_id_timeouts: Vec<(u32, LogLine<'_>)> = Vec::new();
        // Single pass over the log. The previous implementation re-scanned
        // (and re-parsed) the entire log once per stream to find timestamp
        // extremes — O(streams x lines) — plus a third full scan for the
        // distinct request_id set; both are now gathered here alongside
        // the timeout grouping.
        let mut all_request_ids: std::collections::BTreeSet<String> =
            std::collections::BTreeSet::new();
        // request_id -> (min, max) millisecond timestamps over ALL lines
        // carrying that id (not just timeout lines), matching the original.
        let mut ts_bounds: BTreeMap<String, (i64, i64)> = BTreeMap::new();
        for (idx, raw) in log.lines().enumerate() {
            let line = LogLine::parse(raw);
            let line_no = (idx as u32) + 1;
            let rid = line.field("request_id");
            if let Some(rid_str) = rid.as_deref() {
                all_request_ids.insert(rid_str.to_string());
                if let Some(ms) = parse_timestamp_ms(raw) {
                    let bounds = ts_bounds.entry(rid_str.to_string()).or_insert((ms, ms));
                    bounds.0 = bounds.0.min(ms);
                    bounds.1 = bounds.1.max(ms);
                }
            }
            if !(line.contains_ci("timeout") || line.contains_ci("timed out")) {
                continue;
            }
            match rid {
                Some(rid) => {
                    let entry = streams.entry(rid.clone()).or_insert_with(|| Stream {
                        request_id: rid,
                        timeouts: Vec::new(),
                        max_attempt: 0,
                        elapsed_ms: None,
                    });
                    if let Some(a) = line.field("attempt").and_then(|s| s.parse::<u32>().ok()) {
                        entry.max_attempt = entry.max_attempt.max(a);
                    }
                    entry.timeouts.push((line_no, line));
                }
                None => unknown_id_timeouts.push((line_no, line)),
            }
        }
        let total_timeouts: usize =
            streams.values().map(|s| s.timeouts.len()).sum::<usize>() + unknown_id_timeouts.len();
        // A single timeout is not a retry pattern.
        if total_timeouts < 2 {
            return None;
        }
        // Attach derived elapsed spans: only meaningful when at least two
        // distinct timestamps were seen for the stream.
        for stream in streams.values_mut() {
            if let Some(&(lo, hi)) = ts_bounds.get(&stream.request_id) {
                if hi > lo {
                    stream.elapsed_ms = Some((hi - lo) as u64);
                }
            }
        }
        // The busiest stream (most timeouts, then highest attempt) anchors
        // the evidence.
        let primary_stream = streams
            .values()
            .max_by_key(|s| (s.timeouts.len(), s.max_attempt));
        let mut evidence: Vec<Evidence> = Vec::new();
        if let Some(s) = primary_stream {
            evidence.push(Evidence::with(
                format!(
                    "request_id={} accounts for {} timeout entries (max attempt={})",
                    s.request_id,
                    s.timeouts.len(),
                    s.max_attempt
                ),
                "server.log",
            ));
            // Cap quoted entries at four to keep the evidence readable.
            for (line_no, line) in s.timeouts.iter().take(4) {
                evidence.push(Evidence::at_line(
                    format!("timeout entry: {}", truncate(line.raw(), 160)),
                    "server.log",
                    *line_no,
                ));
            }
            if let Some(elapsed) = s.elapsed_ms {
                evidence.push(Evidence::with(
                    format!(
                        "elapsed (derived from log timestamps): {} ms across {} attempts",
                        elapsed,
                        s.timeouts.len()
                    ),
                    "computed",
                ));
            }
        }
        if all_request_ids.len() > 1 {
            evidence.push(Evidence::with(
                format!(
                    "log contains {} distinct request_ids; rule grouped timeouts by request_id rather than pooling",
                    all_request_ids.len()
                ),
                "server.log",
            ));
        }
        let mut confidence: f32 = 0.65;
        if let Some(s) = primary_stream {
            if s.max_attempt >= 3 {
                confidence = confidence.max(0.85);
                evidence.push(Evidence::with(
                    format!(
                        "max attempt observed: {} (suggests retry exhaustion)",
                        s.max_attempt
                    ),
                    "server.log",
                ));
            }
            if let (Some(elapsed), Some(deadline)) = (s.elapsed_ms, case.context.client_deadline_ms)
            {
                if elapsed > deadline {
                    confidence = confidence.max(0.90);
                    evidence.push(Evidence::with(
                        format!(
                            "derived elapsed {} ms exceeds documented client deadline {} ms",
                            elapsed, deadline
                        ),
                        "computed",
                    ));
                }
            }
        }
        if let Some(deadline_ms) = case.context.client_deadline_ms {
            evidence.push(Evidence::with(
                format!("documented client deadline: {deadline_ms} ms"),
                "case.context.client_deadline_ms",
            ));
            confidence = confidence.max(0.85);
        }
        Some(Diagnosis {
            rule_id: self.id().into(),
            likely_cause: "Upstream timeout with retries exhausted".into(),
            confidence,
            evidence,
            next_steps: vec![
                "Inspect upstream latency for the affected endpoint.".into(),
                "Verify retry policy (max attempts, backoff, jitter).".into(),
                "If the deadline is shorter than upstream p99, raise it or reduce work.".into(),
            ],
            escalation: "Client retried the request multiple times before failing. \
                Confirm whether upstream latency spiked, whether the retry \
                budget is appropriate for the documented client deadline, and \
                whether idempotency keys protect against duplicate side \
                effects on retry."
                .into(),
        })
    }
}
/// Rule: the request URL's host or scheme does not match the documented
/// API base URL; near-miss hostnames get a typo hint.
struct ConfigDnsError;
impl Rule for ConfigDnsError {
    fn id(&self) -> &str {
        "config_dns_error"
    }
    fn evaluate(&self, case: &Case) -> Option<Diagnosis> {
        let expected_base = case.context.expected_base_url.as_ref()?;
        let expected = Url::parse(expected_base).ok()?;
        let actual = Url::parse(&case.request.url).ok()?;
        let exp_host = expected.host_str()?;
        let act_host = actual.host_str()?;
        let scheme_matches = actual.scheme() == expected.scheme();
        if act_host == exp_host && scheme_matches {
            return None;
        }
        let mut evidence = vec![
            Evidence::with(format!("Request host: {act_host}"), "request.url"),
            Evidence::with(
                format!("Documented base host: {exp_host}"),
                "case.context.expected_base_url",
            ),
        ];
        let mut confidence: f32 = 0.75;
        if !scheme_matches {
            evidence.push(Evidence::with(
                format!(
                    "Scheme differs: request={}, expected={}",
                    actual.scheme(),
                    expected.scheme()
                ),
                "request.url",
            ));
            confidence = confidence.max(0.80);
        }
        if let Some(hint) = near_miss_hint(act_host, exp_host) {
            evidence.push(Evidence::with(hint, "computed"));
            confidence = confidence.max(0.90);
        }
        Some(Diagnosis {
            rule_id: self.id().into(),
            likely_cause: "API base URL or hostname does not match documented endpoint".into(),
            confidence,
            evidence,
            next_steps: vec![
                "Confirm the API base URL in the customer's environment configuration.".into(),
                "Run `dig` / `nslookup` against the documented host to rule out DNS issues.".into(),
                "Check for environment variable overrides (staging vs production).".into(),
            ],
            escalation: "Customer is targeting a host that does not match the documented \
                API base. The most common causes are a stale base-URL config, a \
                staging endpoint left in production, or a typo in a TLD or \
                subdomain. Verify the deploying revision before assuming a DNS \
                outage."
                .into(),
        })
    }
}
/// Rule: an idempotency key was reused with a request body whose SHA-256
/// differs from the hash stored for the original attempt.
struct IdempotencyCollision;
impl Rule for IdempotencyCollision {
    fn id(&self) -> &str {
        "idempotency_collision"
    }
    fn evaluate(&self, case: &Case) -> Option<Diagnosis> {
        let idem = case.context.idempotency.as_ref()?;
        let key = header(&case.request.headers, &idem.header)?;
        let body = case.request.body.as_deref().unwrap_or("");
        // One-shot digest of the current body, hex-encoded for comparison
        // against the stored hash.
        let actual = hex::encode(Sha256::digest(body.as_bytes()));
        if actual.eq_ignore_ascii_case(&idem.stored_body_sha256) {
            return None;
        }
        let status = case.response.as_ref().map_or(0, |r| r.status);
        let mut evidence = vec![
            Evidence::with(
                format!("Idempotency-Key: {key}"),
                format!("request.headers.{}", idem.header.to_lowercase()),
            ),
            Evidence::with(
                format!("Stored body SHA-256: {}", idem.stored_body_sha256),
                "case.context.idempotency.stored_body_sha256",
            ),
            Evidence::with(format!("Current body SHA-256: {actual}"), "computed"),
            Evidence::with(
                format!("Current body length: {} bytes", body.len()),
                "request.body",
            ),
        ];
        let confidence = if status == 422 {
            evidence.push(Evidence::with(
                "Response status 422 confirms server rejected duplicate-key with different body",
                "response.status",
            ));
            0.93
        } else if (400..500).contains(&status) {
            0.80
        } else {
            0.70
        };
        Some(Diagnosis {
            rule_id: self.id().into(),
            likely_cause: "Idempotency-Key reused with a different request body".into(),
            confidence,
            evidence,
            next_steps: vec![
                "Generate a fresh Idempotency-Key for any logically new request.".into(),
                "If retrying, send byte-identical body bytes used on the first attempt.".into(),
                "Check whether a serialiser or middleware is adding fields between attempts."
                    .into(),
            ],
            escalation: "Customer reused an Idempotency-Key with a different body, so the \
                server returned its stored-body-mismatch error. Confirm whether \
                their retry logic captures the body before its first send and \
                replays the same bytes, or whether a logging / proxy layer is \
                re-serialising between attempts."
                .into(),
        })
    }
}
/// Produces a human-readable hint when `actual` looks like a typo'd or
/// wrong-TLD variant of the documented host `expected`.
///
/// When both hosts have the same number of dot-separated labels and
/// exactly one label differs by at most two characters (same length),
/// a "near-miss" hint is returned; otherwise, if the last label of
/// `expected` is not a suffix of `actual`, a "TLD differs" hint is
/// returned. Returns `None` when neither heuristic applies.
fn near_miss_hint(actual: &str, expected: &str) -> Option<String> {
    let actual_labels: Vec<&str> = actual.rsplit('.').collect();
    let expected_labels: Vec<&str> = expected.rsplit('.').collect();
    if actual_labels.len() == expected_labels.len() {
        // Collect the labels that disagree, position by position.
        let mismatches: Vec<(&str, &str)> = actual_labels
            .iter()
            .zip(expected_labels.iter())
            .filter(|(a, e)| a != e)
            .map(|(a, e)| (*a, *e))
            .collect();
        if let &[(a, e)] = mismatches.as_slice() {
            if a.len() == e.len() && hamming(a, e) <= 2 {
                return Some(format!(
                    "near-miss label: '{a}' differs from documented '{e}' by ≤2 chars (typo?)"
                ));
            }
        }
    }
    let expected_tld = expected.split('.').next_back().unwrap_or("");
    if !actual.ends_with(expected_tld) {
        return Some(format!(
            "TLD differs: request '{actual}' vs documented '{expected}'"
        ));
    }
    None
}
/// Counts positions at which the two strings' characters differ,
/// compared over the shorter string's length (extra chars are ignored).
fn hamming(a: &str, b: &str) -> usize {
    let mut diffs = 0usize;
    for (x, y) in a.chars().zip(b.chars()) {
        if x != y {
            diffs += 1;
        }
    }
    diffs
}
/// Returns `s` unchanged when it fits in `max` bytes; otherwise cuts it
/// to at most `max` bytes and appends a single '…'.
///
/// The cut is backed up to the nearest UTF-8 char boundary: the previous
/// `&s[..max]` slice panicked whenever `max` landed inside a multi-byte
/// character, and this helper is fed raw log lines that may contain
/// non-ASCII text.
fn truncate(s: &str, max: usize) -> String {
    if s.len() <= max {
        return s.to_string();
    }
    let mut cut = max;
    // Walk back (at most 3 bytes) until the index is a char boundary;
    // 0 is always a boundary, so this terminates.
    while !s.is_char_boundary(cut) {
        cut -= 1;
    }
    format!("{}\u{2026}", &s[..cut])
}
#[cfg(test)]
mod private_helper_tests {
    //! Unit tests for the file-private date, string, and envelope helpers.
    use super::*;
    // --- days_from_civil: civil date -> signed days since the Unix epoch ---
    #[test]
    fn days_from_civil_unix_epoch() {
        assert_eq!(days_from_civil(1970, 1, 1), 0);
    }
    #[test]
    fn days_from_civil_one_day_after_epoch() {
        assert_eq!(days_from_civil(1970, 1, 2), 1);
    }
    #[test]
    fn days_from_civil_one_year_after_epoch() {
        assert_eq!(days_from_civil(1971, 1, 1), 365);
    }
    #[test]
    fn days_from_civil_2000_leap_day() {
        assert_eq!(days_from_civil(2000, 2, 29), 11016);
    }
    // 2100 is divisible by 100 but not 400: not a leap year.
    #[test]
    fn days_from_civil_2100_not_leap() {
        let feb28 = days_from_civil(2100, 2, 28);
        let mar01 = days_from_civil(2100, 3, 1);
        assert_eq!(mar01 - feb28, 1, "2100 must not be a leap year");
    }
    // 2400 is divisible by 400: a leap year.
    #[test]
    fn days_from_civil_2400_leap() {
        let feb29 = days_from_civil(2400, 2, 29);
        let mar01 = days_from_civil(2400, 3, 1);
        assert_eq!(mar01 - feb29, 1, "2400 must be a leap year");
    }
    #[test]
    fn days_from_civil_pre_epoch() {
        assert_eq!(days_from_civil(1969, 12, 31), -1);
    }
    // --- parse_timestamp_ms: embedded ISO-ish timestamp -> epoch millis ---
    #[test]
    fn parse_timestamp_ms_unix_epoch_zero() {
        assert_eq!(parse_timestamp_ms("1970-01-01T00:00:00.000Z"), Some(0));
    }
    #[test]
    fn parse_timestamp_ms_one_second_after_epoch() {
        assert_eq!(parse_timestamp_ms("1970-01-01T00:00:01.000Z"), Some(1000));
    }
    #[test]
    fn parse_timestamp_ms_pads_short_fractions() {
        assert_eq!(parse_timestamp_ms("1970-01-01T00:00:00.5Z"), Some(500));
    }
    #[test]
    fn parse_timestamp_ms_returns_none_on_garbage() {
        assert_eq!(parse_timestamp_ms("not a timestamp"), None);
        assert_eq!(parse_timestamp_ms(""), None);
    }
    #[test]
    fn parse_timestamp_ms_rejects_invalid_month() {
        assert_eq!(parse_timestamp_ms("1970-13-01T00:00:00.000Z"), None);
    }
    #[test]
    fn parse_timestamp_ms_cross_midnight_is_monotone() {
        let before = parse_timestamp_ms("2026-12-31T23:59:59.500Z").unwrap();
        let after = parse_timestamp_ms("2027-01-01T00:00:01.500Z").unwrap();
        assert_eq!(after - before, 2000, "cross-midnight span must be 2 s");
    }
    // --- hamming: per-position char difference count ---
    #[test]
    fn hamming_identical_strings() {
        assert_eq!(hamming("abc", "abc"), 0);
    }
    #[test]
    fn hamming_one_char_diff() {
        assert_eq!(hamming("abc", "abd"), 1);
    }
    #[test]
    fn hamming_all_diff() {
        assert_eq!(hamming("abc", "xyz"), 3);
    }
    #[test]
    fn hamming_empty_strings() {
        assert_eq!(hamming("", ""), 0);
    }
    // --- near_miss_hint: hostname typo / TLD heuristics ---
    #[test]
    fn near_miss_hint_typo_label() {
        let hint = near_miss_hint("api.acme.exemple", "api.acme.example");
        let h = hint.expect("typo near-miss must produce a hint");
        assert!(h.contains("near-miss"), "{h}");
    }
    #[test]
    fn near_miss_hint_completely_different_tld() {
        let hint = near_miss_hint("api.acme.local", "api.acme.example");
        let h = hint.expect("TLD-differs must produce a hint");
        assert!(h.contains("TLD differs"), "{h}");
    }
    #[test]
    fn near_miss_hint_label_count_differs() {
        let hint = near_miss_hint("staging.api.acme.example", "api.acme.example");
        if let Some(h) = hint {
            assert!(h.contains("TLD") || h.contains("near-miss"), "{h}");
        }
    }
    // --- truncate: byte-budgeted shortening with ellipsis ---
    #[test]
    fn truncate_under_limit_passes_through() {
        assert_eq!(truncate("hi", 10), "hi");
    }
    #[test]
    fn truncate_at_limit_passes_through() {
        assert_eq!(truncate("hello", 5), "hello");
    }
    #[test]
    fn truncate_over_limit_appends_ellipsis() {
        assert_eq!(truncate("hello!", 5), "hello\u{2026}");
    }
    // --- parse_envelope: provider-specific signature envelopes ---
    #[test]
    fn parse_envelope_raw_strips_sha256_prefix() {
        let env = parse_envelope(EnvelopeFormat::Raw, "sha256=deadbeef", "1700000000");
        assert_eq!(env.signatures, vec!["deadbeef".to_string()]);
        assert_eq!(env.timestamp, Some(1_700_000_000));
    }
    #[test]
    fn parse_envelope_stripe_v1_collects_v1_and_v0() {
        let env = parse_envelope(
            EnvelopeFormat::StripeV1,
            "t=1700000000,v1=aaaa,v0=bbbb",
            "ignored",
        );
        assert_eq!(env.signatures, vec!["aaaa".to_string(), "bbbb".to_string()]);
        assert_eq!(env.timestamp, Some(1_700_000_000));
    }
    #[test]
    fn parse_envelope_slack_v0_strips_prefix() {
        let env = parse_envelope(EnvelopeFormat::SlackV0, "v0=cafef00d", "1700000000");
        assert_eq!(env.signatures, vec!["cafef00d".to_string()]);
        assert_eq!(env.timestamp, Some(1_700_000_000));
    }
    #[test]
    fn parse_envelope_github_hmac_has_no_timestamp() {
        let env = parse_envelope(
            EnvelopeFormat::GithubHmac,
            "sha256=feedface",
            "this should be ignored",
        );
        assert_eq!(env.signatures, vec!["feedface".to_string()]);
        assert_eq!(env.timestamp, None, "GitHub envelope claims no timestamp");
    }
}