mod harness;
use std::collections::HashMap;
use std::time::Duration;
use harness::nzb_fixture::NzbFixture;
use harness::{HarnessBuilder, ServerProfile, yenc_articles};
use nzb_nntp::testutil::MockConfig;
use nzb_web::nzb_core::models::JobStatus;
/// Build an NZB fixture named after `prefix` with one file of `n` segments.
///
/// Returns the raw NZB XML bytes, the yEnc-encoded article payloads keyed by
/// message-id, and the message-ids in segment order.
fn make_fixture(prefix: &str, n: usize) -> (Vec<u8>, HashMap<String, Vec<u8>>, Vec<String>) {
    let mut message_ids = Vec::with_capacity(n);
    let mut payloads = Vec::with_capacity(n);
    for i in 1..=n {
        message_ids.push(format!("{prefix}-{i}@test"));
        payloads.push(format!("body-{i}").into_bytes());
    }
    // Pair each message-id with its raw body in the shape `add_file` expects.
    let segments: Vec<(&str, &[u8])> = message_ids
        .iter()
        .map(String::as_str)
        .zip(payloads.iter().map(Vec::as_slice))
        .collect();
    let built = NzbFixture::new(prefix)
        .add_file(&format!("{prefix}.bin"), &segments)
        .build();
    // Re-borrow the built article list into the triple form `yenc_articles` takes.
    let article_refs: Vec<(&str, &[u8], &str)> = built
        .articles
        .iter()
        .map(|(mid, body, meta)| (*mid, *body, meta.as_str()))
        .collect();
    let encoded = yenc_articles(&article_refs);
    (built.xml, encoded, message_ids)
}
/// Clone out of `all_yenc` only the entries whose message-id appears in
/// `which`; ids missing from the map are silently skipped.
fn subset(all_yenc: &HashMap<String, Vec<u8>>, which: &[&str]) -> HashMap<String, Vec<u8>> {
    let mut picked = HashMap::new();
    for &mid in which {
        if let Some(body) = all_yenc.get(mid) {
            picked.insert(mid.to_string(), body.clone());
        }
    }
    picked
}
#[tokio::test]
async fn backup_stays_idle_when_primary_serves_all() {
    // A priority-0 server holding every article should finish the job on its
    // own; the empty priority-1 backup must contribute nothing and its idle
    // workers must not be evicted (regression guard for "Bug 2").
    let (xml, yenc, _mids) = make_fixture("primserve", 6);

    let primary_cfg = MockConfig {
        articles: yenc.clone(),
        ..Default::default()
    };
    let primary = ServerProfile::start("primary", primary_cfg, 3)
        .await
        .with_priority(0);

    let backup_cfg = MockConfig {
        articles: HashMap::new(),
        ..Default::default()
    };
    let backup = ServerProfile::start("backup", backup_cfg, 3)
        .await
        .with_priority(1);

    let engine = HarnessBuilder::new()
        .with_server(primary)
        .with_server(backup)
        .article_timeout(10)
        .build();
    // Short idle window so any wrongful eviction shows up within the test.
    engine
        .queue_manager
        .set_max_worker_idle(Duration::from_secs(3));

    let job_id = engine.submit_nzb_xml("primserve", xml).expect("submit nzb");
    let done = engine
        .wait_for(Duration::from_secs(20), |snap| {
            matches!(snap.job(&job_id), Some(j) if j.articles_downloaded == 6)
        })
        .await;

    let view = engine.job(&job_id).expect("job present");
    assert!(
        done,
        "primary did not complete the job: status={} downloaded={} failed={}",
        view.status, view.articles_downloaded, view.articles_failed
    );
    assert_eq!(
        view.articles_failed, 0,
        "no articles should have failed — backup should have stayed out of dispatch"
    );
    let evictions = engine.queue_manager.worker_eviction_count();
    assert_eq!(
        evictions, 0,
        "idle backup workers were evicted ({evictions}x) — Bug 2 regressed"
    );
}
#[tokio::test]
async fn backup_takes_over_when_primary_unreachable() {
    // With the priority-0 server reporting itself unavailable, the
    // priority-1 backup must download the whole job instead.
    let (xml, yenc, _mids) = make_fixture("deadprim", 4);

    let dead_cfg = MockConfig {
        service_unavailable: true,
        ..Default::default()
    };
    let primary = ServerProfile::start("dead-primary", dead_cfg, 2)
        .await
        .with_priority(0);

    let live_cfg = MockConfig {
        articles: yenc,
        ..Default::default()
    };
    let backup = ServerProfile::start("backup", live_cfg, 2)
        .await
        .with_priority(1);

    let engine = HarnessBuilder::new()
        .with_server(primary)
        .with_server(backup)
        .article_timeout(5)
        .build();

    let job_id = engine.submit_nzb_xml("deadprim", xml).expect("submit nzb");
    let done = engine
        .wait_for(Duration::from_secs(25), |snap| {
            matches!(snap.job(&job_id), Some(j) if j.articles_downloaded == 4)
        })
        .await;

    let view = engine.job(&job_id).expect("job present");
    assert!(
        done,
        "backup did not take over from unreachable primary: status={} downloaded={} failed={}",
        view.status, view.articles_downloaded, view.articles_failed
    );
}
#[tokio::test]
async fn backup_picks_up_primary_430_failures() {
    // Primary can serve only the last two articles and answers 430 for the
    // first two; the backup holds everything, so the job must still finish
    // with zero failed articles via per-article failover.
    let (xml, yenc, mids) = make_fixture("mixed", 4);
    let mut overrides = HashMap::new();
    overrides.insert(mids[0].clone(), 430u16);
    overrides.insert(mids[1].clone(), 430u16);
    // Fix: the first argument here was a garbled token (`¥c`) that did not
    // compile — the primary's inventory is the serveable subset of `yenc`.
    let primary_articles = subset(&yenc, &[mids[2].as_str(), mids[3].as_str()]);
    let primary = ServerProfile::start(
        "primary",
        MockConfig {
            articles: primary_articles,
            article_response_overrides: overrides,
            ..Default::default()
        },
        2,
    )
    .await
    .with_priority(0);
    let backup = ServerProfile::start(
        "backup",
        MockConfig {
            articles: yenc,
            ..Default::default()
        },
        2,
    )
    .await
    .with_priority(1);
    let engine = HarnessBuilder::new()
        .with_server(primary)
        .with_server(backup)
        .article_timeout(10)
        .build();
    let job_id = engine.submit_nzb_xml("mixed", xml).expect("submit nzb");
    let done = engine
        .wait_for(Duration::from_secs(20), |snap| {
            snap.job(&job_id)
                .map(|j| j.articles_downloaded == 4)
                .unwrap_or(false)
        })
        .await;
    let view = engine.job(&job_id).expect("job present");
    assert!(
        done,
        "job did not complete via cascade: status={} downloaded={} failed={}",
        view.status, view.articles_downloaded, view.articles_failed
    );
    assert_eq!(
        view.articles_failed, 0,
        "expected 0 failures after failover"
    );
}
#[tokio::test]
async fn three_tier_priority_cascade() {
    // Tiers 0 and 1 answer 430 for every article; only tier 2 can serve
    // them, so completion proves the cascade walks through all priorities.
    let (xml, yenc, mids) = make_fixture("tier", 3);
    let all_430: HashMap<String, u16> = mids.iter().cloned().map(|m| (m, 430u16)).collect();

    let t0_cfg = MockConfig {
        article_response_overrides: all_430.clone(),
        ..Default::default()
    };
    let t1_cfg = MockConfig {
        article_response_overrides: all_430,
        ..Default::default()
    };
    let t2_cfg = MockConfig {
        articles: yenc,
        ..Default::default()
    };
    let tier0 = ServerProfile::start("tier0", t0_cfg, 2).await.with_priority(0);
    let tier1 = ServerProfile::start("tier1", t1_cfg, 2).await.with_priority(1);
    let tier2 = ServerProfile::start("tier2", t2_cfg, 2).await.with_priority(2);

    let engine = HarnessBuilder::new()
        .with_server(tier0)
        .with_server(tier1)
        .with_server(tier2)
        .article_timeout(10)
        .build();

    let job_id = engine.submit_nzb_xml("tier", xml).expect("submit nzb");
    let done = engine
        .wait_for(Duration::from_secs(25), |snap| {
            matches!(snap.job(&job_id), Some(j) if j.articles_downloaded == 3)
        })
        .await;

    let view = engine.job(&job_id).expect("job present");
    assert!(
        done,
        "tier-2 did not complete cascade: status={} downloaded={} failed={}",
        view.status, view.articles_downloaded, view.articles_failed
    );
}
#[tokio::test]
async fn same_priority_peers_both_serve_job() {
    // Two servers at the same priority, each holding the full article set:
    // the job must complete regardless of how work is split between them.
    let (xml, yenc, _mids) = make_fixture("same", 6);

    let cfg_a = MockConfig {
        articles: yenc.clone(),
        ..Default::default()
    };
    let a = ServerProfile::start("peer-a", cfg_a, 2).await.with_priority(0);

    let cfg_b = MockConfig {
        articles: yenc,
        ..Default::default()
    };
    let b = ServerProfile::start("peer-b", cfg_b, 2).await.with_priority(0);

    let engine = HarnessBuilder::new()
        .with_server(a)
        .with_server(b)
        .article_timeout(10)
        .build();

    let job_id = engine.submit_nzb_xml("same", xml).expect("submit nzb");
    let done = engine
        .wait_for(Duration::from_secs(15), |snap| {
            matches!(snap.job(&job_id), Some(j) if j.articles_downloaded == 6)
        })
        .await;

    let view = engine.job(&job_id).expect("job present");
    assert!(
        done,
        "same-priority peers didn't complete job: status={} downloaded={} failed={}",
        view.status, view.articles_downloaded, view.articles_failed
    );
}
#[tokio::test]
async fn unreachable_primary_does_not_hang_backup() {
    // Even with the primary refusing service, the job must settle into a
    // terminal status (Completed or Failed) rather than hang in flight.
    let (xml, yenc, _mids) = make_fixture("unreach", 2);

    let dead_cfg = MockConfig {
        service_unavailable: true,
        ..Default::default()
    };
    let primary = ServerProfile::start("dead", dead_cfg, 1).await.with_priority(0);

    let live_cfg = MockConfig {
        articles: yenc,
        ..Default::default()
    };
    let backup = ServerProfile::start("live", live_cfg, 2).await.with_priority(1);

    let engine = HarnessBuilder::new()
        .with_server(primary)
        .with_server(backup)
        .article_timeout(5)
        .build();

    let job_id = engine.submit_nzb_xml("unreach", xml).expect("submit nzb");
    let terminal = [JobStatus::Completed, JobStatus::Failed];
    let settled = engine
        .wait_for_status(&job_id, Duration::from_secs(25), &terminal)
        .await;

    let view = engine.job(&job_id).expect("job present");
    assert!(
        settled,
        "job never left Downloading when primary was unreachable: status={} downloaded={} failed={}",
        view.status, view.articles_downloaded, view.articles_failed
    );
}