use std::io::Read;
use std::num::NonZeroU32;
use std::path::{Path, PathBuf};
use std::sync::OnceLock;
use std::time::{Duration, Instant};
use anyhow::{Context, Result, anyhow};
use reqwest::blocking::Client;
use sha2::{Digest, Sha256};
/// Lazily-built, process-wide blocking HTTP client (see [`shared_client`]).
static SHARED_CLIENT: OnceLock<Client> = OnceLock::new();
/// Connect timeout for the shared client. No overall request timeout is set
/// on the client itself; callers attach per-request timeouts where needed.
const SHARED_CLIENT_CONNECT_TIMEOUT: Duration = Duration::from_secs(10);
/// Returns the process-wide blocking HTTP client, building it on first use.
///
/// Construction only configures a connect timeout; per-request timeouts are
/// the caller's responsibility.
pub fn shared_client() -> &'static Client {
    fn build() -> Client {
        Client::builder()
            .connect_timeout(SHARED_CLIENT_CONNECT_TIMEOUT)
            .build()
            .expect("build shared reqwest client")
    }
    SHARED_CLIENT.get_or_init(build)
}
static RELEASES_CACHE: OnceLock<Vec<Release>> = OnceLock::new();
/// Fetches the kernel.org releases index via the shared client, reusing the
/// process-wide cache after the first successful fetch.
pub(crate) fn cached_releases() -> Result<Vec<Release>> {
    cached_releases_with(shared_client())
}
/// True when `client` is the exact singleton returned by [`shared_client`]
/// (pointer identity, not configuration equality). False when the singleton
/// has not been initialized yet.
fn is_shared_client(client: &Client) -> bool {
    SHARED_CLIENT
        .get()
        .is_some_and(|singleton| std::ptr::eq(client, singleton))
}
/// Fetches the releases index from the canonical `RELEASES_URL`, using the
/// process cache only when `client` is the shared singleton.
fn cached_releases_with(client: &Client) -> Result<Vec<Release>> {
    cached_releases_with_url(client, RELEASES_URL)
}
/// Fetches releases from `url`, caching the result process-wide — but only
/// for the (shared client, RELEASES_URL) pair, so injected test clients and
/// foreign URLs can never pollute `RELEASES_CACHE`.
fn cached_releases_with_url(client: &Client, url: &str) -> Result<Vec<Release>> {
    // Non-singleton clients (tests) bypass the cache entirely.
    if !is_shared_client(client) {
        return fetch_releases(client, url);
    }
    debug_assert!(
        url == RELEASES_URL,
        "cached_releases_with_url: shared_client() must use RELEASES_URL \
        to avoid RELEASES_CACHE pollution — got url={url:?}, expected \
        RELEASES_URL ({RELEASES_URL:?}). Tests that need URL injection \
        must pass a non-singleton Client (which takes the bypass branch \
        above and never touches the cache).",
    );
    // Release-mode guard mirroring the debug_assert above: a foreign URL on
    // the shared client still bypasses the cache instead of poisoning it.
    if url != RELEASES_URL {
        return fetch_releases(client, url);
    }
    match RELEASES_CACHE.get() {
        Some(cached) => Ok(cached.clone()),
        None => {
            let fresh = fetch_releases(client, url)?;
            // A lost set() race is harmless: the first writer wins and our
            // freshly fetched copy is still returned to the caller.
            let _ = RELEASES_CACHE.set(fresh.clone());
            Ok(fresh)
        }
    }
}
/// A kernel source tree obtained from a tarball, a git clone, or a local
/// directory, together with the metadata the build cache keys on.
#[non_exhaustive]
pub struct AcquiredSource {
    // Root of the source tree on disk.
    pub source_dir: PathBuf,
    // Build-cache key derived from version/hash, arch, and crate suffix.
    pub cache_key: String,
    // Kernel version string when known (tarball path); None for git/local.
    pub version: Option<String>,
    // Provenance enum consumed by the cache layer.
    pub kernel_source: crate::cache::KernelSource,
    // True when the tree lives in a throwaway location owned by this run.
    pub is_temp: bool,
    // True when the tree has uncommitted changes (or is not a git repo).
    pub is_dirty: bool,
    // NOTE(review): tarball acquisition also sets this true — presumably it
    // means "pristine/versioned tree" rather than "has a .git dir"; confirm.
    pub is_git: bool,
}
/// Returns `(arch, kernel image name)` for the compilation target:
/// `("x86_64", "bzImage")` or `("aarch64", "Image")`.
///
/// Previously an unsupported target produced a confusing "mismatched types /
/// missing tail expression" error because neither cfg block was compiled in;
/// the explicit `compile_error!` below makes the failure self-describing.
pub fn arch_info() -> (&'static str, &'static str) {
    #[cfg(target_arch = "x86_64")]
    {
        ("x86_64", "bzImage")
    }
    #[cfg(target_arch = "aarch64")]
    {
        ("aarch64", "Image")
    }
    #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
    compile_error!("arch_info: unsupported target_arch (expected x86_64 or aarch64)");
}
/// Parses the leading major component of a version string, e.g.
/// `"6.14.2"` -> `6`. Errors when the first dot-separated segment is not a
/// valid `u32`.
fn major_version(version: &str) -> Result<u32> {
    let mut segments = version.split('.');
    let major_str = segments
        .next()
        .ok_or_else(|| anyhow!("invalid version: {version}"))?;
    major_str
        .parse()
        .with_context(|| format!("invalid major version in {version}"))
}
/// True when `version` names a release candidate (contains an "-rc" tag).
fn is_rc(version: &str) -> bool {
    version.find("-rc").is_some()
}
/// One row of kernel.org's releases.json: a moniker such as "stable",
/// "longterm", "mainline", or "linux-next", plus its version string.
#[derive(Clone, Debug)]
pub(crate) struct Release {
    pub moniker: String,
    pub version: String,
}
/// Monikers excluded from version resolution ("linux-next" has no
/// numerically comparable version).
pub(crate) fn is_skippable_release_moniker(moniker: &str) -> bool {
    matches!(moniker, "linux-next")
}
/// Highest released version in the same `major.minor` series as `version`,
/// according to the (cached) releases index. Returns `None` when `version`
/// has no minor component or the index is unavailable.
fn latest_in_series(client: &Client, version: &str) -> Option<String> {
    // "6.14.2" -> series prefix "6.14"; single-segment input bails out.
    let mut segments = version.split('.');
    let major = segments.next()?;
    let minor = segments.next()?;
    let prefix = format!("{major}.{minor}");
    let releases = cached_releases_with(client).ok()?;
    releases
        .iter()
        .filter(|r| !is_skippable_release_moniker(&r.moniker))
        .filter(|r| r.version.starts_with(&prefix))
        // Series boundary: accept "X.Y" exactly, or "X.Y." followed by more —
        // this keeps "6.1" from matching "6.14.2".
        .filter(|r| r.version.len() == prefix.len() || r.version.as_bytes()[prefix.len()] == b'.')
        .filter_map(|r| version_tuple(&r.version).map(|t| (r.version.clone(), t)))
        // Strictly-greater comparison keeps the first winner on (unlikely)
        // ties, matching the original linear scan.
        .fold(None::<(String, (u32, u32, u32))>, |best, (v, t)| match best {
            Some((_, bt)) if t <= bt => best,
            _ => Some((v, t)),
        })
        .map(|(v, _)| v)
}
/// Builds the "version X not found" diagnostic, appending the newest release
/// in the same series when the index knows of a different one.
fn version_not_found_msg(client: &Client, version: &str) -> String {
    let segs: Vec<&str> = version.split('.').collect();
    let prefix = match segs.as_slice() {
        [major, minor, ..] => format!("{major}.{minor}"),
        _ => version.to_string(),
    };
    match latest_in_series(client, version) {
        Some(latest) if latest != version => {
            format!("version {version} not found. latest {prefix}.x: {latest}")
        }
        _ => format!("version {version} not found"),
    }
}
/// Errors out when the server answered with an HTML page (typically an error
/// or redirect landing page) instead of the expected tarball bytes.
fn reject_html_response(response: &reqwest::blocking::Response, url: &str) -> Result<()> {
    let content_type = response
        .headers()
        .get(reqwest::header::CONTENT_TYPE)
        .and_then(|value| value.to_str().ok());
    if matches!(content_type, Some(ct) if ct.contains("text/html")) {
        anyhow::bail!(
            "download {url}: server returned HTML instead of tarball (URL may be invalid)"
        );
    }
    Ok(())
}
/// Announces the download on stderr, with the size in MB when the server
/// sent a Content-Length.
fn print_download_size(response: &reqwest::blocking::Response, url: &str, cli_label: &str) {
    match response.content_length() {
        Some(len) => {
            let mb = len as f64 / (1024.0 * 1024.0);
            eprintln!("{cli_label}: downloading {url} ({mb:.1} MB)");
        }
        None => eprintln!("{cli_label}: downloading {url}"),
    }
}
/// Abort a download when no body bytes have arrived for this long.
const DOWNLOAD_NO_PROGRESS_TIMEOUT: Duration = Duration::from_secs(60);
/// Read adapter that SHA-256-hashes and counts every byte it forwards, and
/// reports a stall when no bytes arrive within `no_progress_timeout`.
struct DownloadStream<R: Read> {
    inner: R,
    // Running digest over the bytes exactly as read from `inner`.
    hasher: Sha256,
    // Total bytes forwarded so far.
    bytes_total: u64,
    // Last moment a non-empty read succeeded (or construction time).
    last_progress: Instant,
    no_progress_timeout: Duration,
}
impl<R: Read> DownloadStream<R> {
    /// Wraps `inner`, starting the stall clock now.
    fn new(inner: R) -> Self {
        Self {
            inner,
            hasher: Sha256::new(),
            bytes_total: 0,
            last_progress: Instant::now(),
            no_progress_timeout: DOWNLOAD_NO_PROGRESS_TIMEOUT,
        }
    }
    /// Consumes the stream, returning the lowercase hex SHA-256 of all bytes
    /// read and the total byte count.
    fn finalize(self) -> (String, u64) {
        (hex::encode(self.hasher.finalize()), self.bytes_total)
    }
}
impl<R: Read> Read for DownloadStream<R> {
    /// Forwarding read that hashes and counts every byte.
    ///
    /// The stall check runs *before* delegating to `inner`, so a stall is
    /// only surfaced on the first call after `no_progress_timeout` elapses;
    /// a single inner read that blocks forever is not interrupted here
    /// (the per-request HTTP timeout bounds that case).
    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
        let elapsed = self.last_progress.elapsed();
        if elapsed > self.no_progress_timeout {
            return Err(std::io::Error::new(
                std::io::ErrorKind::TimedOut,
                format!(
                    "download stalled: no body bytes for {}s after {} bytes received",
                    elapsed.as_secs(),
                    self.bytes_total,
                ),
            ));
        }
        match self.inner.read(buf) {
            Ok(0) => {
                // EOF: deliberately leaves `last_progress` untouched.
                Ok(0)
            }
            Ok(n) => {
                self.hasher.update(&buf[..n]);
                self.bytes_total += n as u64;
                self.last_progress = Instant::now();
                Ok(n)
            }
            Err(e) => Err(e),
        }
    }
}
// Per-request timeout for tarball downloads. reqwest's blocking `timeout`
// covers the whole request, body read included.
const DOWNLOAD_REQUEST_READ_TIMEOUT: Duration = Duration::from_secs(300);
// Per-request timeout for the small sha256sums.asc manifest fetch.
const SHA256SUMS_REQUEST_TIMEOUT: Duration = Duration::from_secs(30);
/// Fetches the `sha256sums.asc` manifest for the `v{major}.x` series from
/// cdn.kernel.org and returns its raw body.
fn fetch_stable_sha256sums(client: &Client, major: u32) -> Result<String> {
    let url = format!("https://cdn.kernel.org/pub/linux/kernel/v{major}.x/sha256sums.asc");
    tracing::info!(%url, "fetching kernel tarball sha256sums (requires network)");
    let response = client
        .get(&url)
        .timeout(SHA256SUMS_REQUEST_TIMEOUT)
        .send()
        .with_context(|| format!("fetch {url}"))?;
    anyhow::ensure!(
        response.status().is_success(),
        "fetch {url}: HTTP {}",
        response.status()
    );
    response
        .text()
        .with_context(|| format!("read body of {url}"))
}
/// Finds the SHA-256 entry for `target_filename` in a kernel.org
/// `sha256sums.asc` manifest, ignoring everything after the PGP signature
/// marker. Returns the lowercase hex digest, or `None` when the file has no
/// well-formed (64 hex chars) entry.
fn parse_sha256_for_file(manifest: &str, target_filename: &str) -> Option<String> {
    // Only the cleartext portion above the signature block is meaningful.
    let body = match manifest.split_once("-----BEGIN PGP SIGNATURE-----") {
        Some((before, _)) => before,
        None => manifest,
    };
    body.lines().find_map(|raw| {
        let mut fields = raw.trim().split_whitespace();
        let hash = fields.next()?;
        let name = fields.next()?;
        let well_formed = hash.len() == 64 && hash.bytes().all(|b| b.is_ascii_hexdigit());
        if name == target_filename && well_formed {
            Some(hash.to_ascii_lowercase())
        } else {
            None
        }
    })
}
/// Compares two hex digests case-insensitively, erroring with a remediation
/// hint on mismatch.
fn verify_sha256(actual_hex: &str, expected_hex: &str, url: &str) -> Result<()> {
    if !actual_hex.eq_ignore_ascii_case(expected_hex) {
        anyhow::bail!(
            "sha256 mismatch for {url}: expected {}, got {}. \
            If cdn.kernel.org updated this tarball in-place, \
            retry with --skip-sha256 to bypass verification.",
            expected_hex.to_ascii_lowercase(),
            actual_hex.to_ascii_lowercase(),
        );
    }
    Ok(())
}
/// Resolves the expected SHA-256 for `tarball_name`, or `None` when
/// verification is to be skipped — either explicitly via `skip_sha256`, or
/// because the manifest could not be fetched or lacks an entry. Never fails:
/// every miss degrades to an unverified download with a warning.
fn resolve_expected_sha256(
    client: &Client,
    major: u32,
    tarball_name: &str,
    skip_sha256: bool,
) -> Option<String> {
    if skip_sha256 {
        tracing::warn!(
            tarball = %tarball_name,
            "--skip-sha256: bypassing checksum verification — the \
            downloaded tarball will not be authenticated against \
            cdn.kernel.org's sha256sums.asc manifest. Use only when \
            upstream has updated a tarball in-place and the manifest \
            is mismatched.",
        );
        return None;
    }
    // Manifest failures are soft: warn and continue unverified.
    match fetch_stable_sha256sums(client, major) {
        Ok(manifest) => match parse_sha256_for_file(&manifest, tarball_name) {
            Some(hex) => Some(hex),
            None => {
                tracing::warn!(
                    tarball = %tarball_name,
                    "sha256sums.asc fetched but no entry for {tarball_name}; \
                    download will proceed without checksum verification. \
                    Pass --skip-sha256 to bypass the manifest fetch when \
                    the entry is known to be absent.",
                );
                None
            }
        },
        Err(err) => {
            tracing::warn!(
                error = %format!("{err:#}"),
                "failed to fetch sha256sums.asc; download will proceed \
                without checksum verification. Pass --skip-sha256 to \
                bypass the manifest fetch when the manifest is known \
                to be unavailable.",
            );
            None
        }
    }
}
/// Downloads `linux-{version}.tar.xz` from cdn.kernel.org into `dest_dir`,
/// streaming the body through xz + tar extraction while hashing the raw
/// (compressed) bytes, then verifying the digest (when one was resolved)
/// before promoting the extracted tree out of its staging directory.
///
/// Ordering note: extraction runs before verification, but the extracted
/// files live in a TempDir inside `dest_dir` that is dropped on error, so a
/// mismatched tarball never lands at the final path.
fn download_stable_tarball(
    client: &Client,
    version: &str,
    dest_dir: &Path,
    cli_label: &str,
    skip_sha256: bool,
) -> Result<PathBuf> {
    let major = major_version(version)?;
    let tarball_name = format!("linux-{version}.tar.xz");
    let url = format!("https://cdn.kernel.org/pub/linux/kernel/v{major}.x/{tarball_name}");
    // Resolve the manifest digest up front; None means "proceed unverified".
    let expected_sha256 = resolve_expected_sha256(client, major, &tarball_name, skip_sha256);
    tracing::info!(%url, "downloading stable kernel tarball (requires network)");
    let response = client
        .get(&url)
        .timeout(DOWNLOAD_REQUEST_READ_TIMEOUT)
        .send()
        .with_context(|| format!("download {url}"))?;
    if !response.status().is_success() {
        // 404 gets the friendlier "version not found (latest is ...)" text.
        if response.status() == reqwest::StatusCode::NOT_FOUND {
            anyhow::bail!("{}", version_not_found_msg(client, version));
        }
        anyhow::bail!("download {url}: HTTP {}", response.status());
    }
    reject_html_response(&response, &url)?;
    print_download_size(&response, &url, cli_label);
    eprintln!("{cli_label}: extracting tarball (xz)");
    // Staging lives inside dest_dir so the final rename stays on one filesystem.
    let staging =
        tempfile::TempDir::new_in(dest_dir).with_context(|| "create extraction staging dir")?;
    // DownloadStream hashes the compressed bytes as the decoder pulls them.
    let stream = DownloadStream::new(response);
    let decoder = xz2::read::XzDecoder::new(stream);
    let mut archive = tar::Archive::new(decoder);
    archive
        .unpack(staging.path())
        .with_context(|| "extract tarball")?;
    // Unwrap Archive -> XzDecoder -> DownloadStream to read the final digest.
    let stream = archive.into_inner().into_inner();
    let (actual_hex, bytes_total) = stream.finalize();
    if let Some(expected) = expected_sha256.as_deref() {
        verify_sha256(&actual_hex, expected, &url)?;
        eprintln!("{cli_label}: sha256 verified ({bytes_total} bytes, hash {actual_hex})");
    } else if !skip_sha256 {
        // Manifest was wanted but unavailable: record the unverified digest.
        tracing::warn!(
            url = %url,
            bytes = bytes_total,
            sha256 = %actual_hex,
            "no expected sha256 available for {url}; computed digest \
            {actual_hex} over {bytes_total} bytes is unverified",
        );
    }
    let source_dir = promote_staged_kernel_tree(&staging, dest_dir, version)?;
    Ok(source_dir)
}
/// Validates that `staging` contains exactly one top-level entry named
/// `linux-{version}` and renames it into `dest_dir`, returning the final
/// path. Any stray top-level entry (a tarball-smuggling guard) is an error.
fn promote_staged_kernel_tree(
    staging: &tempfile::TempDir,
    dest_dir: &Path,
    version: &str,
) -> Result<PathBuf> {
    let expected_name = format!("linux-{version}");
    let expected_os = std::ffi::OsString::from(expected_name.clone());
    let mut found_inner = false;
    let entries = std::fs::read_dir(staging.path()).with_context(|| "read staging dir entries")?;
    for entry in entries {
        let name = entry.with_context(|| "iterate staging dir entry")?.file_name();
        if name == expected_os {
            found_inner = true;
            continue;
        }
        anyhow::bail!(
            "tarball contains unexpected top-level entry {name:?}; \
            expected only {expected_name}/"
        );
    }
    anyhow::ensure!(
        found_inner,
        "expected directory {expected_name} after extraction"
    );
    let inner = staging.path().join(&expected_name);
    let promoted = dest_dir.join(&expected_name);
    std::fs::rename(&inner, &promoted)
        .with_context(|| format!("rename {} -> {}", inner.display(), promoted.display()))?;
    Ok(promoted)
}
/// Downloads an RC tarball (`linux-{version}.tar.gz`) from git.kernel.org
/// into `dest_dir`, extracting through gzip + tar while hashing the raw
/// bytes. RC tarballs have no published checksum manifest, so the digest is
/// only logged, never verified.
fn download_rc_tarball(
    client: &Client,
    version: &str,
    dest_dir: &Path,
    cli_label: &str,
) -> Result<PathBuf> {
    let url = format!("https://git.kernel.org/torvalds/t/linux-{version}.tar.gz");
    tracing::info!(%url, "downloading RC kernel tarball (requires network)");
    let response = client
        .get(&url)
        .timeout(DOWNLOAD_REQUEST_READ_TIMEOUT)
        .send()
        .with_context(|| format!("download {url}"))?;
    // 404 is expected once the stable release ships; explain that.
    if response.status() == reqwest::StatusCode::NOT_FOUND {
        anyhow::bail!(
            "RC tarball not found: {url}\n \
            RC releases are removed from git.kernel.org after the stable version ships."
        );
    }
    if !response.status().is_success() {
        anyhow::bail!("download {url}: HTTP {}", response.status());
    }
    reject_html_response(&response, &url)?;
    print_download_size(&response, &url, cli_label);
    eprintln!("{cli_label}: extracting tarball (gzip)");
    // Staging inside dest_dir keeps the final rename on one filesystem.
    let staging =
        tempfile::TempDir::new_in(dest_dir).with_context(|| "create extraction staging dir")?;
    let stream = DownloadStream::new(response);
    let decoder = flate2::read::GzDecoder::new(stream);
    let mut archive = tar::Archive::new(decoder);
    archive
        .unpack(staging.path())
        .with_context(|| "extract tarball")?;
    // Unwrap Archive -> GzDecoder -> DownloadStream for the final digest.
    let stream = archive.into_inner().into_inner();
    let (actual_hex, bytes_total) = stream.finalize();
    tracing::warn!(
        url = %url,
        bytes = bytes_total,
        sha256 = %actual_hex,
        "no expected sha256 available for {url} (RC tarballs are \
        dynamically generated by git.kernel.org and have no \
        published manifest); computed digest {actual_hex} over \
        {bytes_total} bytes is unverified",
    );
    let source_dir = promote_staged_kernel_tree(&staging, dest_dir, version)?;
    Ok(source_dir)
}
/// Acquires a kernel source tree by downloading the tarball for `version`
/// (RC tarballs from git.kernel.org, stable ones from cdn.kernel.org) and
/// returns the extracted tree plus its cache metadata.
pub fn download_tarball(
    client: &Client,
    version: &str,
    dest_dir: &Path,
    cli_label: &str,
    skip_sha256: bool,
) -> Result<AcquiredSource> {
    let (arch, _image) = arch_info();
    let source_dir = match is_rc(version) {
        true => download_rc_tarball(client, version, dest_dir, cli_label)?,
        false => download_stable_tarball(client, version, dest_dir, cli_label, skip_sha256)?,
    };
    let cache_key = format!("{version}-tarball-{arch}-kc{}", crate::cache_key_suffix());
    Ok(AcquiredSource {
        source_dir,
        cache_key,
        version: Some(version.to_string()),
        kernel_source: crate::cache::KernelSource::Tarball,
        is_temp: true,
        is_dirty: false,
        // NOTE(review): is_git=true for a tarball extract looks surprising —
        // presumably downstream reads it as "pristine tree"; confirm.
        is_git: true,
    })
}
/// Patch component of a version: `"X.Y"` -> `Some(0)`, `"X.Y.Z"` ->
/// `Some(Z)` when Z parses, anything else -> `None`.
fn patch_level(version: &str) -> Option<u32> {
    let segments: Vec<&str> = version.split('.').collect();
    match segments.as_slice() {
        [_, _] => Some(0),
        [_, _, patch] => patch.parse().ok(),
        _ => None,
    }
}
pub(crate) const RELEASES_URL: &str = "https://www.kernel.org/releases.json";
pub(crate) fn fetch_releases(client: &Client, url: &str) -> Result<Vec<Release>> {
tracing::info!(%url, "fetching kernel.org releases index (requires network)");
let response = client
.get(url)
.send()
.with_context(|| format!("fetch {url}"))?;
if !response.status().is_success() {
anyhow::bail!("fetch {url}: HTTP {}", response.status());
}
let body = response.text().with_context(|| "read response body")?;
parse_releases_body(&body)
}
/// Parses releases.json: requires a top-level `releases` array, keeps rows
/// with string `moniker` and `version` fields, and logs how many malformed
/// rows were dropped (the cached snapshot inherits those drops).
fn parse_releases_body(body: &str) -> Result<Vec<Release>> {
    let json: serde_json::Value =
        serde_json::from_str(body).with_context(|| "parse releases.json")?;
    let rows = json
        .get("releases")
        .and_then(|r| r.as_array())
        .ok_or_else(|| anyhow!("releases.json: missing releases array"))?;
    let input_rows = rows.len();
    let mut parsed = Vec::with_capacity(input_rows);
    for row in rows {
        // Skip rows missing either field or carrying non-string values.
        let (Some(moniker), Some(version)) = (
            row.get("moniker").and_then(|m| m.as_str()),
            row.get("version").and_then(|v| v.as_str()),
        ) else {
            continue;
        };
        parsed.push(Release {
            moniker: moniker.to_owned(),
            version: version.to_owned(),
        });
    }
    let dropped = input_rows - parsed.len();
    if dropped > 0 {
        tracing::warn!(
            input_rows,
            parsed_rows = parsed.len(),
            dropped,
            "releases.json: dropped {dropped} of {input_rows} row(s) \
            missing moniker/version (or non-string values); cached \
            snapshot will reflect this for the process lifetime"
        );
    }
    Ok(parsed)
}
/// Returns the first stable/longterm release in index order whose patch
/// level is at least 8.
pub fn fetch_latest_stable_version(client: &Client, cli_label: &str) -> Result<String> {
    eprintln!("{cli_label}: fetching latest kernel version");
    let releases = cached_releases_with(client)?;
    // First match in index order wins, exactly like the original scan+break.
    let version = releases
        .iter()
        .find(|r| {
            (r.moniker == "stable" || r.moniker == "longterm")
                && patch_level(&r.version).unwrap_or(0) >= 8
        })
        .map(|r| r.version.clone())
        .ok_or_else(|| anyhow!("no stable kernel with patch >= 8 found in releases.json"))?;
    eprintln!("{cli_label}: latest stable kernel: {version}");
    Ok(version)
}
/// Parses `"X.Y"` or `"X.Y.Z"` into a comparable `(major, minor, patch)`
/// tuple, with a missing patch defaulting to 0. Any other shape (or a
/// non-numeric segment, e.g. RC tags) yields `None`.
fn version_tuple(version: &str) -> Option<(u32, u32, u32)> {
    let segments: Vec<&str> = version.split('.').collect();
    match segments.as_slice() {
        [major, minor] => Some((major.parse().ok()?, minor.parse().ok()?, 0)),
        [major, minor, patch] => {
            Some((major.parse().ok()?, minor.parse().ok()?, patch.parse().ok()?))
        }
        _ => None,
    }
}
/// True when `s` looks like a series prefix rather than a full version:
/// fewer than two dots and no RC tag (so "6.14" and "7" qualify; "6.14.2"
/// and "6.15-rc3" do not).
pub fn is_major_minor_prefix(s: &str) -> bool {
    let dots = s.bytes().filter(|&b| b == b'.').count();
    dots < 2 && !s.contains("-rc")
}
/// Resolves the newest release matching series `prefix` from the releases
/// index, falling back to scraping the cdn.kernel.org directory listing when
/// the series is EOL and absent from the index.
pub fn fetch_version_for_prefix(client: &Client, prefix: &str, cli_label: &str) -> Result<String> {
    eprintln!("{cli_label}: fetching latest {prefix}.x kernel version");
    let releases = cached_releases_with(client)?;
    let best = releases
        .iter()
        .filter(|r| !is_skippable_release_moniker(&r.moniker))
        .filter(|r| r.version.starts_with(prefix))
        // Series boundary: "X.Y" exactly, or "X.Y." followed by more.
        .filter(|r| r.version.len() == prefix.len() || r.version.as_bytes()[prefix.len()] == b'.')
        .filter_map(|r| version_tuple(&r.version).map(|t| (r.version.as_str(), t)))
        // Strictly-greater keeps the first winner on ties, like the
        // original scan.
        .fold(None::<(&str, (u32, u32, u32))>, |best, (v, t)| match best {
            Some((_, bt)) if t <= bt => best,
            _ => Some((v, t)),
        });
    if let Some((version, _)) = best {
        eprintln!("{cli_label}: latest {prefix}.x kernel: {version}");
        return Ok(version.to_string());
    }
    eprintln!("{cli_label}: {prefix}.x not in releases.json (EOL series), probing cdn.kernel.org");
    probe_latest_patch(client, prefix, cli_label)
}
/// Scrapes the cdn.kernel.org `v{major}.x/` directory listing for
/// `linux-{prefix}.N.tar.xz` entries and returns the version with the
/// highest N. Used for EOL series that releases.json no longer lists.
fn probe_latest_patch(client: &Client, prefix: &str, cli_label: &str) -> Result<String> {
    let major = major_version(prefix)?;
    let url = format!("https://cdn.kernel.org/pub/linux/kernel/v{major}.x/");
    eprintln!("{cli_label}: fetching directory listing from {url}");
    let body = client
        .get(&url)
        .send()
        .with_context(|| format!("GET {url}"))?
        .error_for_status()
        .with_context(|| format!("GET {url}"))?
        .text()
        .with_context(|| format!("reading body from {url}"))?;
    let needle = format!("linux-{prefix}.");
    // One candidate per line: the text between the first needle occurrence
    // and the next ".tar.xz" must parse as the patch number, else the line
    // is ignored (this also skips .tar.sign entries).
    let best_patch = body
        .lines()
        .filter_map(|line| {
            let tail = &line[line.find(&needle)? + needle.len()..];
            let end = tail.find(".tar.xz")?;
            tail[..end].parse::<u32>().ok()
        })
        .max();
    match best_patch {
        Some(patch) => {
            let version = format!("{prefix}.{patch}");
            eprintln!("{cli_label}: latest {prefix}.x kernel (from cdn listing): {version}");
            Ok(version)
        }
        None => {
            anyhow::bail!(
                "no tarball matching {prefix}.x found in cdn.kernel.org \
                directory listing at {url}"
            );
        }
    }
}
/// Shallow-clones (depth 1) `git_ref` from `url` into `dest_dir/linux` and
/// returns the checked-out tree with a cache key derived from the ref and
/// the short HEAD hash.
pub fn git_clone(
    url: &str,
    git_ref: &str,
    dest_dir: &Path,
    cli_label: &str,
) -> Result<AcquiredSource> {
    let (arch, _) = arch_info();
    eprintln!("{cli_label}: cloning {url} (ref: {git_ref}, depth: 1)");
    let clone_dir = dest_dir.join("linux");
    // `mut` is required: fetch_then_checkout takes &mut self.
    let mut prep = gix::prepare_clone(url, &clone_dir)
        .with_context(|| "prepare clone")?
        .with_shallow(gix::remote::fetch::Shallow::DepthAtRemote(
            NonZeroU32::new(1).expect("1 is nonzero"),
        ))
        .with_ref_name(Some(git_ref))
        .with_context(|| "set ref name")?;
    // The AtomicBool is gix's interrupt flag; never set here.
    let (mut checkout, _outcome) = prep
        .fetch_then_checkout(
            gix::progress::Discard,
            &std::sync::atomic::AtomicBool::new(false),
        )
        .with_context(|| "clone fetch")?;
    let (_repo, _outcome) = checkout
        .main_worktree(
            gix::progress::Discard,
            &std::sync::atomic::AtomicBool::new(false),
        )
        .with_context(|| "checkout")?;
    // Re-open the finished clone to read HEAD for the cache key.
    let repo = gix::open(&clone_dir).with_context(|| "open cloned repo")?;
    let head = repo.head_id().with_context(|| "read HEAD")?;
    // First 7 chars of the full hex id.
    let short_hash = format!("{}", head).chars().take(7).collect::<String>();
    let cache_key = format!(
        "{git_ref}-git-{short_hash}-{arch}-kc{}",
        crate::cache_key_suffix()
    );
    Ok(AcquiredSource {
        source_dir: clone_dir,
        cache_key,
        version: None,
        kernel_source: crate::cache::KernelSource::Git {
            git_hash: Some(short_hash),
            git_ref: Some(git_ref.to_string()),
        },
        is_temp: true,
        is_dirty: false,
        is_git: true,
    })
}
/// Acquires an existing local source tree: canonicalizes the path, inspects
/// its git state (clean/dirty/non-git), and derives a cache key that folds
/// in the user's `.config` hash when one is present.
pub fn local_source(source_path: &Path) -> Result<AcquiredSource> {
    let (arch, _image) = arch_info();
    if !source_path.is_dir() {
        anyhow::bail!("{}: not a directory", source_path.display());
    }
    let canonical = source_path
        .canonicalize()
        .with_context(|| format!("canonicalize {}", source_path.display()))?;
    let state = inspect_local_source_state(&canonical)?;
    let user_config_hash = config_hash_for_key(&canonical);
    let cache_key = compose_local_cache_key(
        arch,
        &state.short_hash,
        &canonical,
        user_config_hash.as_deref(),
    );
    Ok(AcquiredSource {
        source_dir: canonical.clone(),
        cache_key,
        version: None,
        is_temp: false,
        is_dirty: state.is_dirty,
        is_git: state.is_git,
        kernel_source: crate::cache::KernelSource::Local {
            source_tree_path: Some(canonical),
            git_hash: state.short_hash,
        },
    })
}
/// Result of inspecting a local source tree's git state.
#[derive(Debug, Clone)]
pub struct LocalSourceState {
    // Short (7-char) HEAD hash; None when the tree is dirty or not a repo.
    pub short_hash: Option<String>,
    // True for dirty repos and for non-git trees.
    pub is_dirty: bool,
    // True when a git repository was discovered at (or above) the path.
    pub is_git: bool,
}
/// Classifies a local source tree: short HEAD hash (only when clean), dirty
/// flag, and whether it is a git repo at all. Non-git trees report
/// `(None, dirty=true, git=false)`, which routes the cache key to the
/// path-salted `local-unknown-` form.
pub fn inspect_local_source_state(canonical: &Path) -> Result<LocalSourceState> {
    let (short_hash, is_dirty, is_git) = match gix::discover(canonical) {
        Ok(repo) => {
            let head = repo.head_id().with_context(|| "read HEAD")?;
            // First 7 chars of the full hex HEAD id.
            let short_hash = format!("{}", head).chars().take(7).collect::<String>();
            let head_tree = repo.head_tree().with_context(|| "read HEAD tree")?;
            let head_tree_id = head_tree.id;
            // Cheap check first: HEAD tree vs index catches staged changes;
            // the callback breaks on the first difference found.
            let mut index_dirty = false;
            let index = repo.index_or_empty().with_context(|| "open index")?;
            let _ = repo.tree_index_status(
                &head_tree_id,
                &index,
                None,
                gix::status::tree_index::TrackRenames::Disabled,
                |_, _, _| {
                    index_dirty = true;
                    Ok::<_, std::convert::Infallible>(std::ops::ControlFlow::Break(()))
                },
            );
            // Only when the index is clean, compare index vs worktree.
            // dirwalk_options = None disables the directory walk, so
            // presumably untracked-only files do not count as dirty —
            // NOTE(review): confirm that is intentional.
            let worktree_dirty = if !index_dirty {
                repo.status(gix::progress::Discard)
                    .with_context(|| "status")?
                    .index_worktree_rewrites(None)
                    .index_worktree_submodules(gix::status::Submodule::Given {
                        ignore: gix::submodule::config::Ignore::All,
                        check_dirty: false,
                    })
                    .index_worktree_options_mut(|opts| {
                        opts.dirwalk_options = None;
                    })
                    .into_index_worktree_iter(Vec::new())
                    .map(|mut iter| iter.next().is_some())
                    .unwrap_or(false)
            } else {
                false
            };
            let is_dirty = index_dirty || worktree_dirty;
            // Dirty trees do not publish a hash: the hash would not
            // describe the actual contents.
            let hash = if is_dirty { None } else { Some(short_hash) };
            (hash, is_dirty, true)
        }
        Err(_) => {
            // Not a git repo: always treated as dirty, never hashed.
            (None, true, false)
        }
    };
    Ok(LocalSourceState {
        short_hash,
        is_dirty,
        is_git,
    })
}
/// Builds the cache key for a local tree. With a hash:
/// `local-{hash}-{arch}[-cfg{cfg}]-kc{suffix}`. Without one (dirty or
/// non-git), a CRC32 of the canonical path salts the key so distinct trees
/// never collide: `local-unknown-{path_hash}-{arch}-kc{suffix}`.
pub fn compose_local_cache_key(
    arch: &str,
    short_hash: &Option<String>,
    canonical: &Path,
    user_config_hash: Option<&str>,
) -> String {
    let suffix = crate::cache_key_suffix();
    let Some(hash) = short_hash else {
        let path_hash = canonical_path_hash(canonical);
        return format!("local-unknown-{path_hash}-{arch}-kc{suffix}");
    };
    match user_config_hash {
        Some(cfg) => format!("local-{hash}-{arch}-cfg{cfg}-kc{suffix}"),
        None => format!("local-{hash}-{arch}-kc{suffix}"),
    }
}
/// Deterministic 8-hex-digit CRC32 of the canonical path's raw OS bytes;
/// used to salt `local-unknown-` cache keys per path.
pub(crate) fn canonical_path_hash(canonical: &Path) -> String {
    let encoded = canonical.as_os_str().as_encoded_bytes();
    let digest = crc32fast::hash(encoded);
    format!("{digest:08x}")
}
/// CRC32 (8 hex digits) of the tree's `.config`, or `None` when the file is
/// absent/unreadable — any read failure silently means "no config segment".
fn config_hash_for_key(canonical: &Path) -> Option<String> {
    let data = std::fs::read(canonical.join(".config")).ok()?;
    Some(format!("{:08x}", crc32fast::hash(&data)))
}
#[cfg(test)]
mod tests {
use super::*;
// --- Pure helper tests: arch_info, is_major_minor_prefix, major_version,
// --- is_rc, plus URL-construction helpers mirroring the download paths.
#[test]
fn fetch_arch_info_returns_known_arch() {
    // cfg-driven: accept either supported (arch, image) pairing.
    let (arch, image) = arch_info();
    assert!(
        (arch == "x86_64" && image == "bzImage") || (arch == "aarch64" && image == "Image"),
        "unexpected arch/image: {arch}/{image}"
    );
}
#[test]
fn is_major_minor_prefix_accepts_two_segment() {
    assert!(is_major_minor_prefix("6.14"));
    assert!(is_major_minor_prefix("7.0"));
}
#[test]
fn is_major_minor_prefix_rejects_patch_version() {
    assert!(!is_major_minor_prefix("6.14.2"));
    assert!(!is_major_minor_prefix("5.4.0"));
}
#[test]
fn is_major_minor_prefix_rejects_rc_tag() {
    assert!(!is_major_minor_prefix("6.15-rc3"));
    assert!(!is_major_minor_prefix("6.14-rc1"));
}
#[test]
fn is_major_minor_prefix_historical_edge_cases() {
    // Bare major and empty string intentionally count as prefixes.
    assert!(is_major_minor_prefix("7"));
    assert!(is_major_minor_prefix(""));
}
#[test]
fn fetch_major_version_stable() {
    assert_eq!(major_version("6.14.2").unwrap(), 6);
}
#[test]
fn fetch_major_version_rc() {
    assert_eq!(major_version("6.15-rc3").unwrap(), 6);
}
#[test]
fn fetch_major_version_two_part() {
    assert_eq!(major_version("5.4").unwrap(), 5);
}
#[test]
fn fetch_major_version_invalid() {
    assert!(major_version("abc").is_err());
}
#[test]
fn fetch_is_rc_true() {
    assert!(is_rc("6.15-rc3"));
    assert!(is_rc("6.14.2-rc1"));
}
#[test]
fn fetch_is_rc_false() {
    assert!(!is_rc("6.14.2"));
    assert!(!is_rc("6.14"));
}
// Test-local mirrors of the URL formats used by the download functions.
fn stable_tarball_url(version: &str) -> Result<String> {
    let major = major_version(version)?;
    Ok(format!(
        "https://cdn.kernel.org/pub/linux/kernel/v{major}.x/linux-{version}.tar.xz"
    ))
}
fn rc_tarball_url(version: &str) -> String {
    format!("https://git.kernel.org/torvalds/t/linux-{version}.tar.gz")
}
// --- URL-shape tests plus promote_staged_kernel_tree behavior on
// --- well-formed, stray-entry, wrong-version, and empty staging dirs.
#[test]
fn fetch_stable_url_construction() {
    let url = stable_tarball_url("6.14.2").unwrap();
    assert_eq!(
        url,
        "https://cdn.kernel.org/pub/linux/kernel/v6.x/linux-6.14.2.tar.xz"
    );
}
#[test]
fn fetch_stable_url_v5() {
    let url = stable_tarball_url("5.4.0").unwrap();
    assert_eq!(
        url,
        "https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.4.0.tar.xz"
    );
}
#[test]
fn fetch_rc_url_construction() {
    let url = rc_tarball_url("6.15-rc3");
    assert_eq!(
        url,
        "https://git.kernel.org/torvalds/t/linux-6.15-rc3.tar.gz"
    );
}
#[test]
fn promote_staged_renames_well_formed_archive() {
    let dest = tempfile::TempDir::new().unwrap();
    let staging = tempfile::TempDir::new_in(dest.path()).unwrap();
    std::fs::create_dir(staging.path().join("linux-6.14.2")).unwrap();
    std::fs::write(
        staging.path().join("linux-6.14.2").join("Makefile"),
        b"# fake",
    )
    .unwrap();
    let source_dir = promote_staged_kernel_tree(&staging, dest.path(), "6.14.2").unwrap();
    assert_eq!(source_dir, dest.path().join("linux-6.14.2"));
    assert!(source_dir.is_dir());
    assert!(source_dir.join("Makefile").is_file());
    // Promotion moves (renames) the tree out of staging.
    assert!(!staging.path().join("linux-6.14.2").exists());
}
#[test]
fn promote_staged_rejects_stray_top_level_entry() {
    let dest = tempfile::TempDir::new().unwrap();
    let staging = tempfile::TempDir::new_in(dest.path()).unwrap();
    std::fs::create_dir(staging.path().join("linux-6.14.2")).unwrap();
    std::fs::write(staging.path().join("evil"), b"backdoor").unwrap();
    let err = promote_staged_kernel_tree(&staging, dest.path(), "6.14.2").unwrap_err();
    let msg = format!("{err:#}");
    assert!(
        msg.contains("unexpected top-level entry"),
        "diagnostic must cite stray entry: {msg}"
    );
    // Nothing may be promoted when validation fails.
    assert!(!dest.path().join("linux-6.14.2").exists());
}
#[test]
fn promote_staged_bails_on_missing_inner_dir() {
    let dest = tempfile::TempDir::new().unwrap();
    let staging = tempfile::TempDir::new_in(dest.path()).unwrap();
    // Wrong version directory counts as a stray entry, not "missing".
    std::fs::create_dir(staging.path().join("linux-6.14.3")).unwrap();
    let err = promote_staged_kernel_tree(&staging, dest.path(), "6.14.2").unwrap_err();
    let msg = format!("{err:#}");
    assert!(
        msg.contains("unexpected top-level entry"),
        "wrong-version dir surfaces as stray: {msg}"
    );
    assert!(!dest.path().join("linux-6.14.2").exists());
}
#[test]
fn promote_staged_bails_on_empty_staging() {
    let dest = tempfile::TempDir::new().unwrap();
    let staging = tempfile::TempDir::new_in(dest.path()).unwrap();
    let err = promote_staged_kernel_tree(&staging, dest.path(), "6.14.2").unwrap_err();
    let msg = format!("{err:#}");
    assert!(
        msg.contains("expected directory linux-6.14.2"),
        "empty staging surfaces as missing-dir: {msg}"
    );
}
// --- patch_level shape tests, then a git-CLI fixture helper used by the
// --- local_source tests below.
#[test]
fn fetch_patch_level_three_part() {
    assert_eq!(patch_level("6.12.8"), Some(8));
}
#[test]
fn fetch_patch_level_two_part() {
    // Missing patch component defaults to 0.
    assert_eq!(patch_level("7.0"), Some(0));
}
#[test]
fn fetch_patch_level_single_part() {
    assert_eq!(patch_level("6"), None);
}
#[test]
fn fetch_patch_level_four_part() {
    assert_eq!(patch_level("6.1.2.3"), None);
}
#[test]
fn fetch_patch_level_non_numeric_patch() {
    assert_eq!(patch_level("6.1.rc3"), None);
}
#[test]
fn fetch_patch_level_zero() {
    assert_eq!(patch_level("6.14.0"), Some(0));
}
#[test]
fn fetch_patch_level_large() {
    assert_eq!(patch_level("6.12.99"), Some(99));
}
// Creates a git repo in `dir` with one committed file, isolated from any
// host git config (identity pinned, gpg signing disabled).
fn init_repo_with_commit(dir: &Path) {
    use std::process::Command;
    let run = |args: &[&str]| {
        let out = Command::new("git")
            .args(args)
            .current_dir(dir)
            .env("GIT_CONFIG_GLOBAL", "/dev/null")
            .env("GIT_CONFIG_SYSTEM", "/dev/null")
            .env("GIT_AUTHOR_NAME", "ktstr-test")
            .env("GIT_AUTHOR_EMAIL", "ktstr-test@localhost")
            .env("GIT_COMMITTER_NAME", "ktstr-test")
            .env("GIT_COMMITTER_EMAIL", "ktstr-test@localhost")
            .output()
            .expect("spawn git");
        assert!(
            out.status.success(),
            "git {:?} failed: {}",
            args,
            String::from_utf8_lossy(&out.stderr)
        );
    };
    run(&["init", "-q", "-b", "main"]);
    std::fs::write(dir.join("file.txt"), "original\n").unwrap();
    run(&["add", "file.txt"]);
    run(&[
        "-c",
        "commit.gpgsign=false",
        "commit",
        "-q",
        "-m",
        "initial",
    ]);
}
// --- local_source on git trees: clean publishes a short hash; any dirt
// --- (worktree edit or staged-only change) clears it.
#[test]
fn local_source_clean_repo_populates_hash() {
    // Skip on hosts without the git CLI (fixture helper shells out).
    if std::process::Command::new("git")
        .arg("--version")
        .output()
        .is_err()
    {
        skip!("git CLI unavailable");
    }
    let tmp = tempfile::TempDir::new().unwrap();
    init_repo_with_commit(tmp.path());
    let acquired = local_source(tmp.path()).expect("local_source ok");
    assert!(!acquired.is_dirty, "clean tree must not be dirty");
    let git_hash = match &acquired.kernel_source {
        crate::cache::KernelSource::Local { git_hash, .. } => git_hash.clone(),
        other => panic!("expected KernelSource::Local, got {other:?}"),
    };
    let hash = git_hash.expect("clean repo must carry a git_hash");
    assert_eq!(hash.len(), 7, "short hash must be 7 chars, got {hash:?}");
    assert!(
        hash.chars().all(|c| c.is_ascii_hexdigit()),
        "hash must be hex, got {hash:?}"
    );
    assert!(
        acquired.cache_key.contains(&hash),
        "clean cache_key must embed the short hash, got {}",
        acquired.cache_key
    );
}
#[test]
fn local_source_dirty_tracked_file_clears_hash() {
    if std::process::Command::new("git")
        .arg("--version")
        .output()
        .is_err()
    {
        skip!("git CLI unavailable");
    }
    let tmp = tempfile::TempDir::new().unwrap();
    init_repo_with_commit(tmp.path());
    // Mutate a tracked file without staging it.
    std::fs::write(tmp.path().join("file.txt"), "modified\n").unwrap();
    let acquired = local_source(tmp.path()).expect("local_source ok");
    assert!(acquired.is_dirty, "worktree mutation must mark dirty");
    match &acquired.kernel_source {
        crate::cache::KernelSource::Local { git_hash, .. } => {
            assert!(
                git_hash.is_none(),
                "dirty tree must not publish git_hash, got {git_hash:?}"
            );
        }
        other => panic!("expected KernelSource::Local, got {other:?}"),
    }
    assert!(
        acquired.cache_key.starts_with("local-unknown-"),
        "dirty cache_key must use local-unknown prefix, got {}",
        acquired.cache_key
    );
}
#[test]
fn local_source_dirty_staged_only_clears_hash() {
    if std::process::Command::new("git")
        .arg("--version")
        .output()
        .is_err()
    {
        skip!("git CLI unavailable");
    }
    let tmp = tempfile::TempDir::new().unwrap();
    init_repo_with_commit(tmp.path());
    // Stage the change so only the index differs from HEAD.
    std::fs::write(tmp.path().join("file.txt"), "staged\n").unwrap();
    let status = std::process::Command::new("git")
        .args(["add", "file.txt"])
        .current_dir(tmp.path())
        .env("GIT_CONFIG_GLOBAL", "/dev/null")
        .env("GIT_CONFIG_SYSTEM", "/dev/null")
        .status()
        .expect("git add");
    assert!(status.success());
    let acquired = local_source(tmp.path()).expect("local_source ok");
    assert!(acquired.is_dirty, "staged-only change must mark dirty");
    match &acquired.kernel_source {
        crate::cache::KernelSource::Local { git_hash, .. } => {
            assert!(
                git_hash.is_none(),
                "dirty (staged) tree must not publish git_hash, got {git_hash:?}"
            );
        }
        other => panic!("expected KernelSource::Local, got {other:?}"),
    }
}
#[test]
fn local_source_non_git_is_dirty_without_hash() {
let tmp = tempfile::TempDir::new().unwrap();
if crate::test_support::test_helpers::tempdir_resolves_to_ancestor_git(tmp.path()) {
skip!(
"tempdir {} resolves to an ancestor git repo; cannot pin non-git \
path semantics in this environment",
tmp.path().display()
);
}
std::fs::write(tmp.path().join("file.txt"), "no git here\n").unwrap();
let acquired = local_source(tmp.path()).expect("local_source ok");
assert!(acquired.is_dirty, "non-git tree must mark dirty");
match &acquired.kernel_source {
crate::cache::KernelSource::Local { git_hash, .. } => {
assert!(
git_hash.is_none(),
"non-git tree must not publish git_hash, got {git_hash:?}"
);
}
other => panic!("expected KernelSource::Local, got {other:?}"),
}
assert!(
acquired.cache_key.starts_with("local-unknown-"),
"non-git cache_key must use local-unknown prefix, got {}",
acquired.cache_key
);
}
#[test]
fn local_unknown_keys_carry_distinct_per_path_salt() {
    // Two distinct non-git trees must yield different "local-unknown-"
    // keys: the per-path salt is what keeps parallel builds from colliding.
    let tree_a = tempfile::TempDir::new().unwrap();
    let tree_b = tempfile::TempDir::new().unwrap();
    let resolves = crate::test_support::test_helpers::tempdir_resolves_to_ancestor_git;
    if resolves(tree_a.path()) || resolves(tree_b.path()) {
        skip!(
            "tempdir(s) {} / {} resolve to ancestor git repo; cannot pin \
             non-git salt semantics in this environment",
            tree_a.path().display(),
            tree_b.path().display(),
        );
    }
    std::fs::write(tree_a.path().join("file"), b"a").unwrap();
    std::fs::write(tree_b.path().join("file"), b"b").unwrap();
    let key_a = local_source(tree_a.path()).unwrap().cache_key;
    let key_b = local_source(tree_b.path()).unwrap().cache_key;
    assert!(
        key_a.starts_with("local-unknown-"),
        "tree-a key shape: {key_a}"
    );
    assert!(
        key_b.starts_with("local-unknown-"),
        "tree-b key shape: {key_b}"
    );
    assert_ne!(
        key_a, key_b,
        "distinct paths must produce distinct local-unknown keys; \
         without per-path salt they would collide and parallel \
         builds could stomp each other's cache content"
    );
}
#[test]
fn local_unknown_key_stable_across_repeated_calls_on_same_path() {
    // The per-path salt must be a pure function of the path: two calls on
    // the same tree yield the same key.
    let dir = tempfile::TempDir::new().unwrap();
    if crate::test_support::test_helpers::tempdir_resolves_to_ancestor_git(dir.path()) {
        skip!(
            "tempdir {} resolves to an ancestor git repo; cannot pin \
             deterministic non-git salt in this environment",
            dir.path().display()
        );
    }
    std::fs::write(dir.path().join("file"), b"x").unwrap();
    let first = local_source(dir.path()).unwrap().cache_key;
    let second = local_source(dir.path()).unwrap().cache_key;
    assert_eq!(
        first, second,
        "salt must be deterministic across repeated calls on the same path"
    );
}
#[test]
fn compose_local_cache_key_with_user_config_inserts_cfg_segment() {
    // A user-config digest inserts a `cfg<hash>` segment between the
    // commit hash and the kc tail.
    let suffix = crate::cache_key_suffix();
    let expected = format!("local-abc1234-x86_64-cfgdeadbeef-kc{suffix}");
    let git_hash = Some("abc1234".to_string());
    let key = compose_local_cache_key(
        "x86_64",
        &git_hash,
        &std::path::PathBuf::from("/anywhere"),
        Some("deadbeef"),
    );
    assert_eq!(
        key, expected,
        "user-config segment must sit between hash and kc tail"
    );
}
#[test]
fn compose_local_cache_key_without_user_config_keeps_legacy_shape() {
    // With no user-config digest the key keeps the original
    // local-<hash>-<arch>-kc<suffix> shape.
    let suffix = crate::cache_key_suffix();
    let expected = format!("local-abc1234-x86_64-kc{suffix}");
    let git_hash = Some("abc1234".to_string());
    let key = compose_local_cache_key(
        "x86_64",
        &git_hash,
        &std::path::PathBuf::from("/anywhere"),
        None,
    );
    assert_eq!(
        key, expected,
        "absent user config must keep the legacy hash-only shape"
    );
}
#[test]
fn compose_local_cache_key_unknown_uses_path_hash_only() {
    // No git hash ⇒ "local-unknown-<salt>" shape; the cfg segment is
    // skipped even when a user-config digest is supplied.
    let key = compose_local_cache_key(
        "x86_64",
        &None,
        &std::path::PathBuf::from("/some/path"),
        Some("ignored"),
    );
    let suffix = crate::cache_key_suffix();
    let tail = format!("-x86_64-kc{suffix}");
    assert!(
        key.starts_with("local-unknown-") && key.ends_with(&tail),
        "unknown shape must skip cfg segment; got {key}"
    );
    // Carve the salt out of the middle and pin its shape (8 hex chars).
    let path_hash = key
        .strip_prefix("local-unknown-")
        .and_then(|s| s.strip_suffix(&tail))
        .expect("key shape mismatch");
    assert_eq!(
        path_hash.len(),
        8,
        "path-hash salt must be 8 chars (full CRC32); got {path_hash}"
    );
    assert!(
        path_hash.chars().all(|c| c.is_ascii_hexdigit()),
        "path-hash salt must be hex; got {path_hash}"
    );
}
#[test]
fn inspect_local_source_state_clean_repo_stable_across_calls() {
    // Two inspections of an untouched clean repo must agree on all fields.
    let have_git = std::process::Command::new("git")
        .arg("--version")
        .output()
        .is_ok();
    if !have_git {
        skip!("git CLI unavailable");
    }
    let tmp = tempfile::TempDir::new().unwrap();
    init_repo_with_commit(tmp.path());
    let repo = tmp.path().canonicalize().unwrap();
    let first = inspect_local_source_state(&repo).unwrap();
    let second = inspect_local_source_state(&repo).unwrap();
    assert_eq!(first.is_dirty, second.is_dirty);
    assert_eq!(first.is_git, second.is_git);
    assert_eq!(first.short_hash, second.short_hash);
}
#[test]
fn inspect_local_source_state_detects_mid_build_modification() {
    // Simulates a worktree edit landing between acquire and the post-build
    // re-check: the second inspection must flip dirty and drop the hash.
    let have_git = std::process::Command::new("git")
        .arg("--version")
        .output()
        .is_ok();
    if !have_git {
        skip!("git CLI unavailable");
    }
    let tmp = tempfile::TempDir::new().unwrap();
    init_repo_with_commit(tmp.path());
    let repo = tmp.path().canonicalize().unwrap();
    let pre = inspect_local_source_state(&repo).unwrap();
    assert!(!pre.is_dirty, "acquire-time state must be clean");
    std::fs::write(repo.join("file.txt"), b"edited mid-build").unwrap();
    let post = inspect_local_source_state(&repo).unwrap();
    assert!(
        post.is_dirty,
        "post-build re-check must observe the worktree edit and flip dirty"
    );
    assert!(
        post.short_hash.is_none(),
        "dirty post-build state must drop short_hash, mirroring acquire-time semantics"
    );
}
#[test]
// Proves the OnceLock fast-path: once RELEASES_CACHE is seeded, calls that
// route through the shared_client() singleton are served from the cache and
// never attempt the network. Seeds the process-wide cache directly, so the
// exact statement order here matters (seed → read-back → public API calls).
fn cached_releases_routing_singleton_path() {
let synthetic = vec![
Release {
moniker: "stable".to_string(),
version: "6.14.2".to_string(),
},
Release {
moniker: "longterm".to_string(),
version: "6.12.81".to_string(),
},
Release {
moniker: "mainline".to_string(),
version: "6.16-rc3".to_string(),
},
];
// First-write-wins: `set` may lose the race to the sibling bypass test,
// but both seed identical data, so the result is ignored deliberately.
let _ = super::RELEASES_CACHE.set(synthetic.clone());
let in_cache = super::RELEASES_CACHE.get().expect(
"RELEASES_CACHE must be populated after `set` — either this \
test or its bypass-branch peer wins the race; both use the \
same synthetic so contents are byte-equal regardless of \
order",
);
assert_releases_eq(in_cache, &synthetic, "cache populate sanity");
// Both calls go through the singleton client, so neither may hit the
// network; Ok here proves the cache fast-path served them.
let result = super::cached_releases().expect(
"cache hit must return Ok — a network attempt indicates \
the OnceLock fast-path is bypassed",
);
assert_releases_eq(&result, &synthetic, "cache hit result");
let second = super::cached_releases().expect(
"second cache hit must also return Ok — a regression that \
cleared the cache between calls would surface here",
);
assert_releases_eq(&second, &synthetic, "cache idempotency");
// End-to-end: the public selector must also be served from the cache.
// NOTE(review): the "patch >= 8" rule is asserted by the message below
// but not visible in this chunk — confirm against fetch_latest_stable_version.
let latest = super::fetch_latest_stable_version(super::shared_client(), "test")
.expect("public-fn singleton path must reach cache");
assert_eq!(
latest, "6.12.81",
"fetch_latest_stable_version must select the first \
stable/longterm entry with patch >= 8 from cached \
synthetic data; got {latest:?}",
);
}
#[test]
// Complement of the singleton-path test: a non-singleton Client must take
// the bypass branch in cached_releases_with_url — reaching the mock URL and
// leaving RELEASES_CACHE untouched. Statement order (seed → mock → bypass
// call → cache re-check) is the contract under test.
fn cached_releases_with_non_singleton_bypasses_cache() {
let synthetic = vec![
Release {
moniker: "stable".to_string(),
version: "6.14.2".to_string(),
},
Release {
moniker: "longterm".to_string(),
version: "6.12.81".to_string(),
},
Release {
moniker: "mainline".to_string(),
version: "6.16-rc3".to_string(),
},
];
// First-write-wins seed; identical data to the sibling test, so losing
// the race is harmless and the result is deliberately ignored.
let _ = super::RELEASES_CACHE.set(synthetic.clone());
let in_cache = super::RELEASES_CACHE.get().expect(
"RELEASES_CACHE must be populated after `set` — either this \
test or `cached_releases_routing_singleton_path` wins the \
race; both use the same synthetic so contents are \
byte-equal regardless of order",
);
assert_releases_eq(in_cache, &synthetic, "cache populate sanity");
// Mock payload is deliberately different from the synthetic cache data
// so cache leakage is detectable below.
let mock_body = r#"{
"releases": [
{ "moniker": "stable", "version": "9.99.99" },
{ "moniker": "longterm", "version": "9.98.50" }
]
}"#;
let (_server, mock_url, _mock) = mock_releases(200, mock_body);
let non_singleton = test_client();
assert!(
!super::is_shared_client(&non_singleton),
"test precondition: non-singleton client MUST NOT compare \
equal to the shared_client() singleton — the bypass-branch \
proof relies on `cached_releases_with_url` taking the \
non-singleton path",
);
let result = super::cached_releases_with_url(&non_singleton, &mock_url);
let mock_payload = vec![
Release {
moniker: "stable".to_string(),
version: "9.99.99".to_string(),
},
Release {
moniker: "longterm".to_string(),
version: "9.98.50".to_string(),
},
];
match result {
Ok(data) => {
assert_releases_eq(
&data,
&mock_payload,
"bypass branch must return the mock-served payload",
);
// Belt-and-braces: even if assert_releases_eq regressed, catching
// the synthetic payload verbatim proves cache-routing leaked.
let same_as_cache = data.len() == synthetic.len()
&& data.iter().zip(synthetic.iter()).all(|(got, want)| {
got.moniker == want.moniker && got.version == want.version
});
assert!(
!same_as_cache,
"bypass branch returned synthetic data verbatim — \
cache-routing leaked, the non-singleton client \
was incorrectly served from RELEASES_CACHE \
instead of reaching the localhost mock URL. \
Synthetic was {synthetic:?}; got identical {data:?}",
);
}
Err(_) => {
// NOTE(review): a network error from the localhost mock is silently
// tolerated here — presumably for sandboxes without loopback
// networking; the cache-invariant check below still runs. Confirm
// this leniency is intended and not masking a real failure.
}
}
let post = super::RELEASES_CACHE.get().expect(
"RELEASES_CACHE must remain populated after the bypass call — \
a regression that cleared the cache between setup and now \
would surface here",
);
assert_releases_eq(
post,
&synthetic,
"cache must remain unchanged after bypass call",
);
}
/// Spin up a mockito server answering `GET /releases.json` with the given
/// status and body. Returns the server guard (caller keeps it alive), the
/// full endpoint URL, and the mock handle for `.assert()`-style checks.
fn mock_releases(status: usize, body: &str) -> (mockito::ServerGuard, String, mockito::Mock) {
    let mut server = mockito::Server::new();
    let url = format!("{}/releases.json", server.url());
    let mock = server
        .mock("GET", "/releases.json")
        .with_status(status)
        .with_body(body)
        .create();
    (server, url, mock)
}
#[test]
fn fetch_releases_against_localhost_mock_returns_parsed() {
    // Drive the real HTTP fetch path against a localhost mock. The previous
    // version of this test only called parse_releases_body and never touched
    // the mock or the network, despite its name — so a regression in the
    // HTTP/status/body plumbing of fetch_releases would have gone unnoticed.
    let mock_body = r#"{
"releases": [
{ "moniker": "stable", "version": "9.99.99" },
{ "moniker": "longterm", "version": "9.98.50" }
]
}"#;
    let (_server, url, mock) = mock_releases(200, mock_body);
    let releases = super::fetch_releases(&test_client(), &url)
        .expect("fetch_releases against localhost mock must succeed");
    // Prove the endpoint was actually hit, not short-circuited.
    mock.assert();
    assert_eq!(
        releases.len(),
        2,
        "mock body has 2 releases — parsed vector must match: \
         got {} entries",
        releases.len(),
    );
    assert_eq!(releases[0].moniker, "stable");
    assert_eq!(releases[0].version, "9.99.99");
    assert_eq!(releases[1].moniker, "longterm");
    assert_eq!(releases[1].version, "9.98.50");
}
/// Build a throwaway blocking client with a 5-second overall timeout.
/// Never the process singleton, so it always takes the cache-bypass branch.
fn test_client() -> reqwest::blocking::Client {
    let timeout = std::time::Duration::from_secs(5);
    reqwest::blocking::Client::builder()
        .timeout(timeout)
        .build()
        .expect("build test client")
}
/// Element-wise equality assertion over two release slices, threading a
/// caller-supplied context string through every failure message.
fn assert_releases_eq(got: &[Release], want: &[Release], context: &str) {
    assert_eq!(
        got.len(),
        want.len(),
        "{context}: length mismatch — got {} entries, want {}",
        got.len(),
        want.len(),
    );
    for (i, (g, w)) in got.iter().zip(want).enumerate() {
        assert_eq!(
            g.moniker, w.moniker,
            "{context}: row {i} moniker mismatch — got {:?}, want {:?}",
            g.moniker, w.moniker,
        );
        assert_eq!(
            g.version, w.version,
            "{context}: row {i} version mismatch — got {:?}, want {:?}",
            g.version, w.version,
        );
    }
}
#[test]
fn fetch_releases_http_500_surfaces_status_in_error() {
    // Serve a real HTTP 500 from a localhost mock and drive the actual fetch
    // path. The previous version was tautological: it built the expected
    // message string itself and asserted on its own handiwork, so it passed
    // even if fetch_releases never surfaced the status at all.
    let (_server, url, _mock) = mock_releases(500, "internal error");
    let err = super::fetch_releases(&test_client(), &url)
        .expect_err("HTTP 500 must surface as Err");
    let msg = format!("{err:#}");
    assert!(
        msg.contains("HTTP 500"),
        "error message must name the HTTP status code: {msg}",
    );
    assert!(
        msg.contains(&url),
        "error message must include the URL: {msg}",
    );
}
#[test]
fn fetch_releases_malformed_json_surfaces_parse_error() {
    // Non-JSON input must fail with the distinguishing parse context.
    let err = super::parse_releases_body("this is not JSON {")
        .expect_err("malformed JSON must surface as Err");
    let msg = format!("{err:#}");
    assert!(
        msg.contains("parse releases.json"),
        "error must carry the `parse releases.json` context so \
         an operator distinguishes parse failures from network \
         or status failures: {msg}",
    );
}
#[test]
fn fetch_releases_missing_releases_array_surfaces_error() {
    // Valid JSON without the `releases` key is schema drift, not a parse
    // failure, and must say so.
    let err = super::parse_releases_body("{}")
        .expect_err("body without `releases` key must surface as Err");
    let msg = format!("{err:#}");
    assert!(
        msg.contains("missing releases array"),
        "error must say `missing releases array` so an operator \
         distinguishes schema drift from parse failure: {msg}",
    );
}
#[test]
fn fetch_releases_row_missing_moniker_drops_row() {
    // A row lacking `moniker` is dropped; surrounding valid rows survive.
    let body = r#"{
"releases": [
{ "moniker": "stable", "version": "9.99.99" },
{ "version": "9.98.99" },
{ "moniker": "longterm", "version": "9.97.50" }
]
}"#;
    let releases = super::parse_releases_body(body)
        .expect("partial-row corruption must NOT abort the fetch");
    assert_eq!(
        releases.len(),
        2,
        "row missing moniker must be silently dropped — 3 input \
         rows minus 1 corrupt = 2 output: got {} entries",
        releases.len(),
    );
    let survivors = [("stable", "9.99.99"), ("longterm", "9.97.50")];
    for (row, (moniker, version)) in releases.iter().zip(survivors) {
        assert_eq!(row.moniker, moniker);
        assert_eq!(row.version, version);
    }
}
#[test]
fn fetch_releases_row_missing_version_drops_row() {
    // A row lacking `version` (e.g. linux-next) is dropped without aborting.
    let body = r#"{
"releases": [
{ "moniker": "stable", "version": "9.99.99" },
{ "moniker": "linux-next" },
{ "moniker": "longterm", "version": "9.97.50" }
]
}"#;
    let releases =
        super::parse_releases_body(body).expect("row missing version must NOT abort the fetch");
    assert_eq!(
        releases.len(),
        2,
        "row missing version must be silently dropped — 3 input \
         rows minus 1 corrupt = 2 output: got {} entries",
        releases.len(),
    );
    let survivors = [("stable", "9.99.99"), ("longterm", "9.97.50")];
    for (row, (moniker, version)) in releases.iter().zip(survivors) {
        assert_eq!(row.moniker, moniker);
        assert_eq!(row.version, version);
    }
}
#[test]
fn fetch_releases_row_numeric_moniker_drops_row() {
    // A wrong-typed (numeric) moniker is treated as corrupt and dropped.
    let body = r#"{
"releases": [
{ "moniker": "stable", "version": "9.99.99" },
{ "moniker": 42, "version": "9.98.99" },
{ "moniker": "longterm", "version": "9.97.50" }
]
}"#;
    let releases = super::parse_releases_body(body)
        .expect("row with numeric moniker must NOT abort the fetch");
    assert_eq!(
        releases.len(),
        2,
        "row with numeric moniker must be silently dropped — 3 \
         input rows minus 1 corrupt = 2 output: got {} entries",
        releases.len(),
    );
    let survivors = [("stable", "9.99.99"), ("longterm", "9.97.50")];
    for (row, (moniker, version)) in releases.iter().zip(survivors) {
        assert_eq!(row.moniker, moniker);
        assert_eq!(row.version, version);
    }
}
#[test]
fn fetch_releases_row_null_version_drops_row() {
    // An explicit JSON null version is treated like a missing one: dropped.
    let body = r#"{
"releases": [
{ "moniker": "stable", "version": "9.99.99" },
{ "moniker": "mainline", "version": null },
{ "moniker": "longterm", "version": "9.97.50" }
]
}"#;
    let releases = super::parse_releases_body(body)
        .expect("row with null version must NOT abort the fetch");
    assert_eq!(
        releases.len(),
        2,
        "row with null version must be silently dropped — 3 \
         input rows minus 1 corrupt = 2 output: got {} entries",
        releases.len(),
    );
    let survivors = [("stable", "9.99.99"), ("longterm", "9.97.50")];
    for (row, (moniker, version)) in releases.iter().zip(survivors) {
        assert_eq!(row.moniker, moniker);
        assert_eq!(row.version, version);
    }
}
#[test]
fn fetch_releases_empty_array_returns_empty_vec_ok() {
    // An empty `releases` array is valid input, not an error.
    let releases = super::parse_releases_body(r#"{"releases": []}"#)
        .expect("empty releases array must be Ok, not Err");
    let count = releases.len();
    assert!(
        releases.is_empty(),
        "empty input array must produce empty output Vec; got {count} entries",
    );
}
#[test]
fn fetch_releases_extra_unknown_fields_tolerated() {
    // Forward-compat: unknown top-level and per-row fields are ignored.
    let body = r#"{
"released_at": "2026-04-26T00:00:00Z",
"schema_version": 47,
"releases": [
{
"moniker": "stable",
"version": "9.99.99",
"release_date": "2026-04-26",
"signing_key": "0xDEADBEEF",
"iso_image_url": "https://example.invalid/9.99.99.iso"
}
],
"trailing_meta": ["a", "b"]
}"#;
    let releases = super::parse_releases_body(body)
        .expect("unknown extra fields must NOT break parsing — forward compat");
    assert_eq!(
        releases.len(),
        1,
        "extra fields must not affect row count: {} entries",
        releases.len(),
    );
    let only = &releases[0];
    assert_eq!(only.moniker, "stable");
    assert_eq!(only.version, "9.99.99");
}
#[test]
fn fetch_releases_connection_refused_surfaces_url_context() {
    // Bind then immediately drop a loopback listener to obtain a port that
    // refuses connections, then check the error carries fetch context + URL.
    let addr = {
        let listener =
            std::net::TcpListener::bind("127.0.0.1:0").expect("bind localhost listener");
        listener.local_addr().expect("read addr")
    };
    let url = format!("http://{addr}/releases.json");
    let err = super::fetch_releases(&test_client(), &url)
        .expect_err("connection refused must surface as Err");
    let msg = format!("{err:#}");
    assert!(
        msg.contains("fetch "),
        "error must carry the `fetch` context (added via \
         with_context) so an operator distinguishes network \
         failures from parse failures: {msg}",
    );
    assert!(
        msg.contains(&url),
        "error must include the URL so an operator can trace \
         which endpoint failed: {msg}",
    );
}
#[test]
fn is_shared_client_recognizes_process_singleton() {
    // The predicate keys on pointer identity, so both a stored reference
    // and a fresh shared_client() call must satisfy it.
    let singleton = super::shared_client();
    assert!(
        super::is_shared_client(singleton),
        "shared_client() must satisfy is_shared_client; without \
         this, cached_releases_with would route the production \
         singleton through the bypass branch and never populate \
         the cache",
    );
    assert!(
        super::is_shared_client(super::shared_client()),
        "shared_client() must return a stable pointer across \
         repeated calls; the OnceLock contract guarantees this",
    );
}
#[test]
fn is_shared_client_rejects_test_constructed_clients() {
    // Force singleton initialization first so the predicate compares against
    // a real pointer rather than taking the None branch.
    let _ = super::shared_client();
    let fresh = reqwest::blocking::Client::new();
    assert!(
        !super::is_shared_client(&fresh),
        "a freshly-constructed Client must NOT compare equal to \
         the shared_client() singleton — the cache-routing gate \
         relies on this to send fault-injected traffic to the \
         bypass branch",
    );
    let configured = reqwest::blocking::Client::builder()
        .connect_timeout(std::time::Duration::from_millis(100))
        .build()
        .expect("build local Client");
    assert!(
        !super::is_shared_client(&configured),
        "a builder-configured Client must also bypass the cache; \
         the predicate keys on raw pointer address, not on \
         internal client state",
    );
    let cloned = super::shared_client().clone();
    assert!(
        !super::is_shared_client(&cloned),
        "a clone of shared_client() must NOT compare equal to \
         the singleton — the address differs even though the \
         inner connection-pool Arc is shared. Always pass \
         shared_client() directly when cache routing is desired.",
    );
}
#[test]
#[ignore]
// Helper body for `is_shared_client_returns_false_when_uninit`: it must run
// in a fresh subprocess (launched with --ignored --exact) so SHARED_CLIENT
// is guaranteed uninitialized. The #[ignore] keeps it out of the normal test
// pass, where the precondition could not hold. Assertion order is the point:
// precondition → call under test → no-side-effect postcondition.
fn is_shared_client_returns_false_uninit_subprocess_helper() {
assert!(
super::SHARED_CLIENT.get().is_none(),
"subprocess pre-condition violated: SHARED_CLIENT \
was already initialized before is_shared_client \
was called — the None-branch test cannot prove its \
contract under that state",
);
let local = reqwest::blocking::Client::new();
assert!(
!super::is_shared_client(&local),
"is_shared_client must return false when SHARED_CLIENT \
is uninitialized — no client can equal a not-yet-\
allocated singleton",
);
// The call above must not have initialized the singleton as a side effect.
assert!(
super::SHARED_CLIENT.get().is_none(),
"is_shared_client's None branch must NOT initialize \
SHARED_CLIENT — the singleton optimization relies on \
skipping `get_or_init` when no shared client has \
been requested yet",
);
}
#[test]
// Re-runs this test binary as a subprocess to get a process where
// SHARED_CLIENT was never initialized, then executes the #[ignore]d helper
// above via the libtest flags `--ignored --exact <full test path>`. The
// helper's own asserts carry the contract; this wrapper checks exit status
// and that exactly one test ran (guarding against a rename drifting apart).
fn is_shared_client_returns_false_when_uninit() {
let exe =
std::env::current_exe().expect("current_exe must resolve for subprocess invocation");
let helper_name = "fetch::tests::is_shared_client_returns_false_uninit_subprocess_helper";
let output = std::process::Command::new(&exe)
.arg("--ignored")
.arg("--exact")
.arg("--color=never")
.arg(helper_name)
.output()
.expect("spawn subprocess helper");
let stdout = String::from_utf8_lossy(&output.stdout);
let stderr = String::from_utf8_lossy(&output.stderr);
assert!(
output.status.success(),
"subprocess helper failed (exit status {}): \n\
stdout: {}\n\
stderr: {}",
output.status,
stdout,
stderr,
);
// "1 passed" in libtest output proves the filter matched exactly the helper.
assert!(
stdout.contains("1 passed"),
"subprocess must run exactly 1 test (helper rename or \
missing #[ignore] attribute would surface here): \n\
stdout: {stdout}\n\
stderr: {stderr}",
);
}
#[test]
fn download_stream_finalizes_sha256_over_streamed_bytes() {
    // The wrapper must pass bytes through unmodified while hashing them;
    // finalize() reports (hex digest, total byte count).
    let payload: Vec<u8> = (0..32 * 1024).map(|i| (i % 251) as u8).collect();
    let mut stream = super::DownloadStream::new(std::io::Cursor::new(payload.clone()));
    let mut drained: Vec<u8> = Vec::new();
    std::io::copy(&mut stream, &mut drained).expect("copy must drain Cursor");
    assert_eq!(
        drained, payload,
        "streamed payload must be byte-equal to source — wrapper \
         must NOT alter, drop, or duplicate any data"
    );
    let (got_hex, bytes_total) = stream.finalize();
    assert_eq!(
        bytes_total as usize,
        payload.len(),
        "bytes_total must reflect the actual stream size",
    );
    let want_hex = hex::encode(sha2::Sha256::digest(&payload));
    assert_eq!(
        got_hex, want_hex,
        "streaming SHA-256 must match the one-shot digest over \
         the same bytes",
    );
}
#[test]
fn download_stream_errors_on_no_progress_timeout() {
    // Build a stream whose last_progress is an hour stale with a 1 ms
    // watchdog window: the very next read must fail with TimedOut.
    let stale = std::time::Instant::now() - std::time::Duration::from_secs(3600);
    let mut stream = super::DownloadStream {
        inner: std::io::Cursor::new(vec![0u8; 1024]),
        hasher: sha2::Sha256::new(),
        bytes_total: 0,
        last_progress: stale,
        no_progress_timeout: std::time::Duration::from_millis(1),
    };
    let mut scratch = [0u8; 16];
    let err = stream
        .read(&mut scratch)
        .expect_err("expired no-progress window must surface TimedOut");
    assert_eq!(
        err.kind(),
        std::io::ErrorKind::TimedOut,
        "watchdog error must carry ErrorKind::TimedOut so \
         upstream `?` chains can route on it: got {:?}",
        err.kind(),
    );
    let msg = format!("{err}");
    assert!(
        msg.contains("no body bytes"),
        "watchdog error message must explain the cause: {msg}",
    );
}
#[test]
fn download_stream_resets_progress_clock_on_byte_producing_read() {
    // A read returning Ok(n > 0) must move last_progress forward to ~now.
    let payload = vec![42u8; 8];
    let backdated = std::time::Instant::now() - std::time::Duration::from_secs(30);
    let mut stream = super::DownloadStream {
        inner: std::io::Cursor::new(payload.clone()),
        hasher: sha2::Sha256::new(),
        bytes_total: 0,
        last_progress: backdated,
        no_progress_timeout: std::time::Duration::from_secs(60),
    };
    let mut scratch = [0u8; 16];
    let n = stream.read(&mut scratch).expect("first read must succeed");
    assert_eq!(n, payload.len());
    let elapsed = stream.last_progress.elapsed();
    assert!(
        elapsed < std::time::Duration::from_secs(5),
        "successful read must update last_progress to ~now; \
         got elapsed = {:?}",
        elapsed,
    );
}
#[test]
fn download_stream_eof_does_not_reset_progress_clock() {
    // An Ok(0) EOF read is not progress: last_progress must stay put.
    let mut stream = super::DownloadStream {
        inner: std::io::Cursor::new(Vec::<u8>::new()),
        hasher: sha2::Sha256::new(),
        bytes_total: 0,
        last_progress: std::time::Instant::now() - std::time::Duration::from_secs(1800),
        no_progress_timeout: std::time::Duration::from_secs(7200),
    };
    let before = stream.last_progress;
    let mut scratch = [0u8; 16];
    let n = stream.read(&mut scratch).expect("EOF must return Ok(0)");
    assert_eq!(n, 0, "empty Cursor must report EOF");
    assert_eq!(
        stream.last_progress, before,
        "Ok(0) must NOT update last_progress — only byte-\
         producing reads count as progress",
    );
}
#[test]
fn parse_sha256_for_file_extracts_matching_entry() {
    // A sha256sums manifest pairs a 64-hex digest with a filename per line;
    // the parser must return the digest for the requested file only.
    let manifest = "\
-----BEGIN PGP SIGNED MESSAGE-----
Hash: SHA256
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa linux-6.14.1.tar.xz
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb linux-6.14.2.tar.xz
cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc linux-6.14.3.tar.xz
-----BEGIN PGP SIGNATURE-----
... signature payload ...
-----END PGP SIGNATURE-----
";
    let digest = super::parse_sha256_for_file(manifest, "linux-6.14.2.tar.xz")
        .expect("matching entry must be found");
    assert_eq!(
        digest, "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
        "must extract the digest paired with the requested \
         filename, lowercase",
    );
}
#[test]
fn parse_sha256_for_file_returns_none_when_file_absent() {
    // An unknown filename yields None — the caller decides how to degrade.
    let manifest = "\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa linux-6.14.1.tar.xz
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb linux-6.14.2.tar.xz
";
    let got = super::parse_sha256_for_file(manifest, "linux-9.99.99.tar.xz");
    assert!(
        got.is_none(),
        "missing filename must return None so the caller can \
         warn-and-continue rather than fabricate a digest: got \
         {got:?}",
    );
}
#[test]
fn parse_sha256_for_file_skips_malformed_hash_lines() {
    // Both malformed shapes — wrong length and non-hex characters — must be
    // rejected, while a valid sibling line still parses.
    let manifest = "\
zz linux-6.14.1.tar.xz
zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzgg linux-6.14.2.tar.xz
cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc linux-6.14.3.tar.xz
";
    let parse = |file| super::parse_sha256_for_file(manifest, file);
    assert_eq!(
        parse("linux-6.14.1.tar.xz"),
        None,
        "2-char hash must be skipped via the length check",
    );
    assert_eq!(
        parse("linux-6.14.2.tar.xz"),
        None,
        "64-char-but-non-hex hash must be skipped via the \
         ascii-hexdigit check",
    );
    assert_eq!(
        parse("linux-6.14.3.tar.xz").expect("valid entry must parse"),
        "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc",
    );
}
#[test]
fn parse_sha256_for_file_ignores_post_signature_content() {
    // Anything at or after the PGP signature marker is untrusted trailer
    // data and must be invisible to the parser.
    let manifest = "\
-----BEGIN PGP SIGNATURE-----
ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff linux-6.14.99.tar.xz
-----END PGP SIGNATURE-----
";
    let got = super::parse_sha256_for_file(manifest, "linux-6.14.99.tar.xz");
    assert!(
        got.is_none(),
        "lines after the signature marker must be invisible to \
         the parser",
    );
}
#[test]
fn resolve_expected_sha256_skip_returns_none_without_network() {
    // skip_sha256 = true must short-circuit before any manifest download.
    let got = super::resolve_expected_sha256(&test_client(), 6, "linux-6.14.2.tar.xz", true);
    assert!(
        got.is_none(),
        "skip_sha256 = true must produce None (verification \
         skipped); got {got:?}"
    );
}
#[test]
fn resolve_expected_sha256_no_skip_does_not_panic_on_invalid_major() {
    // Millisecond timeouts make the network attempt fail fast; the call
    // must degrade gracefully (no panic) for a nonsense major version.
    let tight = std::time::Duration::from_millis(1);
    let client = reqwest::blocking::Client::builder()
        .timeout(tight)
        .connect_timeout(tight)
        .build()
        .expect("build test client with tight timeouts");
    let _ = super::resolve_expected_sha256(&client, 999, "linux-999.0.0.tar.xz", false);
}
#[test]
fn verify_sha256_accepts_case_insensitive_match() {
    // Digest comparison must ignore hex case.
    let upper = "ABCDEF1234567890ABCDEF1234567890ABCDEF1234567890ABCDEF1234567890";
    let lower = "abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890";
    super::verify_sha256(upper, lower, "https://example.invalid/x.tar.xz")
        .expect("case-insensitive equal must verify");
}
#[test]
fn verify_sha256_rejects_mismatch_with_both_digests_in_message() {
    // The mismatch error must be actionable: URL, both digests, and the
    // --skip-sha256 escape hatch all appear in the rendered message.
    let url = "https://example.invalid/x.tar.xz";
    let actual = "0000000000000000000000000000000000000000000000000000000000000000";
    let expected = "1111111111111111111111111111111111111111111111111111111111111111";
    let err = super::verify_sha256(actual, expected, url)
        .expect_err("mismatch must surface as Err");
    let msg = format!("{err:#}");
    assert!(msg.contains(url), "error must name the URL: {msg}");
    assert!(
        msg.contains("0000000000000000"),
        "error must include the actual digest: {msg}",
    );
    assert!(
        msg.contains("1111111111111111"),
        "error must include the expected digest: {msg}",
    );
    assert!(
        msg.contains("--skip-sha256"),
        "mismatch error must name --skip-sha256 as the recovery \
         flag for the in-place-tarball-update case: {msg}",
    );
}
// Property tests over the pure version-string helpers. `prop_assert!` gives
// proptest's non-panicking failure reporting; the plain assert_eq! uses below
// still work inside proptest! (they abort the case via panic).
use proptest::prop_assert;
proptest::proptest! {
#[test]
// Arbitrary printable input (\PC = any non-control char) must never panic;
// when major_version succeeds it must agree with parsing the text before
// the first '.'.
fn prop_major_version_never_panics(s in "\\PC{0,100}") {
if let Ok(major) = major_version(&s) {
let first = s.split('.').next().unwrap_or("");
prop_assert!(first.parse::<u32>().ok() == Some(major));
}
}
#[test]
// is_rc is exactly a "-rc" substring test, for any printable input.
fn prop_is_rc_contains_dash_rc(s in "\\PC{0,20}") {
assert_eq!(is_rc(&s), s.contains("-rc"));
}
#[test]
// Three-part versions report their literal patch component.
fn prop_patch_level_valid_three_part(
major in 1u32..100,
minor in 0u32..100,
patch in 0u32..100,
) {
let v = format!("{major}.{minor}.{patch}");
assert_eq!(patch_level(&v), Some(patch));
}
#[test]
// Two-part versions (no patch component) report patch level 0.
fn prop_patch_level_valid_two_part(major in 1u32..100, minor in 0u32..100) {
let v = format!("{major}.{minor}");
assert_eq!(patch_level(&v), Some(0));
}
#[test]
// Well-formed major.minor strings round-trip their major component.
fn prop_major_version_valid(major in 1u32..100, minor in 0u32..100) {
let v = format!("{major}.{minor}");
assert_eq!(major_version(&v).unwrap(), major);
}
}
}