#![cfg(target_os = "windows")]
use std::io;
use std::path::{Path, PathBuf};
use oci_client::secrets::RegistryAuth;
use zlayer_hcs::schema::Layer;
use crate::windows::layer::{self, BackupStreamWriter};
use crate::windows::wclayer::{self, LayerChain};
/// A single image-layer blob as resolved from the image manifest:
/// everything needed to fetch, verify, and decode it.
#[derive(Debug, Clone)]
pub struct ResolvedLayerDescriptor {
    /// Content digest of the blob, e.g. `sha256:<hex>`; checked after download.
    pub digest: String,
    /// OCI/Docker media type (e.g. `...tar+gzip`); selects the decompressor.
    pub media_type: String,
    /// Declared blob size in bytes; passed to the puller as a size hint.
    pub size: i64,
    /// Alternate download URLs (foreign/non-distributable layers); may be empty.
    pub urls: Vec<String>,
}
/// Result of unpacking an image onto disk.
#[derive(Debug, Clone)]
pub struct UnpackedImage {
    /// The imported layer chain, ordered topmost (child) layer first.
    pub chain: LayerChain,
    /// Root directory containing one subdirectory per unpacked layer.
    pub root: PathBuf,
}
/// Pulls, verifies, decompresses, extracts, and imports every layer of a
/// Windows container image into `dest_root`, returning the resulting layer
/// chain (child-first) and the root path.
///
/// `layers` is expected base-first; each layer is imported with the chain
/// built so far as its parents. Backup/restore privileges are enabled up
/// front because both `BackupStreamWriter` and layer import need them.
///
/// # Errors
/// Fails on privilege acquisition, directory creation, blob pull, digest
/// mismatch, decompression, tar extraction, or `HcsImportLayer`.
pub async fn unpack_windows_image(
    puller: &zlayer_registry::client::ImagePuller,
    image: &str,
    auth: &RegistryAuth,
    layers: &[ResolvedLayerDescriptor],
    dest_root: &Path,
) -> io::Result<UnpackedImage> {
    layer::enable_backup_restore_privileges()?;
    std::fs::create_dir_all(dest_root)?;
    // Accumulated base-first; reversed to child-first at the end.
    let mut chain_so_far: Vec<Layer> = Vec::with_capacity(layers.len());
    for desc in layers {
        let layer_id = new_layer_id();
        let layer_path = dest_root.join(&layer_id);
        std::fs::create_dir_all(&layer_path)?;
        // NOTE(review): the whole blob is buffered in memory before
        // verification/extraction — large layers cost peak RAM proportional
        // to blob size.
        let bytes = puller
            .pull_blob_with_urls(image, &desc.digest, auth, &desc.urls, Some(desc.size))
            .await
            .map_err(|e| io::Error::other(format!("pull blob {}: {e}", desc.digest)))?;
        verify_digest(&bytes, &desc.digest)?;
        let raw = decompress(&bytes, &desc.media_type)?;
        extract_tar_to_backup_stream(&raw, &layer_path)?;
        // Parents must be handed to HCS child-first.
        let parent_chain = build_parent_chain(&chain_so_far);
        // NOTE(review): both arguments are `layer_path` — extraction and
        // import use the same directory; confirm this matches
        // wclayer::import_layer's expected (source, destination) roles.
        wclayer::import_layer(&layer_path, &layer_path, &parent_chain).map_err(|e| {
            io::Error::other(format!("HcsImportLayer({}): {e}", layer_path.display()))
        })?;
        chain_so_far.push(Layer {
            id: layer_id,
            path: layer_path.to_string_lossy().into_owned(),
        });
        // NOTE(review): on failure, already-created layer directories are
        // left behind under dest_root — presumably the caller cleans up.
    }
    let mut final_chain = chain_so_far;
    final_chain.reverse();
    Ok(UnpackedImage {
        chain: LayerChain::new(final_chain),
        root: dest_root.to_path_buf(),
    })
}
/// Turns a base-first layer list into a child-first `LayerChain`
/// (the parent ordering the layer-import call expects).
fn build_parent_chain(base_first: &[Layer]) -> LayerChain {
    let mut child_first = base_first.to_vec();
    child_first.reverse();
    LayerChain::new(child_first)
}
/// Generates a fresh random (v4) UUID string used to name a layer directory.
fn new_layer_id() -> String {
    format!("{}", uuid::Uuid::new_v4())
}
/// Decompresses a layer blob according to its media type.
///
/// Gzip (`+gzip` / `.tar.gzip`) and zstd (`+zstd` / `.tar.zstd`) suffixes
/// are matched case-insensitively; any other media type is treated as an
/// already-uncompressed payload and returned verbatim.
fn decompress(bytes: &[u8], media_type: &str) -> io::Result<Vec<u8>> {
    use std::io::Read as _;
    let kind = media_type.to_ascii_lowercase();
    let is_gzip = kind.ends_with("+gzip") || kind.ends_with(".tar.gzip");
    let is_zstd = kind.ends_with("+zstd") || kind.ends_with(".tar.zstd");
    if is_gzip {
        let mut out = Vec::new();
        flate2::read::GzDecoder::new(bytes).read_to_end(&mut out)?;
        return Ok(out);
    }
    if is_zstd {
        let mut out = Vec::new();
        zstd::stream::read::Decoder::new(bytes)?.read_to_end(&mut out)?;
        return Ok(out);
    }
    Ok(bytes.to_vec())
}
/// Verifies that `bytes` hashes (SHA-256) to the digest in `expected`.
///
/// `expected` may be bare hex or prefixed with `sha256:`. Any other
/// algorithm prefix (e.g. `sha512:`) is rejected explicitly — the previous
/// `trim_start_matches("sha256:")` left such digests intact and compared
/// them as hex, producing a misleading
/// "expected sha256:sha512:…" mismatch instead of a clear error (and would
/// also strip a repeated `sha256:sha256:` prefix).
///
/// # Errors
/// Returns an error for an unsupported digest algorithm or a hash mismatch.
fn verify_digest(bytes: &[u8], expected: &str) -> io::Result<()> {
    use sha2::{Digest, Sha256};
    let expected_hex = match expected.split_once(':') {
        Some(("sha256", hex)) => hex,
        Some((algo, _)) => {
            return Err(io::Error::other(format!(
                "unsupported digest algorithm: {algo}"
            )));
        }
        // No prefix: treat the whole string as bare sha256 hex.
        None => expected,
    };
    let mut hasher = Sha256::new();
    hasher.update(bytes);
    let got = hex::encode(hasher.finalize());
    // Registries emit lowercase hex, but compare case-insensitively anyway.
    if !got.eq_ignore_ascii_case(expected_hex) {
        return Err(io::Error::other(format!(
            "blob digest mismatch: expected sha256:{expected_hex}, got sha256:{got}"
        )));
    }
    Ok(())
}
fn extract_tar_to_backup_stream(tar_bytes: &[u8], layer_path: &Path) -> io::Result<()> {
let mut archive = tar::Archive::new(tar_bytes);
for entry in archive.entries()? {
let mut entry = entry?;
if entry.header().entry_type().is_dir() {
continue;
}
let rel_path = entry.path()?.into_owned();
let dest = layer_path.join(&rel_path);
if let Some(parent) = dest.parent() {
std::fs::create_dir_all(parent)?;
}
let rel_str = rel_path.to_string_lossy();
let is_files_payload = rel_str.starts_with("Files/") || rel_str.starts_with("Files\\");
if is_files_payload {
let mut writer = BackupStreamWriter::create_new(&dest)?;
std::io::copy(&mut entry, &mut writer)?;
} else {
let mut f = std::fs::File::create(&dest)?;
std::io::copy(&mut entry, &mut f)?;
}
}
Ok(())
}
/// Unit tests for the pure helpers: digest verification, decompression,
/// parent-chain ordering, and layer-id generation.
#[cfg(test)]
mod tests {
    use super::*;

    /// Hex SHA-256 of the byte string `b"hello world"`.
    const HELLO_WORLD_SHA256: &str =
        "b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9";

    #[test]
    fn digest_verify_accepts_matching_hash() {
        let digest = format!("sha256:{HELLO_WORLD_SHA256}");
        verify_digest(b"hello world", &digest).expect("should match");
    }

    #[test]
    fn digest_verify_rejects_mismatch() {
        let all_zeros = format!("sha256:{}", "0".repeat(64));
        let err = verify_digest(b"hello world", &all_zeros).expect_err("should reject");
        assert!(err.to_string().contains("digest mismatch"));
    }

    #[test]
    fn digest_verify_is_case_insensitive() {
        let upper = format!("sha256:{}", HELLO_WORLD_SHA256.to_ascii_uppercase());
        verify_digest(b"hello world", &upper).expect("should match");
    }

    #[test]
    fn decompress_passthrough_for_unknown_media_type() {
        let input = b"not compressed".to_vec();
        let out = decompress(&input, "application/octet-stream").expect("ok");
        assert_eq!(out, input);
    }

    #[test]
    fn decompress_gzip_roundtrip() {
        use std::io::Write as _;
        let mut enc = flate2::write::GzEncoder::new(Vec::new(), flate2::Compression::default());
        enc.write_all(b"hello tar").unwrap();
        let packed = enc.finish().unwrap();
        let out = decompress(&packed, "application/vnd.oci.image.layer.v1.tar+gzip")
            .expect("decompress");
        assert_eq!(out, b"hello tar");
    }

    #[test]
    fn build_parent_chain_reverses_base_first_to_child_first() {
        let mk = |id: &str| Layer {
            id: id.into(),
            path: format!(r"C:\l\{id}"),
        };
        let chain = build_parent_chain(&[mk("base"), mk("mid"), mk("top")]);
        let ids: Vec<&str> = chain.0.iter().map(|l| l.id.as_str()).collect();
        assert_eq!(ids, ["top", "mid", "base"]);
    }

    #[test]
    fn build_parent_chain_handles_empty() {
        assert!(build_parent_chain(&[]).0.is_empty());
    }

    #[test]
    fn new_layer_id_is_unique_and_uuid_shaped() {
        let first = new_layer_id();
        let second = new_layer_id();
        assert_ne!(first, second);
        // Canonical hyphenated UUID is 36 chars.
        assert_eq!(first.len(), 36);
    }
}