use std::fs;
use std::io::{self, Seek, SeekFrom, Write};
use std::os::unix::ffi::OsStrExt;
use std::os::unix::fs::PermissionsExt;
use std::path::Path;
use pkgar_core::HeaderFlags;
use pkgar_core::{
dryoc::classic::crypto_sign::crypto_sign_detached, Entry, Header, Mode, PackageSrc,
};
use pkgar_keys::PublicKeyFile;
use crate::ext::{copy_and_hash, DataWriter, EntryExt};
use crate::package::PackageFile;
use crate::transaction::Transaction;
use crate::{wrap_io_err, Error};
/// Recursively collect archive [`Entry`] records for every regular file and
/// symlink under `path`, storing each entry's path relative to `base`.
///
/// Directories are traversed (sorted by file name so archive contents are
/// deterministic regardless of filesystem iteration order) but do not
/// themselves produce entries. The `blake3` and `offset` fields are left
/// zeroed here; they are filled in later when the entry data is written.
///
/// # Errors
/// Returns an error if a relative path does not fit the fixed 256-byte
/// entry path buffer (at least one trailing NUL byte is required), or if
/// an entry is neither a regular file nor a symlink.
fn folder_entries<P, Q>(base: P, path: Q, entries: &mut Vec<Entry>) -> io::Result<()>
where
    P: AsRef<Path>,
    Q: AsRef<Path>,
{
    let base = base.as_ref();
    let path = path.as_ref();

    // Read the directory eagerly so it can be sorted before processing.
    let mut read_dir = Vec::new();
    for entry_res in fs::read_dir(path)? {
        read_dir.push(entry_res?);
    }
    read_dir.sort_unstable_by_key(|dir_entry| dir_entry.file_name());

    for entry in read_dir {
        // DirEntry::metadata does not traverse symlinks, so a symlink to a
        // directory is recorded as a symlink rather than recursed into.
        let metadata = entry.metadata()?;
        let entry_path = entry.path();
        if metadata.is_dir() {
            folder_entries(base, entry_path, entries)?;
        } else {
            let relative = entry_path
                .strip_prefix(base)
                .map_err(|err| io::Error::new(io::ErrorKind::Other, err))?;

            // Entry paths live in a fixed, zero-padded buffer; reject paths
            // that would leave no room for a trailing NUL byte.
            let mut path_bytes = [0; 256];
            let relative_bytes = relative.as_os_str().as_bytes();
            if relative_bytes.len() >= path_bytes.len() {
                return Err(io::Error::new(
                    io::ErrorKind::Other,
                    format!(
                        "relative path longer than supported: {} >= {}",
                        relative_bytes.len(),
                        path_bytes.len()
                    ),
                ));
            }
            path_bytes[..relative_bytes.len()].copy_from_slice(relative_bytes);

            // Combine the Unix permission bits with the entry-kind flag.
            let file_type = metadata.file_type();
            let file_mode = metadata.permissions().mode();
            let mut mode = file_mode & Mode::PERM.bits();
            if file_type.is_file() {
                mode |= Mode::FILE.bits();
            } else if file_type.is_symlink() {
                mode |= Mode::SYMLINK.bits();
            } else {
                return Err(io::Error::new(
                    io::ErrorKind::InvalidInput,
                    format!("Unsupported entry at {:?}: {:?}", relative, metadata),
                ));
            }

            entries.push(Entry {
                blake3: [0; 32],
                offset: 0,
                size: metadata.len(),
                mode,
                path: path_bytes,
            });
        }
    }
    Ok(())
}
/// Create a signed pkgar archive at `archive_path` from the contents of
/// `folder`, signing with the secret key at `secret_path`.
///
/// Convenience wrapper around [`create_with_flags`] that uses the latest
/// header format with architecture-independent, uncompressed packaging.
pub fn create(
    secret_path: impl AsRef<Path>,
    archive_path: impl AsRef<Path>,
    folder: impl AsRef<Path>,
) -> Result<(), Error> {
    let default_flags = HeaderFlags::latest(
        pkgar_core::Architecture::Independent,
        pkgar_core::Packaging::Uncompressed,
    );
    create_with_flags(secret_path, archive_path, folder, default_flags)
}
/// Create a signed pkgar archive at `archive_path` from the contents of
/// `folder`, using the secret key at `secret_path` and the given header
/// `flags` (architecture and packaging selection).
///
/// The archive is written in two passes: first entry data is streamed to
/// the region after the space reserved for the header and entry table,
/// then — once every entry's offset, stored size and blake3 hash are
/// known — the signed header and entry table are written back at offset 0.
///
/// # Panics
/// Panics if the key file at `secret_path` is still encrypted (no key
/// material available).
///
/// # Errors
/// Returns an error for I/O failures, entries that are neither files nor
/// symlinks, paths that do not fit an entry, copy-length mismatches, or
/// data-offset overflow.
pub fn create_with_flags(
    secret_path: impl AsRef<Path>,
    archive_path: impl AsRef<Path>,
    folder: impl AsRef<Path>,
    flags: HeaderFlags,
) -> Result<(), Error> {
    // Load the signing keypair; both halves are needed below.
    let keyfile = pkgar_keys::get_skey(secret_path.as_ref())?;
    let secret_key = keyfile
        .secret_key()
        .unwrap_or_else(|| panic!("{} was encrypted?", secret_path.as_ref().display()));
    let public_key = keyfile
        .public_key()
        .unwrap_or_else(|| panic!("{} was encrypted?", secret_path.as_ref().display()));
    let archive_path = archive_path.as_ref();
    let mut archive_file = fs::OpenOptions::new()
        .write(true)
        .create(true)
        .truncate(true)
        .open(archive_path)
        .map_err(wrap_io_err!(archive_path, "Opening source"))?;
    // Collect an entry per file/symlink under the build root. The blake3
    // and offset fields are zeroed for now and filled in during the write
    // pass below.
    let mut entries = Vec::new();
    let folder = folder.as_ref();
    folder_entries(folder, folder, &mut entries)
        .map_err(wrap_io_err!(archive_path, "Recursing buildroot"))?;
    // Provisional header; signature and blake3 are computed at the end.
    let mut header = Header {
        signature: [0; 64],
        public_key,
        blake3: [0; 32],
        count: entries.len() as u32,
        flags,
    };
    // Skip past the space reserved for the header + entry table so entry
    // data can be streamed out first.
    let data_offset = header.total_size()?;
    archive_file
        .seek(SeekFrom::Start(data_offset as u64))
        .map_err(wrap_io_err!(archive_path, "Seeking archive file"))?;
    // Accumulates the hash of the serialized entry table for the header.
    let mut header_hasher = blake3::Hasher::new();
    // 4 MiB scratch buffer reused for every copy below.
    let mut buf = vec![0; 4 * 1024 * 1024];
    // Offset of the next entry's data, relative to the data section start.
    let mut data_offset: u64 = 0;
    for entry in &mut entries {
        let relative = entry.check_path()?;
        let path = folder.join(relative);
        let mode = entry.mode().map_err(Error::from)?;
        // ulen: uncompressed bytes copied through the writer.
        // clen: bytes actually occupied in the archive (may differ from
        //       ulen when the packaging compresses).
        // rlen: expected raw length, used to detect short/long copies.
        let (ulen, clen, rlen, hash) = match mode.kind() {
            Mode::FILE => {
                let mut entry_file = fs::OpenOptions::new()
                    .read(true)
                    .open(&path)
                    .map_err(wrap_io_err!(path, "Opening entry data"))?;
                let entry_meta = entry_file
                    .metadata()
                    .map_err(wrap_io_err!(path, "Checking entry data size"))?;
                let start_pos = archive_file
                    .stream_position()
                    .map_err(wrap_io_err!(path, "Getting file position"))?;
                let rlen = entry_meta.len();
                // DataWriter takes ownership of the archive file and hands
                // it back from finish(); clen is measured from the file
                // position delta across the write.
                let mut writer = DataWriter::new(header.flags.packaging(), archive_file, rlen)
                    .map_err(wrap_io_err!(path, "Writing entry data size"))?;
                let (ulen, hash) = copy_and_hash(&mut entry_file, &mut writer, &mut buf)
                    .map_err(wrap_io_err!(path, "Writing data to archive"))?;
                archive_file = writer
                    .finish()
                    .map_err(wrap_io_err!(path, "Finalize archive"))?;
                let end_pos = archive_file
                    .stream_position()
                    .map_err(wrap_io_err!(path, "Getting file position"))?;
                (ulen, end_pos - start_pos, rlen, hash)
            }
            Mode::SYMLINK => {
                // Symlinks store their target path as the entry data.
                let destination =
                    fs::read_link(&path).map_err(wrap_io_err!(path, "Reading entry symlink"))?;
                let start_pos = archive_file
                    .stream_position()
                    .map_err(wrap_io_err!(path, "Getting file position"))?;
                let mut data = destination.as_os_str().as_bytes();
                let rlen = data.len() as u64;
                let mut writer = DataWriter::new(header.flags.packaging(), archive_file, rlen)
                    .map_err(wrap_io_err!(path, "Writing entry data size"))?;
                let (ulen, hash) = copy_and_hash(&mut data, &mut writer, &mut buf)
                    .map_err(wrap_io_err!(path, "Writing data to archive"))?;
                archive_file = writer
                    .finish()
                    .map_err(wrap_io_err!(path, "Finalize archive"))?;
                let end_pos = archive_file
                    .stream_position()
                    .map_err(wrap_io_err!(path, "Getting file position"))?;
                (ulen, end_pos - start_pos, rlen, hash)
            }
            _ => {
                // folder_entries only emits FILE/SYMLINK, but guard anyway.
                return Err(Error::from(pkgar_core::Error::InvalidMode(mode.bits())));
            }
        };
        // The uncompressed byte count must match the source length exactly.
        if ulen != rlen {
            return Err(Error::LengthMismatch {
                actual: ulen,
                expected: rlen,
            });
        }
        // Back-fill the entry with its final stored size, offset and hash.
        entry.size = clen;
        entry.offset = data_offset;
        entry.blake3.copy_from_slice(hash.as_bytes());
        data_offset = data_offset
            .checked_add(clen)
            .ok_or(pkgar_core::Error::Overflow)
            .map_err(Error::from)?;
        // Fold the finalized entry bytes into the header hash.
        header_hasher.update_rayon(bytemuck::bytes_of(entry));
    }
    header
        .blake3
        .copy_from_slice(header_hasher.finalize().as_bytes());
    // Sign everything in the header after the 64-byte signature field.
    let mut signature = [0; 64];
    crypto_sign_detached(
        &mut signature,
        &bytemuck::bytes_of(&header)[64..],
        &secret_key,
    )
    .map_err(pkgar_core::Error::Dryoc)?;
    header.signature.copy_from_slice(&signature);
    // Second pass: write the signed header and entry table at the front.
    archive_file
        .seek(SeekFrom::Start(0))
        .map_err(|source| Error::Io {
            source,
            path: Some(archive_path.to_path_buf()),
            context: "Seeking archive_file back to 0",
        })?;
    archive_file
        .write_all(bytemuck::bytes_of(&header))
        .map_err(|source| Error::Io {
            source,
            path: Some(archive_path.to_path_buf()),
            context: "Writing header",
        })?;
    for entry in &entries {
        // Re-validate each path before serializing the entry table.
        let _ = entry.check_path()?;
        archive_file
            .write_all(bytemuck::bytes_of(entry))
            .map_err(|source| Error::Io {
                source,
                path: Some(archive_path.to_path_buf()),
                context: "Writing entry",
            })?;
    }
    Ok(())
}
/// Verify the archive at `archive_path` against the public key stored at
/// `pkey_path` and install its contents under `base_dir`.
pub fn extract(
    pkey_path: impl AsRef<Path>,
    archive_path: impl AsRef<Path>,
    base_dir: impl AsRef<Path>,
) -> Result<(), Error> {
    let keyfile = PublicKeyFile::open(pkey_path.as_ref())?;
    let mut package = PackageFile::new(archive_path, &keyfile.pkey)?;
    let transaction = Transaction::install(&mut package, base_dir)?;
    transaction.commit()?;
    Ok(())
}
/// Verify the archive at `archive_path` against the public key stored at
/// `pkey_path` and remove its entries from `base_dir`.
pub fn remove(
    pkey_path: impl AsRef<Path>,
    archive_path: impl AsRef<Path>,
    base_dir: impl AsRef<Path>,
) -> Result<(), Error> {
    let keyfile = PublicKeyFile::open(pkey_path.as_ref())?;
    let mut package = PackageFile::new(archive_path, &keyfile.pkey)?;
    let transaction = Transaction::remove(&mut package, base_dir)?;
    transaction.commit()?;
    Ok(())
}
/// Print the relative path of every entry in the archive at `archive_path`
/// to stdout, verifying it against the public key stored at `pkey_path`.
pub fn list(pkey_path: impl AsRef<Path>, archive_path: impl AsRef<Path>) -> Result<(), Error> {
    let keyfile = PublicKeyFile::open(pkey_path.as_ref())?;
    let mut package = PackageFile::new(archive_path, &keyfile.pkey)?;
    for entry in package.read_entries()? {
        println!("{}", entry.check_path()?.display());
    }
    Ok(())
}
/// Split the archive at `archive_path` into a head portion written to
/// `head_path` and, when `data_path_opt` is given, a data portion written
/// there; the archive is first verified against the key at `pkey_path`.
pub fn split(
    pkey_path: impl AsRef<Path>,
    archive_path: impl AsRef<Path>,
    head_path: impl AsRef<Path>,
    data_path_opt: Option<impl AsRef<Path>>,
) -> Result<(), Error> {
    let pkey = PublicKeyFile::open(pkey_path.as_ref())?.pkey;
    let mut package = PackageFile::new(archive_path.as_ref(), &pkey)?;
    let data_path = data_path_opt.as_ref().map(|p| p.as_ref());
    package.split(head_path.as_ref(), data_path)
}
/// Verify the contents of `base_dir` against the signed archive at
/// `archive_path`, using the public key stored at `pkey_path`.
pub fn verify(
    pkey_path: impl AsRef<Path>,
    archive_path: impl AsRef<Path>,
    base_dir: impl AsRef<Path>,
) -> Result<(), Error> {
    let pkey = PublicKeyFile::open(pkey_path.as_ref())?.pkey;
    // Pass the path directly (no extra `&` layer) to match the sibling
    // actions (extract/remove/list/split).
    let mut package = PackageFile::new(archive_path.as_ref(), &pkey)?;
    package.verify(base_dir.as_ref())
}