#![forbid(missing_debug_implementations, missing_copy_implementations)]
#![deny(rust_2018_idioms)]
#![deny(missing_docs)]
#![warn(clippy::pedantic)]
#![allow(
clippy::module_name_repetitions,
clippy::must_use_candidate,
clippy::missing_errors_doc,
clippy::result_large_err
)]
mod cache;
mod datastore;
pub mod editor;
pub mod error;
mod fetch;
#[cfg(feature = "http")]
pub mod http;
mod io;
pub mod key_source;
pub mod schema;
pub mod sign;
mod target_name;
mod transport;
mod urlpath;
use crate::datastore::Datastore;
use crate::error::Result;
use crate::fetch::{fetch_max_size, fetch_sha256};
#[cfg(feature = "http")]
pub use crate::http::{HttpTransport, HttpTransportBuilder};
use crate::io::is_dir;
use crate::schema::{
DelegatedRole, Delegations, Role, RoleType, Root, Signed, Snapshot, Timestamp,
};
pub use crate::target_name::TargetName;
pub use crate::transport::IntoVec;
pub use crate::transport::{
DefaultTransport, FilesystemTransport, Transport, TransportError, TransportErrorKind,
TransportStream,
};
pub use crate::urlpath::SafeUrlPath;
use async_recursion::async_recursion;
pub use async_trait::async_trait;
pub use bytes::Bytes;
use chrono::{DateTime, Utc};
use error::SnapshotTargetsMetaMissingSnafu;
use futures::StreamExt;
use futures_core::Stream;
use log::warn;
use percent_encoding::{utf8_percent_encode, AsciiSet, NON_ALPHANUMERIC};
use snafu::{ensure, OptionExt, ResultExt};
use std::collections::{BTreeSet, HashMap};
use std::path::{Path, PathBuf};
use tempfile::NamedTempFile;
use tokio::fs::{canonicalize, create_dir_all};
use tokio::io::AsyncWriteExt;
use url::Url;
/// Controls whether metadata expiration dates are enforced while loading the
/// repository and when reading targets.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ExpirationEnforcement {
    /// Expiration dates are checked and expired metadata is rejected (the default).
    Safe,
    /// Expiration dates are ignored; expired metadata is accepted.
    Unsafe,
}
impl Default for ExpirationEnforcement {
fn default() -> Self {
ExpirationEnforcement::Safe
}
}
impl From<bool> for ExpirationEnforcement {
    /// `true` means "enforce expiration" (`Safe`); `false` means `Unsafe`.
    fn from(b: bool) -> Self {
        match b {
            true => ExpirationEnforcement::Safe,
            false => ExpirationEnforcement::Unsafe,
        }
    }
}
impl From<ExpirationEnforcement> for bool {
    /// Yields `true` exactly when enforcement is `Safe`.
    fn from(ee: ExpirationEnforcement) -> Self {
        matches!(ee, ExpirationEnforcement::Safe)
    }
}
/// Builder for loading a [`Repository`]. Collects the trusted root bytes, the
/// base URLs, and optional overrides; `None` fields fall back to defaults when
/// [`Repository::load`] runs.
#[derive(Debug, Clone)]
pub struct RepositoryLoader<'a> {
    // Serialized, trusted root.json bytes; verified before use.
    root: &'a [u8],
    // Base URL under which metadata files (root/timestamp/snapshot/targets) live.
    metadata_base_url: Url,
    // Base URL under which target files live.
    targets_base_url: Url,
    // Optional transport; defaults to `DefaultTransport` when `None`.
    transport: Option<Box<dyn Transport + Send + Sync>>,
    // Optional fetch-size limits; defaults to `Limits::default()` when `None`.
    limits: Option<Limits>,
    // Optional datastore path passed to `Datastore::new`.
    datastore: Option<PathBuf>,
    // Optional expiration policy; defaults to `Safe` when `None`.
    expiration_enforcement: Option<ExpirationEnforcement>,
}
impl<'a> RepositoryLoader<'a> {
    /// Creates a loader from trusted root metadata bytes plus the metadata and
    /// targets base URLs. All optional settings start unset and fall back to
    /// defaults at load time.
    pub fn new(root: &'a impl AsRef<[u8]>, metadata_base_url: Url, targets_base_url: Url) -> Self {
        Self {
            transport: None,
            limits: None,
            datastore: None,
            expiration_enforcement: None,
            root: root.as_ref(),
            metadata_base_url,
            targets_base_url,
        }
    }

    /// Consumes the builder and loads/verifies the repository.
    pub async fn load(self) -> Result<Repository> {
        Repository::load(self).await
    }

    /// Overrides the transport used to fetch metadata and targets.
    #[must_use]
    pub fn transport<T: Transport + Send + Sync + 'static>(self, transport: T) -> Self {
        Self {
            transport: Some(Box::new(transport)),
            ..self
        }
    }

    /// Overrides the maximum fetch sizes and root-update count.
    #[must_use]
    pub fn limits(self, limits: Limits) -> Self {
        Self {
            limits: Some(limits),
            ..self
        }
    }

    /// Sets the datastore path used to cache trusted metadata between loads.
    #[must_use]
    pub fn datastore<P: Into<PathBuf>>(self, datastore: P) -> Self {
        Self {
            datastore: Some(datastore.into()),
            ..self
        }
    }

    /// Sets whether metadata expiration dates are enforced.
    #[must_use]
    pub fn expiration_enforcement(self, exp: ExpirationEnforcement) -> Self {
        Self {
            expiration_enforcement: Some(exp),
            ..self
        }
    }
}
/// Upper bounds applied while fetching repository metadata, protecting against
/// endless-data attacks when a metadata file does not declare its own length.
#[derive(Debug, Clone, Copy)]
pub struct Limits {
    /// Maximum bytes fetched for any root.json file.
    pub max_root_size: u64,
    /// Maximum bytes fetched for targets.json (and delegated role metadata)
    /// when the snapshot does not declare a length.
    pub max_targets_size: u64,
    /// Maximum bytes fetched for timestamp.json.
    pub max_timestamp_size: u64,
    /// Maximum bytes fetched for snapshot.json when the timestamp does not
    /// declare a length.
    pub max_snapshot_size: u64,
    /// Maximum number of sequential root updates attempted in one load.
    pub max_root_updates: u64,
}
impl Default for Limits {
    /// Defaults: 1 MiB for root, timestamp, and snapshot; 10 MiB for targets;
    /// at most 1024 root updates per load.
    fn default() -> Self {
        const MIB: u64 = 1024 * 1024;
        Self {
            max_root_size: MIB,
            max_targets_size: 10 * MIB,
            max_timestamp_size: MIB,
            max_snapshot_size: MIB,
            max_root_updates: 1024,
        }
    }
}
/// Controls the output file name used by [`Repository::save_target`].
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum Prefix {
    /// Save the target under its (resolved) name unchanged.
    None,
    /// Prepend the target's hex-encoded sha256 digest, i.e. `<sha256>.<name>`.
    Digest,
}
/// A TUF repository whose metadata chain (root, timestamp, snapshot, targets)
/// has been fetched and verified by [`RepositoryLoader::load`].
#[derive(Debug, Clone)]
pub struct Repository {
    // Transport used for subsequent fetches (e.g. reading target files).
    transport: Box<dyn Transport + Send + Sync>,
    // Copied from root.signed.consistent_snapshot at load time.
    consistent_snapshot: bool,
    // Cache of trusted metadata and the source of the trusted system time.
    datastore: Datastore,
    // The soonest expiration among the four top-level roles, checked on reads.
    earliest_expiration: DateTime<Utc>,
    // Which role owns `earliest_expiration` (for error reporting).
    earliest_expiration_role: RoleType,
    root: Signed<Root>,
    snapshot: Signed<Snapshot>,
    timestamp: Signed<Timestamp>,
    targets: Signed<crate::schema::Targets>,
    limits: Limits,
    metadata_base_url: Url,
    targets_base_url: Url,
    expiration_enforcement: ExpirationEnforcement,
    // Raw bytes of each fetched delegated-role metadata file, keyed by role name.
    delegated_metadata_bytes: std::collections::HashMap<String, Vec<u8>>,
}
impl Repository {
/// Loads and verifies the repository metadata chain in TUF workflow order
/// (root, then timestamp, then snapshot, then targets plus delegations) and
/// assembles the resulting `Repository`.
///
/// Fixes a mojibake artifact: `×tamp` (a garbled `&timestamp`) in the
/// `load_snapshot` call, which did not compile.
async fn load(loader: RepositoryLoader<'_>) -> Result<Self> {
    let datastore = Datastore::new(loader.datastore)?;
    // Fall back to the default transport when the caller did not supply one.
    let transport = loader
        .transport
        .unwrap_or_else(|| Box::new(DefaultTransport::new()));
    let limits = loader.limits.unwrap_or_default();
    let expiration_enforcement = loader.expiration_enforcement.unwrap_or_default();
    // Base URLs are normalized to end with '/' so `Url::join` treats them as
    // directories rather than replacing the final path segment.
    let metadata_base_url = parse_url(loader.metadata_base_url)?;
    let targets_base_url = parse_url(loader.targets_base_url)?;
    // Read the clock once so every expiration check in this load uses the
    // same reference time.
    let update_start = datastore.system_time().await?;
    let root = load_root(
        transport.as_ref(),
        loader.root,
        &datastore,
        limits.max_root_size,
        limits.max_root_updates,
        &metadata_base_url,
        expiration_enforcement,
        &update_start,
    )
    .await?;
    let timestamp = load_timestamp(
        transport.as_ref(),
        &root,
        &datastore,
        limits.max_timestamp_size,
        &metadata_base_url,
        expiration_enforcement,
        &update_start,
    )
    .await?;
    let snapshot = load_snapshot(
        transport.as_ref(),
        &root,
        &timestamp,
        limits.max_snapshot_size,
        &datastore,
        &metadata_base_url,
        expiration_enforcement,
        &update_start,
    )
    .await?;
    let (targets, delegated_metadata_bytes) = load_targets(
        transport.as_ref(),
        &root,
        &snapshot,
        &datastore,
        limits.max_targets_size,
        &metadata_base_url,
        expiration_enforcement,
        &update_start,
    )
    .await?;
    // Record the soonest-expiring role so later reads can fail fast once any
    // piece of metadata lapses.
    let expires_iter = [
        (root.signed.expires, RoleType::Root),
        (timestamp.signed.expires, RoleType::Timestamp),
        (snapshot.signed.expires, RoleType::Snapshot),
        (targets.signed.expires, RoleType::Targets),
    ];
    let (earliest_expiration, earliest_expiration_role) = expires_iter
        .iter()
        .min_by_key(|tup| tup.0)
        .expect("expires_iter is a non-empty fixed-size array");
    Ok(Self {
        transport,
        consistent_snapshot: root.signed.consistent_snapshot,
        datastore,
        earliest_expiration: *earliest_expiration,
        earliest_expiration_role: *earliest_expiration_role,
        root,
        snapshot,
        timestamp,
        targets,
        limits,
        metadata_base_url,
        targets_base_url,
        expiration_enforcement,
        delegated_metadata_bytes,
    })
}
/// Returns the verified top-level targets metadata.
pub fn targets(&self) -> &Signed<crate::schema::Targets> {
    &self.targets
}

/// Returns the verified root metadata.
pub fn root(&self) -> &Signed<Root> {
    &self.root
}

/// Returns the verified snapshot metadata.
pub fn snapshot(&self) -> &Signed<Snapshot> {
    &self.snapshot
}

/// Returns the verified timestamp metadata.
pub fn timestamp(&self) -> &Signed<Timestamp> {
    &self.timestamp
}

/// Iterates over all targets, including those reachable through delegations.
pub fn all_targets(&self) -> impl Iterator<Item = (&TargetName, &schema::Target)> + '_ {
    self.targets.signed.targets_iter()
}
/// Fetches the target named `name`, returning `Ok(None)` when no such target
/// exists in the (delegation-resolved) targets metadata.
///
/// When expiration enforcement is `Safe`, fails with `ExpiredMetadata` if the
/// earliest-expiring role has lapsed since the repository was loaded.
pub async fn read_target(
    &self,
    name: &TargetName,
) -> Result<
    Option<impl Stream<Item = error::Result<Bytes>> + IntoVec<error::Error> + Send + Sync>,
> {
    if self.expiration_enforcement == ExpirationEnforcement::Safe {
        ensure!(
            self.datastore.system_time().await? < self.earliest_expiration,
            error::ExpiredMetadataSnafu {
                role: self.earliest_expiration_role
            }
        );
    }
    Ok(
        // A lookup failure is treated as "target not found" rather than an error.
        if let Ok(target) = self.targets.signed.find_target(name, false) {
            let (sha256, file) = self.target_digest_and_filename(target, name);
            Some(self.fetch_target(target, &sha256, file.as_str()).await?)
        } else {
            None
        },
    )
}
/// Fetches the target named `name` and writes it into `outdir`, optionally
/// prefixing the file name with the target's sha256 digest (see [`Prefix`]).
///
/// Path-traversal safety: the resolved output path is checked against the
/// canonicalized `outdir` both before and after creating intermediate
/// directories, so a crafted target name cannot escape `outdir`. The download
/// is streamed into a named temp file in the destination directory and then
/// atomically persisted to the final path.
pub async fn save_target<P>(&self, name: &TargetName, outdir: P, prepend: Prefix) -> Result<()>
where
    P: AsRef<Path>,
{
    let outdir = outdir.as_ref();
    let outdir = canonicalize(outdir)
        .await
        .context(error::SaveTargetOutdirCanonicalizeSnafu { path: outdir })?;
    ensure!(
        is_dir(&outdir).await,
        error::SaveTargetOutdirSnafu { path: outdir }
    );
    // Warn when path-segment resolution changed the name the caller gave us.
    if name.resolved() != name.raw() {
        warn!(
            "The target named '{}' had path segments that were resolved to produce the \
             following name: {}",
            name.raw(),
            name.resolved()
        );
    }
    let filename = match prepend {
        Prefix::Digest => {
            let target = self
                .targets
                .signed
                .find_target(name, false)
                .with_context(|_| error::CacheTargetMissingSnafu {
                    target_name: name.clone(),
                })?;
            let sha256 = target.hashes.sha256.clone().into_vec();
            // `<hex sha256>.<resolved name>`
            format!("{}.{}", hex::encode(sha256), name.resolved())
        }
        Prefix::None => name.resolved().to_owned(),
    };
    let resolved_filepath = outdir.join(filename);
    let filepath_dir =
        resolved_filepath
            .parent()
            .with_context(|| error::SaveTargetNoParentSnafu {
                path: &resolved_filepath,
                name: name.clone(),
            })?;
    // First (lexical) containment check, before any directories are created.
    ensure!(
        filepath_dir.starts_with(&outdir),
        error::SaveTargetUnsafePathSnafu {
            name: name.clone(),
            outdir,
            filepath: &resolved_filepath,
        }
    );
    let mut stream = self
        .read_target(name)
        .await?
        .with_context(|| error::SaveTargetNotFoundSnafu { name: name.clone() })?;
    create_dir_all(filepath_dir)
        .await
        .context(error::DirCreateSnafu {
            path: &filepath_dir,
        })?;
    // Second containment check on the canonicalized (symlink-resolved) paths.
    let real_filepath_dir = canonicalize(filepath_dir)
        .await
        .context(error::AbsolutePathSnafu { path: filepath_dir })?;
    let real_outdir = canonicalize(&outdir)
        .await
        .context(error::AbsolutePathSnafu { path: &outdir })?;
    ensure!(
        real_filepath_dir.starts_with(&real_outdir),
        error::SaveTargetUnsafePathSnafu {
            name: name.clone(),
            outdir,
            filepath: &resolved_filepath,
        }
    );
    // NamedTempFile::new_in is blocking I/O, so it runs on a blocking task.
    let tmp_path = real_filepath_dir;
    let tmp = tokio::task::spawn_blocking(move || NamedTempFile::new_in(tmp_path))
        .await
        .context(error::JoinSpawnBlockingTaskSnafu)?
        .context(error::NamedTempFileCreateSnafu { path: filepath_dir })?;
    // Split the temp file so the handle can be used with tokio's async File.
    let (f, tmp_path) = tmp.into_parts();
    let mut f = tokio::fs::File::from_std(f);
    while let Some(bytes) = stream.next().await {
        f.write_all(bytes?.as_ref())
            .await
            .context(error::FileWriteSnafu { path: &tmp_path })?;
    }
    // Reassemble and atomically rename the temp file into place.
    let f = NamedTempFile::from_parts(f.into_std().await, tmp_path);
    f.persist(&resolved_filepath)
        .context(error::NamedTempFilePersistSnafu {
            path: resolved_filepath,
        })?;
    Ok(())
}
/// Looks up a delegated role by name, returning `None` if it does not exist.
pub fn delegated_role(&self, name: &str) -> Option<&DelegatedRole> {
    self.targets.signed.delegated_role(name).ok()
}
}
// Percent-encode everything except alphanumerics and `_ . - ~` when turning
// role names into file names (see `encode_filename`).
const CHARACTERS_TO_ESCAPE: AsciiSet = NON_ALPHANUMERIC
    .remove(b'_')
    .remove(b'.')
    .remove(b'-')
    .remove(b'~');
/// Percent-encodes `name` for safe use as a metadata file name; alphanumerics
/// and `_ . - ~` pass through unchanged, everything else is escaped.
pub(crate) fn encode_filename<S: AsRef<str>>(name: S) -> String {
    let encoded = utf8_percent_encode(name.as_ref(), &CHARACTERS_TO_ESCAPE);
    encoded.to_string()
}
/// Fails with `ExpiredMetadata` if `role` expired before the fixed
/// `update_start` time captured at the beginning of the load.
fn check_expired<T: Role>(update_start: &DateTime<Utc>, role: &T) -> Result<()> {
    ensure!(
        *update_start <= role.expires(),
        error::ExpiredMetadataSnafu { role: T::TYPE }
    );
    Ok(())
}
fn parse_url(url: Url) -> Result<Url> {
if url.as_str().ends_with('/') {
Ok(url)
} else {
let mut s = url.to_string();
s.push('/');
Url::parse(&s).context(error::ParseUrlSnafu { url: s })
}
}
/// Verifies the trusted `root` bytes and then walks the root-rotation chain:
/// repeatedly fetches `<N+1>.root.json`, verifies each new root with both the
/// old and the new key sets, and stops when the next version is unavailable.
/// Persists the final root to the datastore and, if the timestamp or snapshot
/// keys changed, evicts the cached timestamp/snapshot metadata.
#[expect(clippy::too_many_arguments)]
#[allow(clippy::needless_continue)]
async fn load_root<R: AsRef<[u8]>>(
    transport: &dyn Transport,
    root: R,
    datastore: &Datastore,
    max_root_size: u64,
    max_root_updates: u64,
    metadata_base_url: &Url,
    expiration_enforcement: ExpirationEnforcement,
    update_start: &DateTime<Utc>,
) -> Result<Signed<Root>> {
    // The starting root comes from the caller-supplied trusted bytes and must
    // be self-signed before we trust anything it says.
    let mut root: Signed<Root> =
        serde_json::from_slice(root.as_ref()).context(error::ParseTrustedMetadataSnafu)?;
    root.signed
        .verify_role(&root)
        .context(error::VerifyTrustedMetadataSnafu)?;
    let original_root_version = root.signed.version.get();
    // Remember the original timestamp/snapshot keys so we can detect rotation
    // after the update loop finishes.
    let original_timestamp_keys = root
        .signed
        .keys(RoleType::Timestamp)
        .cloned()
        .collect::<Vec<_>>();
    let original_snapshot_keys = root
        .signed
        .keys(RoleType::Snapshot)
        .cloned()
        .collect::<Vec<_>>();
    loop {
        // Bound the number of sequential root updates per load.
        ensure!(
            root.signed.version.get() < original_root_version + max_root_updates,
            error::MaxUpdatesExceededSnafu { max_root_updates }
        );
        let path = format!("{}.root.json", root.signed.version.get() + 1);
        let url = metadata_base_url
            .join(&path)
            .with_context(|_| error::JoinUrlSnafu {
                path: path.clone(),
                url: metadata_base_url.clone(),
            })?;
        match fetch_max_size(
            transport,
            url.clone(),
            max_root_size,
            "max_root_size argument",
        )
        .await
        {
            // Any fetch failure ends the chain: the next root version is
            // assumed not to exist.
            Err(_) => break,
            Ok(stream) => {
                let data = match stream.into_vec().await {
                    Ok(d) => d,
                    // A missing file discovered mid-stream also ends the chain.
                    Err(e) if e.kind() == TransportErrorKind::FileNotFound => break,
                    err @ Err(_) => err.context(error::TransportSnafu { url })?,
                };
                let new_root: Signed<Root> =
                    serde_json::from_slice(&data).context(error::ParseMetadataSnafu {
                        role: RoleType::Root,
                    })?;
                // The new root must verify under the OLD root's keys...
                root.signed
                    .verify_role(&new_root)
                    .context(error::VerifyMetadataSnafu {
                        role: RoleType::Root,
                    })?;
                // ...and also be self-consistent under its own keys.
                new_root
                    .signed
                    .verify_role(&new_root)
                    .context(error::VerifyMetadataSnafu {
                        role: RoleType::Root,
                    })?;
                // Versions must advance by exactly one (no rollback, no skips).
                ensure!(
                    root.signed.version < new_root.signed.version
                        && root.signed.version.get() + 1 == new_root.signed.version.get(),
                    error::OlderMetadataSnafu {
                        role: RoleType::Root,
                        current_version: root.signed.version,
                        new_version: new_root.signed.version
                    }
                );
                root = new_root;
                datastore.remove("root.json").await?;
                datastore.create("root.json", &root).await?;
                continue;
            }
        }
    }
    datastore.remove("root.json").await?;
    datastore.create("root.json", &root).await?;
    // Expiration is checked only on the final root, after rotation completes.
    if expiration_enforcement == ExpirationEnforcement::Safe {
        check_expired(update_start, &root.signed)?;
    }
    // If timestamp or snapshot keys rotated, drop the cached copies so stale
    // metadata signed by revoked keys cannot be used for rollback checks.
    if original_timestamp_keys
        .iter()
        .ne(root.signed.keys(RoleType::Timestamp))
        || original_snapshot_keys
            .iter()
            .ne(root.signed.keys(RoleType::Snapshot))
    {
        // Attempt both removals before reporting the first error.
        let r1 = datastore.remove("timestamp.json").await;
        let r2 = datastore.remove("snapshot.json").await;
        r1.and(r2)?;
    }
    Ok(root)
}
async fn load_timestamp(
transport: &dyn Transport,
root: &Signed<Root>,
datastore: &Datastore,
max_timestamp_size: u64,
metadata_base_url: &Url,
expiration_enforcement: ExpirationEnforcement,
update_start: &DateTime<Utc>,
) -> Result<Signed<Timestamp>> {
let path = "timestamp.json";
let url = metadata_base_url
.join(path)
.with_context(|_| error::JoinUrlSnafu {
path,
url: metadata_base_url.clone(),
})?;
let stream = fetch_max_size(
transport,
url.clone(),
max_timestamp_size,
"max_timestamp_size argument",
)
.await?;
let data = stream
.into_vec()
.await
.context(error::TransportSnafu { url })?;
let timestamp: Signed<Timestamp> =
serde_json::from_slice(&data).context(error::ParseMetadataSnafu {
role: RoleType::Timestamp,
})?;
root.signed
.verify_role(×tamp)
.context(error::VerifyMetadataSnafu {
role: RoleType::Timestamp,
})?;
ensure!(
timestamp.signed.meta.len() == 1,
error::TimestampMetaLengthSnafu {
version: timestamp.signed.version,
meta_length: timestamp.signed.meta.len(),
}
);
let snapshot_meta =
timestamp
.signed
.meta
.get("snapshot.json")
.context(error::MissingSnapshotMetaSnafu {
version: timestamp.signed.version,
})?;
if let Some(Ok(old_timestamp)) = datastore
.bytes("timestamp.json")
.await?
.map(|b| serde_json::from_slice::<Signed<Timestamp>>(&b))
{
if root.signed.verify_role(&old_timestamp).is_ok() {
ensure!(
old_timestamp.signed.version <= timestamp.signed.version,
error::OlderMetadataSnafu {
role: RoleType::Timestamp,
current_version: old_timestamp.signed.version,
new_version: timestamp.signed.version
}
);
ensure!(
old_timestamp.signed.meta.len() == 1,
error::TimestampMetaLengthSnafu {
version: old_timestamp.signed.version,
meta_length: old_timestamp.signed.meta.len(),
}
);
let old_snapshot_meta = old_timestamp.signed.meta.get("snapshot.json").context(
error::MissingSnapshotMetaSnafu {
version: old_timestamp.signed.version,
},
)?;
ensure!(
old_snapshot_meta.version <= snapshot_meta.version,
error::OlderSnapshotInTimestampSnafu {
snapshot_new: snapshot_meta.version,
timestamp_new: timestamp.signed.version,
snapshot_old: old_snapshot_meta.version,
timestamp_old: old_timestamp.signed.version,
}
);
}
}
if expiration_enforcement == ExpirationEnforcement::Safe {
check_expired(update_start, ×tamp.signed)?;
}
datastore.create("timestamp.json", ×tamp).await?;
Ok(timestamp)
}
/// Fetches and verifies `snapshot.json` per the TUF workflow, using the
/// version/length/hashes declared by the already-verified timestamp, performs
/// rollback checks against any previously trusted snapshot in the datastore,
/// optionally enforces expiration, and persists the new snapshot.
#[expect(clippy::too_many_arguments, clippy::too_many_lines)]
async fn load_snapshot(
    transport: &dyn Transport,
    root: &Signed<Root>,
    timestamp: &Signed<Timestamp>,
    max_snapshot_size: u64,
    datastore: &Datastore,
    metadata_base_url: &Url,
    expiration_enforcement: ExpirationEnforcement,
    update_start: &DateTime<Utc>,
) -> Result<Signed<Snapshot>> {
    let snapshot_meta =
        timestamp
            .signed
            .meta
            .get("snapshot.json")
            .context(error::MetaMissingSnafu {
                file: "snapshot.json",
                role: RoleType::Timestamp,
            })?;
    // With consistent snapshots enabled the file name is version-prefixed.
    let path = if root.signed.consistent_snapshot {
        format!("{}.snapshot.json", snapshot_meta.version)
    } else {
        "snapshot.json".to_owned()
    };
    let url = metadata_base_url
        .join(&path)
        .with_context(|_| error::JoinUrlSnafu {
            path: path.clone(),
            url: metadata_base_url.clone(),
        })?;
    // Prefer hash-checked fetching when the timestamp declares hashes; either
    // way, the declared length (or the caller's limit) caps the download.
    let stream = if let Some(hashes) = &snapshot_meta.hashes {
        fetch_sha256(
            transport,
            url.clone(),
            snapshot_meta.length.unwrap_or(max_snapshot_size),
            "timestamp.json",
            &hashes.sha256,
        )
        .await?
    } else {
        fetch_max_size(
            transport,
            url.clone(),
            snapshot_meta.length.unwrap_or(max_snapshot_size),
            "timestamp.json",
        )
        .await?
    };
    let data = stream
        .into_vec()
        .await
        .context(error::TransportSnafu { url })?;
    let snapshot: Signed<Snapshot> =
        serde_json::from_slice(&data).context(error::ParseMetadataSnafu {
            role: RoleType::Snapshot,
        })?;
    // The fetched snapshot must be the exact version the timestamp promised.
    ensure!(
        snapshot.signed.version == snapshot_meta.version,
        error::VersionMismatchSnafu {
            role: RoleType::Snapshot,
            fetched: snapshot.signed.version,
            expected: snapshot_meta.version
        }
    );
    root.signed
        .verify_role(&snapshot)
        .context(error::VerifyMetadataSnafu {
            role: RoleType::Snapshot,
        })?;
    ensure!(
        snapshot.signed.meta.contains_key("targets.json"),
        SnapshotTargetsMetaMissingSnafu {
            version: snapshot.signed.version,
        }
    );
    // Rollback protection: compare against a previously trusted snapshot, if
    // one is cached and still verifies under the current root keys.
    if let Some(Ok(old_snapshot)) = datastore
        .bytes("snapshot.json")
        .await?
        .map(|b| serde_json::from_slice::<Signed<Snapshot>>(&b))
    {
        if root.signed.verify_role(&old_snapshot).is_ok() {
            ensure!(
                old_snapshot.signed.version <= snapshot.signed.version,
                error::OlderMetadataSnafu {
                    role: RoleType::Snapshot,
                    current_version: old_snapshot.signed.version,
                    new_version: snapshot.signed.version
                }
            );
            ensure!(
                old_snapshot.signed.meta.contains_key("targets.json"),
                error::SnapshotTargetsMetaMissingSnafu {
                    version: old_snapshot.signed.version,
                }
            );
            // Every role listed in the old snapshot must still be listed, at
            // the same or a newer version (no role rollback or removal).
            for (name, meta) in &old_snapshot.signed.meta {
                ensure!(
                    snapshot.signed.meta.contains_key(name),
                    error::SnapshotRoleMissingSnafu {
                        role: name,
                        old_version: old_snapshot.signed.version,
                        new_version: snapshot.signed.version,
                    }
                );
                let new_meta = snapshot.signed.meta.get(name).unwrap();
                ensure!(
                    meta.version <= new_meta.version,
                    error::SnapshotRoleRollbackSnafu {
                        role: name,
                        old_role_version: meta.version,
                        old_snapshot_version: old_snapshot.signed.version,
                        new_role_version: new_meta.version,
                        new_snapshot_version: snapshot.signed.version,
                    }
                );
            }
            if let Some(old_targets_meta) = old_snapshot.signed.meta.get("targets.json") {
                let targets_meta =
                    snapshot
                        .signed
                        .meta
                        .get("targets.json")
                        .context(error::MetaMissingSnafu {
                            file: "targets.json",
                            role: RoleType::Snapshot,
                        })?;
                ensure!(
                    old_targets_meta.version <= targets_meta.version,
                    error::OlderMetadataSnafu {
                        role: RoleType::Targets,
                        current_version: old_targets_meta.version,
                        new_version: targets_meta.version,
                    }
                );
            }
        }
    }
    if expiration_enforcement == ExpirationEnforcement::Safe {
        check_expired(update_start, &snapshot.signed)?;
    }
    datastore.create("snapshot.json", &snapshot).await?;
    Ok(snapshot)
}
/// Fetches and verifies `targets.json` using the version/length/hashes
/// declared by the already-verified snapshot, then recursively loads any
/// delegated role metadata. Returns the verified targets together with the
/// raw bytes of each delegated-role metadata file, keyed by role name.
#[expect(clippy::too_many_arguments)]
async fn load_targets(
    transport: &dyn Transport,
    root: &Signed<Root>,
    snapshot: &Signed<Snapshot>,
    datastore: &Datastore,
    max_targets_size: u64,
    metadata_base_url: &Url,
    expiration_enforcement: ExpirationEnforcement,
    update_start: &DateTime<Utc>,
) -> Result<(
    Signed<crate::schema::Targets>,
    std::collections::HashMap<String, Vec<u8>>,
)> {
    let targets_meta =
        snapshot
            .signed
            .meta
            .get("targets.json")
            .context(error::MetaMissingSnafu {
                file: "targets.json",
                role: RoleType::Timestamp,
            })?;
    // With consistent snapshots enabled the file name is version-prefixed.
    let path = if root.signed.consistent_snapshot {
        format!("{}.targets.json", targets_meta.version)
    } else {
        "targets.json".to_owned()
    };
    let targets_url = metadata_base_url
        .join(&path)
        .with_context(|_| error::JoinUrlSnafu {
            path,
            url: metadata_base_url.clone(),
        })?;
    // Use the snapshot-declared length when available; otherwise fall back to
    // the caller-supplied limit. `specifier` names the limit's origin for errors.
    let (max_targets_file_size, specifier) = match targets_meta.length {
        Some(length) => (length, "snapshot.json"),
        None => (max_targets_size, "max_targets_size parameter"),
    };
    // Prefer hash-checked fetching when the snapshot declares hashes.
    let stream = if let Some(hashes) = &targets_meta.hashes {
        fetch_sha256(
            transport,
            targets_url.clone(),
            max_targets_file_size,
            specifier,
            &hashes.sha256,
        )
        .await?
    } else {
        fetch_max_size(
            transport,
            targets_url.clone(),
            max_targets_file_size,
            specifier,
        )
        .await?
    };
    let data = stream
        .into_vec()
        .await
        .context(error::TransportSnafu { url: targets_url })?;
    let mut targets: Signed<crate::schema::Targets> =
        serde_json::from_slice(&data).context(error::ParseMetadataSnafu {
            role: RoleType::Targets,
        })?;
    root.signed
        .verify_role(&targets)
        .context(error::VerifyMetadataSnafu {
            role: RoleType::Targets,
        })?;
    // The fetched targets must be the exact version the snapshot promised.
    ensure!(
        targets.signed.version == targets_meta.version,
        error::VersionMismatchSnafu {
            role: RoleType::Targets,
            fetched: targets.signed.version,
            expected: targets_meta.version
        }
    );
    if expiration_enforcement == ExpirationEnforcement::Safe {
        check_expired(update_start, &targets.signed)?;
    }
    datastore.create("targets.json", &targets).await?;
    // Recursively resolve delegations, collecting the raw metadata bytes of
    // each delegated role as we go.
    let mut delegated_metadata_bytes: std::collections::HashMap<String, Vec<u8>> =
        std::collections::HashMap::new();
    if let Some(delegations) = &mut targets.signed.delegations {
        let mut loaded_roles: BTreeSet<String> = BTreeSet::new();
        load_delegations(
            transport,
            snapshot,
            root.signed.consistent_snapshot,
            metadata_base_url,
            max_targets_size,
            delegations,
            datastore,
            &mut loaded_roles,
            update_start,
            expiration_enforcement,
            &mut delegated_metadata_bytes,
        )
        .await?;
    }
    targets.signed.validate().context(error::InvalidPathSnafu)?;
    Ok((targets, delegated_metadata_bytes))
}
/// Recursively fetches and verifies metadata for each role in `delegation`,
/// attaching the verified `Targets` to its `DelegatedRole` entry and recursing
/// into nested delegations. `loaded_roles` prevents re-loading a role reached
/// through more than one delegation path, and each role's raw metadata bytes
/// are recorded in `delegated_metadata_bytes`.
#[expect(clippy::too_many_arguments)]
#[async_recursion]
#[allow(clippy::too_many_lines)]
async fn load_delegations(
    transport: &dyn Transport,
    snapshot: &Signed<Snapshot>,
    consistent_snapshot: bool,
    metadata_base_url: &Url,
    max_targets_size: u64,
    delegation: &mut Delegations,
    datastore: &Datastore,
    loaded_roles: &mut BTreeSet<String>,
    update_start: &DateTime<Utc>,
    expiration_enforcement: ExpirationEnforcement,
    delegated_metadata_bytes: &mut std::collections::HashMap<String, Vec<u8>>,
) -> Result<()> {
    // First pass: fetch and verify each role's metadata.
    let mut delegated_roles: HashMap<String, Option<Signed<crate::schema::Targets>>> =
        HashMap::new();
    for delegated_role in &delegation.roles {
        if loaded_roles.contains(&delegated_role.name) {
            continue;
        }
        let role_meta = snapshot
            .signed
            .meta
            .get(&format!("{}.json", &delegated_role.name));
        if role_meta.is_none() {
            // NOTE(review): this returns from the whole function when one role
            // is absent from snapshot meta, which also skips loading every
            // remaining sibling role in this delegation — confirm `continue`
            // was not intended here.
            loaded_roles.insert(delegated_role.name.clone());
            return Ok(());
        }
        let role_meta = role_meta.unwrap();
        // With consistent snapshots enabled the file name is version-prefixed;
        // role names are percent-encoded to be filesystem/URL safe.
        let path = if consistent_snapshot {
            format!(
                "{}.{}.json",
                &role_meta.version,
                encode_filename(&delegated_role.name)
            )
        } else {
            format!("{}.json", encode_filename(&delegated_role.name))
        };
        let role_url = metadata_base_url
            .join(&path)
            .with_context(|_| error::JoinUrlSnafu {
                path: path.clone(),
                url: metadata_base_url.clone(),
            })?;
        // Use the snapshot-declared length when available, else the caller's
        // limit; `specifier` names the limit's origin for error messages.
        let (max_delegated_file_size, specifier) = match role_meta.length {
            Some(length) => (length, "snapshot.json"),
            None => (max_targets_size, "max_targets_size parameter"),
        };
        // Prefer hash-checked fetching when the snapshot declares hashes.
        let stream = if let Some(hashes) = &role_meta.hashes {
            fetch_sha256(
                transport,
                role_url.clone(),
                max_delegated_file_size,
                specifier,
                &hashes.sha256,
            )
            .await?
        } else {
            fetch_max_size(
                transport,
                role_url.clone(),
                max_delegated_file_size,
                specifier,
            )
            .await?
        };
        let data = stream
            .into_vec()
            .await
            .context(error::TransportSnafu { url: role_url })?;
        // Keep the raw bytes so callers can re-serialize exactly what was signed.
        delegated_metadata_bytes.insert(delegated_role.name.clone(), data.clone());
        let role: Signed<crate::schema::Targets> =
            serde_json::from_slice(&data).context(error::ParseMetadataSnafu {
                role: RoleType::Targets,
            })?;
        // Delegated metadata is verified against the delegating role's keys.
        delegation
            .verify_role(&role, &delegated_role.name)
            .context(error::VerifyMetadataSnafu {
                role: RoleType::Targets,
            })?;
        ensure!(
            role.signed.version == role_meta.version,
            error::VersionMismatchSnafu {
                role: RoleType::Targets,
                fetched: role.signed.version,
                expected: role_meta.version
            }
        );
        if expiration_enforcement == ExpirationEnforcement::Safe {
            check_expired(update_start, &role.signed)?;
        }
        datastore.create(&path, &role).await?;
        delegated_roles.insert(delegated_role.name.clone(), Some(role));
    }
    // Second pass: attach each verified Targets to its DelegatedRole and
    // recurse into nested delegations.
    for delegated_role in &mut delegation.roles {
        if loaded_roles.contains(&delegated_role.name) {
            continue;
        }
        loaded_roles.insert(delegated_role.name.clone());
        delegated_role.targets =
            delegated_roles
                .remove(&delegated_role.name)
                .with_context(|| error::DelegatedRolesNotConsistentSnafu {
                    name: delegated_role.name.clone(),
                })?;
        if let Some(targets) = &mut delegated_role.targets {
            if let Some(delegations) = &mut targets.signed.delegations {
                load_delegations(
                    transport,
                    snapshot,
                    consistent_snapshot,
                    metadata_base_url,
                    max_targets_size,
                    delegations,
                    datastore,
                    loaded_roles,
                    update_start,
                    expiration_enforcement,
                    delegated_metadata_bytes,
                )
                .await?;
            }
        }
    }
    Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn url_missing_trailing_slash() {
let parsed_url_without_trailing_slash =
parse_url(Url::parse("https://example.org/a/b/c").unwrap()).unwrap();
let parsed_url_with_trailing_slash =
parse_url(Url::parse("https://example.org/a/b/c/").unwrap()).unwrap();
assert_eq!(
parsed_url_without_trailing_slash,
parsed_url_with_trailing_slash
);
}
#[test]
fn expiration_enforcement_traits() {
let enforce = true;
let safe: ExpirationEnforcement = enforce.into();
assert_eq!(safe, ExpirationEnforcement::Safe);
let not_enforce = false;
let not_safe: ExpirationEnforcement = not_enforce.into();
assert_eq!(not_safe, ExpirationEnforcement::Unsafe);
let enforcing: bool = ExpirationEnforcement::Safe.into();
assert!(enforcing);
let non_enforcing: bool = ExpirationEnforcement::Unsafe.into();
assert!(!non_enforcing);
let default = ExpirationEnforcement::default();
assert_eq!(default, ExpirationEnforcement::Safe);
}
#[test]
fn encode_filename_1() {
let input = "../a";
let expected = "..%2Fa";
let actual = encode_filename(input);
assert_eq!(expected, actual);
}
#[test]
fn encode_filename_2() {
let input = "";
let expected = "";
let actual = encode_filename(input);
assert_eq!(expected, actual);
}
#[test]
fn encode_filename_3() {
let input = ".";
let expected = ".";
let actual = encode_filename(input);
assert_eq!(expected, actual);
}
#[test]
fn encode_filename_4() {
let input = "/";
let expected = "%2F";
let actual = encode_filename(input);
assert_eq!(expected, actual);
}
#[test]
fn encode_filename_5() {
let input = "ö";
let expected = "%C3%B6";
let actual = encode_filename(input);
assert_eq!(expected, actual);
}
#[test]
fn encode_filename_6() {
let input = "!@#$%^&*()[]|\\~`'\";:.,><?/-_";
let expected =
"%21%40%23%24%25%5E%26%2A%28%29%5B%5D%7C%5C~%60%27%22%3B%3A.%2C%3E%3C%3F%2F-_";
let actual = encode_filename(input);
assert_eq!(expected, actual);
}
#[test]
fn encode_filename_7() {
let input = "../../strange/role/../name";
let expected = "..%2F..%2Fstrange%2Frole%2F..%2Fname";
let actual = encode_filename(input);
assert_eq!(expected, actual);
}
#[test]
fn encode_filename_8() {
let input = "../🍺/( ͡° ͜ʖ ͡°)";
let expected = "..%2F%F0%9F%8D%BA%2F%28%20%CD%A1%C2%B0%20%CD%9C%CA%96%20%CD%A1%C2%B0%29";
let actual = encode_filename(input);
assert_eq!(expected, actual);
}
#[test]
fn encode_filename_9() {
let input = "ᚩ os, ᚱ rad, ᚳ cen, ᚷ gyfu, ᚹ ƿynn, ᚻ hægl, ...";
let expected = "%E1%9A%A9%20os%2C%20%E1%9A%B1%20rad%2C%20%E1%9A%B3%20cen%2C%20%E1%9A%B7%20gyfu%2C%20%E1%9A%B9%20%C6%BFynn%2C%20%E1%9A%BB%20h%C3%A6gl%2C%20...";
let actual = encode_filename(input);
assert_eq!(expected, actual);
}
#[test]
fn encode_filename_10() {
let input = "../../path/like/dubious";
let expected = "..%2F..%2Fpath%2Flike%2Fdubious";
let actual = encode_filename(input);
assert_eq!(expected, actual);
}
#[test]
fn encode_filename_11() {
let input = "🍺/30";
let expected = "%F0%9F%8D%BA%2F30";
let actual = encode_filename(input);
assert_eq!(expected, actual);
}
}