use std::{cmp, error, fmt, ops};
use std::cmp::Ordering;
use std::collections::hash_map;
use std::collections::{HashMap, HashSet, VecDeque};
use std::convert::TryFrom;
use std::hash::Hash;
use std::net::{AddrParseError, IpAddr};
use std::num::ParseIntError;
use std::str::FromStr;
use std::sync::{Arc, RwLock};
use std::time::{Duration, SystemTime};
use chrono::{DateTime, Utc};
use crossbeam_queue::SegQueue;
use log::{info, warn};
use rpki::repository::cert::ResourceCert;
use rpki::repository::resources::{
AsId, IpBlock, IpBlocks, IpBlocksBuilder, Prefix
};
use rpki::repository::roa::{
FriendlyRoaIpAddress, RouteOriginAttestation
};
use rpki::repository::tal::{Tal, TalInfo, TalUri};
use rpki::repository::x509::{Time, Validity};
use rpki::rtr::payload::{Action, Ipv4Prefix, Ipv6Prefix, Payload, Timing};
use rpki::rtr::server::VrpSource;
use rpki::rtr::state::{Serial, State};
use rpki::uri;
use serde::{Deserialize, Deserializer};
use crate::config::{Config, FilterPolicy};
use crate::engine::{CaCert, ProcessPubPoint, ProcessRun};
use crate::error::Failed;
use crate::metrics::{Metrics, VrpMetrics};
use crate::slurm::{ExceptionInfo, LocalExceptions};
/// The result of a validation run: payload collected from all publication
/// points plus the resources of CAs that had to be rejected.
///
/// Both fields are lock-free queues, so processors running concurrently can
/// push into a shared `&ValidationReport`.
#[derive(Debug, Default)]
pub struct ValidationReport {
    /// The successfully validated publication points.
    pub_points: SegQueue<PubPoint>,
    /// Collects the resources of rejected CAs.
    rejected: RejectedResourcesBuilder,
}
impl ValidationReport {
    /// Creates a new, empty validation report.
    pub fn new() -> Self {
        Self::default()
    }
}
impl<'a> ProcessRun for &'a ValidationReport {
    type PubPoint = PubPointProcessor<'a>;

    /// Starts processing for a trust anchor by creating a processor rooted
    /// at the TA certificate.
    fn process_ta(
        &self,
        _tal: &Tal, _uri: &TalUri, cert: &CaCert,
        tal_index: usize,
    ) -> Result<Option<Self::PubPoint>, Failed> {
        Ok(Some(
            PubPointProcessor {
                report: self,
                pub_point: PubPoint::new_ta(cert, tal_index),
                validity: cert.combined_validity(),
            }
        ))
    }
}
/// Collects the payload of a single publication point during a run.
#[derive(Clone, Debug)]
pub struct PubPointProcessor<'a> {
    /// The report everything is pushed to on commit.
    report: &'a ValidationReport,
    /// The publication point data collected so far.
    pub_point: PubPoint,
    /// The combined validity of the CA certificate; used to trim the
    /// validity of ROAs found below it.
    validity: Validity,
}
impl<'a> ProcessPubPoint for PubPointProcessor<'a> {
    /// Records which repository the publication point belongs to.
    fn repository_index(&mut self, repository_index: usize) {
        self.pub_point.repository_index = Some(repository_index)
    }

    /// Lowers the refresh time to `not_after` if that is earlier.
    fn update_refresh(&mut self, not_after: Time) {
        self.pub_point.refresh = cmp::min(
            self.pub_point.refresh, not_after
        );
    }

    /// Every object of the publication point is of interest.
    fn want(&self, _uri: &uri::Rsync) -> Result<bool, Failed> {
        Ok(true)
    }

    /// Processes a child CA by creating a nested processor for it.
    fn process_ca(
        &mut self, _uri: &uri::Rsync, cert: &CaCert,
    ) -> Result<Option<Self>, Failed> {
        Ok(Some(
            PubPointProcessor {
                report: self.report,
                pub_point: PubPoint::new_ca(&self.pub_point, cert),
                validity: cert.combined_validity(),
            }
        ))
    }

    /// Adds the origins of a validated ROA to the publication point.
    fn process_roa(
        &mut self,
        _uri: &uri::Rsync,
        cert: ResourceCert,
        route: RouteOriginAttestation
    ) -> Result<(), Failed> {
        // The data is only current as long as the ROA's certificate is.
        self.pub_point.update_refresh(cert.validity().not_after());
        self.pub_point.add_roa(
            route, Arc::new(RoaInfo::new(&cert, self.validity))
        );
        Ok(())
    }

    /// Drops everything collected so far and starts over.
    fn restart(&mut self) -> Result<(), Failed> {
        self.pub_point.restart();
        Ok(())
    }

    /// Hands the collected payload over to the report.
    fn commit(self) {
        // Only non-empty publication points are worth keeping.
        if !self.pub_point.is_empty() {
            self.report.pub_points.push(self.pub_point);
        }
    }

    /// Logs the resources of a rejected CA and marks them as unsafe.
    fn cancel(self, cert: &CaCert) {
        warn!(
            "CA for {} rejected, resources marked as unsafe:",
            cert.ca_repository()
        );
        for block in cert.cert().v4_resources().iter() {
            warn!(" {}", block.display_v4());
        }
        for block in cert.cert().v6_resources().iter() {
            warn!(" {}", block.display_v6());
        }
        for block in cert.cert().as_resources().iter() {
            warn!(" {}", block);
        }
        self.report.rejected.extend_from_cert(cert);
    }
}
/// The payload collected from a single publication point.
#[derive(Clone, Debug)]
struct PubPoint {
    /// Route origins together with the info of the ROA they came from.
    origins: Vec<(RouteOrigin, Arc<RoaInfo>)>,
    /// The earliest certificate expiry seen; when data must be refreshed.
    refresh: Time,
    /// The initial refresh value, restored on `restart`.
    orig_refresh: Time,
    /// The index of the trust anchor this point descends from.
    tal_index: usize,
    /// The index of the repository in the metrics, once known.
    repository_index: Option<usize>,
}
impl PubPoint {
    /// Creates a new publication point for a trust anchor CA.
    fn new_ta(cert: &CaCert, tal_index: usize) -> Self {
        let refresh = cert.cert().validity().not_after();
        PubPoint {
            origins: Vec::new(),
            refresh,
            orig_refresh: refresh,
            tal_index,
            repository_index: None,
        }
    }

    /// Creates a new publication point for a child CA.
    ///
    /// The refresh time is capped by the parent's refresh time.
    fn new_ca(parent: &PubPoint, cert: &CaCert) -> Self {
        let refresh = cmp::min(
            parent.refresh, cert.cert().validity().not_after()
        );
        PubPoint {
            origins: Vec::new(),
            refresh,
            orig_refresh: refresh,
            tal_index: parent.tal_index,
            repository_index: None,
        }
    }

    /// Returns whether no origins have been collected yet.
    pub fn is_empty(&self) -> bool {
        self.origins.is_empty()
    }

    /// Lowers the refresh time to `refresh` if that is earlier.
    fn update_refresh(&mut self, refresh: Time) {
        self.refresh = cmp::min(self.refresh, refresh)
    }

    /// Throws away all collected origins and resets the refresh time.
    fn restart(&mut self) {
        self.origins.clear();
        self.refresh = self.orig_refresh;
    }

    /// Adds all address origins of a ROA, sharing a single `RoaInfo`.
    fn add_roa(
        &mut self,
        roa: RouteOriginAttestation,
        info: Arc<RoaInfo>,
    ) {
        self.origins.extend(roa.iter().map(|prefix| {
            (RouteOrigin::from_roa(roa.as_id(), prefix), info.clone())
        }));
    }
}
/// Collects the IP resources of rejected CAs.
///
/// Uses a lock-free queue so that multiple threads can add to it through
/// a shared reference.
#[derive(Debug, Default)]
struct RejectedResourcesBuilder {
    /// The queued blocks, tagged `true` for IPv4 and `false` for IPv6.
    blocks: SegQueue<(bool, IpBlock)>,
}

impl RejectedResourcesBuilder {
    /// Queues the IP resources of a rejected CA certificate.
    fn extend_from_cert(&self, cert: &CaCert) {
        // Blocks covering the complete address space are skipped; they
        // would otherwise mark every single prefix as unsafe.
        for block in cert.cert().v4_resources().iter().filter(|block|
            !block.is_slash_zero()
        ) {
            self.blocks.push((true, block));
        }
        for block in cert.cert().v6_resources().iter().filter(|block|
            !block.is_slash_zero()
        ) {
            self.blocks.push((false, block));
        }
    }

    /// Drains the queue into finished per-family block sets.
    fn finalize(self) -> RejectedResources {
        let mut v4 = IpBlocksBuilder::new();
        let mut v6 = IpBlocksBuilder::new();
        // The boolean tag distinguishes IPv4 (`true`) from IPv6 blocks.
        while let Some((is_v4, block)) = self.blocks.pop() {
            if is_v4 {
                v4.push(block);
            }
            else {
                v6.push(block);
            }
        }
        RejectedResources {
            v4: v4.finalize(),
            v6: v6.finalize()
        }
    }
}
/// The IP resources of all CAs rejected during a validation run.
#[derive(Clone, Debug)]
pub struct RejectedResources {
    /// The rejected IPv4 blocks.
    v4: IpBlocks,
    /// The rejected IPv6 blocks.
    v6: IpBlocks
}
impl RejectedResources {
    /// Checks whether a prefix should be kept, i.e., whether it does not
    /// intersect with the resources of any rejected CA.
    pub fn keep_prefix(&self, prefix: AddressPrefix) -> bool {
        // Pick the block set matching the prefix's address family.
        let rejected = if prefix.is_v4() { &self.v4 } else { &self.v6 };
        !rejected.intersects_block(prefix)
    }
}
/// A cloneable, thread-safe handle to the payload history.
#[derive(Clone, Debug)]
pub struct SharedHistory(Arc<RwLock<PayloadHistory>>);
impl SharedHistory {
    /// Creates a new shared history from the configuration.
    pub fn from_config(config: &Config) -> Self {
        SharedHistory(Arc::new(RwLock::new(
            PayloadHistory::from_config(config)
        )))
    }

    /// Provides read access to the history.
    ///
    /// Panics if the lock is poisoned.
    pub fn read(&self) -> impl ops::Deref<Target = PayloadHistory> + '_ {
        self.0.read().expect("Payload history lock poisoned")
    }

    /// Provides write access to the history.
    ///
    /// Panics if the lock is poisoned.
    fn write(&self) -> impl ops::DerefMut<Target = PayloadHistory> + '_ {
        self.0.write().expect("Payload history lock poisoned")
    }

    /// Updates the history with the result of a validation run.
    ///
    /// Returns whether the data has changed.
    pub fn update(
        &self,
        report: ValidationReport,
        exceptions: &LocalExceptions,
        mut metrics: Metrics
    ) -> bool {
        let snapshot = SnapshotBuilder::from_report(
            report, exceptions, &mut metrics,
            self.read().unsafe_vrps
        );
        // Take the current snapshot and serial under a short read lock.
        // NOTE(review): the lock is released before the write lock below
        // is acquired, so this relies on updates not running concurrently
        // with each other -- confirm with the caller.
        let (current, serial) = {
            let read = self.read();
            (read.current(), read.serial())
        };
        let delta = current.as_ref().and_then(|current| {
            PayloadDelta::construct(&current.to_builder(), &snapshot, serial)
        });
        let mut history = self.write();
        history.metrics = Some(metrics.into());
        if let Some(delta) = delta {
            // The data has changed: install snapshot and delta.
            info!(
                "Delta with {} announced and {} withdrawn origins.",
                delta.announced_origins.len(),
                delta.withdrawn_origins.len(),
            );
            history.current = Some(snapshot.into_snapshot().into());
            history.push_delta(delta);
            true
        }
        else if current.is_none() {
            // This is the very first snapshot.
            history.current = Some(snapshot.into_snapshot().into());
            true
        }
        else {
            // Nothing changed.
            false
        }
    }

    /// Marks the beginning of an update cycle.
    pub fn mark_update_start(&self) {
        self.write().last_update_start = Utc::now();
    }

    /// Marks the end of an update cycle, adjusting the timing state.
    pub fn mark_update_done(&self) {
        let mut locked = self.write();
        let now = Utc::now();
        locked.last_update_done = Some(now);
        locked.last_update_duration = Some(
            now.signed_duration_since(locked.last_update_start)
                .to_std().unwrap_or_else(|_| Duration::from_secs(0))
        );
        locked.next_update_start = SystemTime::now() + locked.refresh;
        // Pull the next update forward if the data's own refresh time
        // is earlier than the configured interval.
        if let Some(refresh) = locked.current.as_ref().and_then(|c|
            c.refresh()
        ) {
            let refresh = SystemTime::from(refresh);
            if refresh < locked.next_update_start {
                locked.next_update_start = refresh;
            }
        }
        locked.created = {
            if let Some(created) = locked.created {
                // Ensure `created` advances by at least one full second
                // between consecutive updates.
                if now.timestamp() <= created.timestamp() {
                    Some(created + chrono::Duration::seconds(1))
                }
                else {
                    Some(now)
                }
            }
            else {
                Some(now)
            }
        };
    }
}
impl VrpSource for SharedHistory {
    type FullIter = SnapshotVrpIter;
    type DiffIter = DeltaVrpIter;

    /// The source is ready once a first snapshot is available.
    fn ready(&self) -> bool {
        self.read().is_active()
    }

    /// Returns the current RTR session and serial.
    fn notify(&self) -> State {
        let read = self.read();
        State::from_parts(read.rtr_session(), read.serial())
    }

    /// Returns the state and an iterator over the full current data set.
    fn full(&self) -> (State, Self::FullIter) {
        let read = self.read();
        (
            State::from_parts(read.rtr_session(), read.serial()),
            SnapshotVrpIter::new(read.current.clone().unwrap_or_default())
        )
    }

    /// Returns the changes since `state`, if they can be provided.
    fn diff(&self, state: State) -> Option<(State, Self::DiffIter)> {
        let read = self.read();
        // A different session means the client's data is unrelated to ours.
        if read.rtr_session() != state.session() {
            return None
        }
        read.delta_since(state.serial()).map(|delta| {
            (
                State::from_parts(read.rtr_session(), read.serial()),
                DeltaVrpIter::new(delta)
            )
        })
    }

    /// Returns the timing parameters, deriving the refresh value from
    /// the expected time of the next update.
    fn timing(&self) -> Timing {
        let read = self.read();
        let mut res = read.timing;
        res.refresh = u32::try_from(
            read.update_wait().as_secs()
        ).unwrap_or(u32::MAX);
        res
    }
}
/// The payload history: the current snapshot, a bounded list of deltas,
/// and the timing state of the update cycle.
#[derive(Clone, Debug)]
pub struct PayloadHistory {
    /// The current snapshot, if a validation run has completed yet.
    current: Option<Arc<PayloadSnapshot>>,
    /// Past deltas, newest first; at most `keep` entries.
    deltas: VecDeque<Arc<PayloadDelta>>,
    /// The metrics of the last validation run, if any.
    metrics: Option<Arc<Metrics>>,
    /// The session ID, derived from the startup time.
    session: u64,
    /// The maximum number of deltas to keep.
    keep: usize,
    /// The configured refresh interval.
    refresh: Duration,
    /// The policy for dealing with potentially unsafe VRPs.
    unsafe_vrps: FilterPolicy,
    /// When the last update started.
    last_update_start: DateTime<Utc>,
    /// When the last update finished, if ever.
    last_update_done: Option<DateTime<Utc>>,
    /// How long the last update took, if known.
    last_update_duration: Option<Duration>,
    /// When the next update is scheduled to start.
    next_update_start: SystemTime,
    /// When the current data set was created; kept strictly increasing
    /// at second granularity by `mark_update_done`.
    created: Option<DateTime<Utc>>,
    /// The timing parameters advertised to RTR clients.
    timing: Timing,
}
impl PayloadHistory {
    /// Creates a new history from the configuration.
    pub fn from_config(config: &Config) -> Self {
        PayloadHistory {
            current: None,
            deltas: VecDeque::with_capacity(config.history_size),
            metrics: None,
            // Use the Unix timestamp as the session ID so that it changes
            // whenever the process restarts.
            session: {
                SystemTime::now()
                    .duration_since(SystemTime::UNIX_EPOCH).unwrap()
                    .as_secs()
            },
            keep: config.history_size,
            refresh: config.refresh,
            unsafe_vrps: config.unsafe_vrps,
            last_update_start: Utc::now(),
            last_update_done: None,
            last_update_duration: None,
            next_update_start: SystemTime::now() + config.refresh,
            created: None,
            timing: Timing {
                refresh: config.refresh.as_secs() as u32,
                retry: config.retry.as_secs() as u32,
                expire: config.expire.as_secs() as u32,
            },
        }
    }

    /// Pushes a new delta, dropping the oldest one if the history is full.
    fn push_delta(&mut self, delta: PayloadDelta) {
        if self.deltas.len() == self.keep {
            let _ = self.deltas.pop_back();
        }
        // The newest delta lives at the front of the deque.
        self.deltas.push_front(Arc::new(delta))
    }

    /// Returns whether a snapshot is available yet.
    pub fn is_active(&self) -> bool {
        self.current.is_some()
    }

    /// Returns the current snapshot, if any.
    pub fn current(&self) -> Option<Arc<PayloadSnapshot>> {
        self.current.clone()
    }

    /// Returns how long to wait until the next refresh is due.
    pub fn refresh_wait(&self) -> Duration {
        self.next_update_start
            .duration_since(SystemTime::now())
            .unwrap_or_else(|_| Duration::from_secs(0))
    }

    /// Returns how long RTR clients should wait before polling again.
    pub fn update_wait(&self) -> Duration {
        // Presumably allows the next update twice the previous run's
        // duration past its scheduled start -- TODO confirm rationale.
        let start = match self.last_update_duration {
            Some(duration) => self.next_update_start + duration + duration,
            None => self.next_update_start + self.refresh
        };
        start.duration_since(SystemTime::now()).unwrap_or(self.refresh)
    }

    /// Returns a delta from the given serial to the current data set.
    ///
    /// Returns `None` if the delta cannot be provided and the client
    /// has to fetch the full snapshot instead.
    pub fn delta_since(&self, serial: Serial) -> Option<Arc<PayloadDelta>> {
        // Fast paths based on the newest delta (front of the deque).
        if let Some(delta) = self.deltas.front() {
            if delta.serial() < serial {
                // The client claims to be ahead of us: force a resync.
                return None
            }
            else if delta.serial() == serial {
                // The client is up-to-date: hand out an empty delta.
                return Some(Arc::new(PayloadDelta::empty(serial)))
            }
            else if delta.serial() == serial.add(1) {
                // Exactly one update behind: the newest delta suffices.
                return Some(delta.clone())
            }
        }
        else {
            if serial == 0 {
                return Some(Arc::new(PayloadDelta::empty(serial)))
            }
            else {
                return None
            }
        };
        // Scan from the oldest delta until the one matching the client's
        // serial, then merge everything newer than it.
        let mut iter = self.deltas.iter().rev();
        for delta in &mut iter {
            match delta.serial().partial_cmp(&serial) {
                // NOTE(review): if even the oldest stored delta has a serial
                // greater than the client's, we force a full resync although
                // the stored chain might still cover the client -- appears
                // conservative; confirm intended.
                Some(cmp::Ordering::Greater) => return None,
                Some(cmp::Ordering::Equal) => break,
                _ => continue
            }
        }
        Some(DeltaMerger::from_iter(iter).into_delta())
    }

    /// Returns the serial number of the current data set.
    pub fn serial(&self) -> Serial {
        self.deltas.front().map(|delta| {
            delta.serial()
        }).unwrap_or_else(|| 0.into())
    }

    /// Returns the full session ID.
    pub fn session(&self) -> u64 {
        self.session
    }

    /// Returns the session ID truncated for use in RTR.
    pub fn rtr_session(&self) -> u16 {
        self.session as u16
    }

    /// Returns the metrics of the last validation run, if any.
    pub fn metrics(&self) -> Option<Arc<Metrics>> {
        self.metrics.clone()
    }

    /// Returns when the last update started.
    pub fn last_update_start(&self) -> DateTime<Utc> {
        self.last_update_start
    }

    /// Returns when the last update finished, if ever.
    pub fn last_update_done(&self) -> Option<DateTime<Utc>> {
        self.last_update_done
    }

    /// Returns how long the last update took, if known.
    pub fn last_update_duration(&self) -> Option<Duration> {
        self.last_update_duration
    }

    /// Returns when the current data set was created, if ever.
    pub fn created(&self) -> Option<DateTime<Utc>> {
        self.created
    }
}
/// An immutable snapshot of all the validated payload.
#[derive(Clone, Debug)]
pub struct PayloadSnapshot {
    /// All route origins with their source info, sorted via
    /// `RouteOrigin`'s ordering.
    origins: Vec<(RouteOrigin, OriginInfo)>,
    /// When this snapshot was created.
    created: DateTime<Utc>,
    /// When the data should be refreshed at the latest, if known.
    refresh: Option<Time>,
}
impl PayloadSnapshot {
    /// Creates a new, empty snapshot.
    pub fn new() -> Self {
        Default::default()
    }

    /// Builds a snapshot directly from a validation report.
    pub fn from_report(
        report: ValidationReport,
        exceptions: &LocalExceptions,
        metrics: &mut Metrics,
        unsafe_vrps: FilterPolicy
    ) -> Self {
        SnapshotBuilder::from_report(
            report, exceptions, metrics, unsafe_vrps
        ).into_snapshot()
    }

    /// Returns when the snapshot was created.
    pub fn created(&self) -> DateTime<Utc> {
        self.created
    }

    /// Returns when the data should be refreshed at the latest.
    fn refresh(&self) -> Option<Time> {
        self.refresh
    }

    /// Returns a slice of all origins and their information.
    pub fn origins(&self) -> &[(RouteOrigin, OriginInfo)] {
        &self.origins
    }

    /// Converts a shared snapshot into an iterator over its VRPs.
    pub fn into_vrp_iter(self: Arc<Self>) -> SnapshotVrpIter {
        SnapshotVrpIter::new(self)
    }

    /// Converts the snapshot back into a builder, e.g. for delta
    /// construction against a newer snapshot.
    fn to_builder(&self) -> SnapshotBuilder {
        SnapshotBuilder {
            origins: self.origins.iter().cloned().collect(),
            created: self.created,
            refresh: self.refresh,
        }
    }
}
impl Default for PayloadSnapshot {
    /// An empty snapshot stamped with the current time.
    fn default() -> Self {
        PayloadSnapshot {
            origins: Vec::new(),
            created: Utc::now(),
            refresh: None
        }
    }
}

/// Lets generic code accept both owned snapshots and references.
impl AsRef<PayloadSnapshot> for PayloadSnapshot {
    fn as_ref(&self) -> &Self {
        self
    }
}
/// An iterator over the VRPs of a shared snapshot.
#[derive(Clone, Debug)]
pub struct SnapshotVrpIter {
    /// The snapshot being iterated over.
    snapshot: Arc<PayloadSnapshot>,
    /// The index of the next origin to yield.
    pos: usize,
}

impl SnapshotVrpIter {
    /// Creates a new iterator starting at the first origin.
    fn new(snapshot: Arc<PayloadSnapshot>) -> Self {
        SnapshotVrpIter {
            snapshot,
            pos: 0
        }
    }
}
impl Iterator for SnapshotVrpIter {
    type Item = Payload;

    /// Yields the payload of each origin in snapshot order.
    fn next(&mut self) -> Option<Self::Item> {
        let (origin, _) = self.snapshot.origins.get(self.pos)?;
        self.pos += 1;
        Some(origin.to_payload())
    }
}
/// Collects the payload for a new snapshot.
///
/// Origins are keyed in a map so duplicates from different publication
/// points can be merged.
#[derive(Clone, Debug)]
struct SnapshotBuilder {
    /// The route origins mapped to their (possibly merged) source info.
    origins: HashMap<RouteOrigin, OriginInfo>,
    /// When this snapshot was started.
    created: DateTime<Utc>,
    /// When the data should be refreshed at the latest, if known.
    refresh: Option<Time>,
}
impl SnapshotBuilder {
    /// Builds a snapshot from a validation report.
    ///
    /// Applies the local exceptions and the unsafe-VRP policy and keeps
    /// the various metrics up-to-date while doing so.
    fn from_report(
        report: ValidationReport,
        exceptions: &LocalExceptions,
        metrics: &mut Metrics,
        unsafe_vrps: FilterPolicy
    ) -> Self {
        let mut res = SnapshotBuilder {
            origins: HashMap::new(),
            created: metrics.time,
            refresh: None
        };
        let rejected = report.rejected.finalize();

        // Process all publication points from the report.
        while let Some(pub_point) = report.pub_points.pop() {
            res.update_refresh(pub_point.refresh);
            let mut point_metrics = AllVrpMetrics::new(
                metrics, pub_point.tal_index, pub_point.repository_index
            );
            for (origin, roa_info) in pub_point.origins {
                point_metrics.update(|m| m.valid += 1);
                // Check the prefix against the resources of rejected CAs.
                if !rejected.keep_prefix(origin.prefix()) {
                    point_metrics.update(|m| m.marked_unsafe += 1);
                    if unsafe_vrps != FilterPolicy::Accept {
                        warn!(
                            "Filtering potentially unsafe VRP \
                            ({}/{}-{}, {})",
                            origin.address(),
                            origin.address_length(),
                            origin.max_length(),
                            origin.as_id()
                        );
                    }
                    if unsafe_vrps == FilterPolicy::Reject {
                        continue
                    }
                }
                // Apply the locally configured filters.
                if !exceptions.keep_origin(origin) {
                    point_metrics.update(|m| m.locally_filtered += 1);
                    continue
                }
                // Insert the origin, merging source info on duplicates.
                match res.origins.entry(origin) {
                    hash_map::Entry::Vacant(entry) => {
                        entry.insert(roa_info.into());
                        point_metrics.update(|m| m.contributed += 1);
                    }
                    hash_map::Entry::Occupied(mut entry) => {
                        entry.get_mut().add_roa(roa_info);
                        point_metrics.update(|m| m.duplicate += 1);
                    }
                }
            }
        }

        // Add the assertions from the local exceptions.
        for (origin, info) in exceptions.origin_assertions() {
            match res.origins.entry(origin) {
                hash_map::Entry::Vacant(entry) => {
                    entry.insert(info.into());
                    metrics.local.contributed += 1;
                    metrics.vrps.contributed += 1;
                }
                hash_map::Entry::Occupied(mut entry) => {
                    entry.get_mut().add_local(info);
                    metrics.local.duplicate += 1;
                    metrics.vrps.duplicate += 1;
                }
            }
        }
        res
    }

    /// Lowers the refresh time to `refresh` if that is earlier.
    fn update_refresh(&mut self, refresh: Time) {
        self.refresh = match self.refresh {
            Some(old) => Some(cmp::min(old, refresh)),
            None => Some(refresh)
        }
    }

    /// Converts the builder into a snapshot with sorted origins.
    fn into_snapshot(self) -> PayloadSnapshot {
        let mut origins: Vec<_> = self.origins.into_iter().collect();
        origins.sort_by(|left, right| left.0.cmp(&right.0));
        PayloadSnapshot {
            origins,
            created: self.created,
            refresh: self.refresh,
        }
    }
}
impl Default for SnapshotBuilder {
    /// An empty builder stamped with the current time.
    fn default() -> Self {
        Self {
            origins: HashMap::default(),
            created: Utc::now(),
            refresh: None,
        }
    }
}
/// Mutable references to all VRP metrics a publication point updates at
/// once: per-TAL, per-repository (if known), and the overall counters.
struct AllVrpMetrics<'a> {
    /// The metrics of the trust anchor the point descends from.
    tal: &'a mut VrpMetrics,
    /// The metrics of the point's repository, if the index is known.
    repo: Option<&'a mut VrpMetrics>,
    /// The overall metrics.
    all: &'a mut VrpMetrics,
}
impl<'a> AllVrpMetrics<'a> {
    /// Creates the set of metric references for a publication point.
    fn new(
        metrics: &'a mut Metrics, tal_index: usize, repo_index: Option<usize>
    ) -> Self {
        AllVrpMetrics {
            tal: &mut metrics.tals[tal_index].vrps,
            // A plain `match` (rather than `Option::map` with a closure)
            // keeps the borrows of the individual `metrics` fields
            // disjoint for the borrow checker.
            repo: match repo_index {
                Some(index) => Some(&mut metrics.repositories[index].vrps),
                None => None
            },
            all: &mut metrics.vrps,
        }
    }

    /// Applies `op` to every metrics set in the collection.
    fn update(&mut self, op: impl Fn(&mut VrpMetrics)) {
        op(self.tal);
        if let Some(ref mut repo) = self.repo {
            op(repo)
        }
        op(self.all)
    }
}
/// The changes between two snapshots, identified by a serial number.
#[derive(Clone, Debug)]
pub struct PayloadDelta {
    /// The serial of the data set this delta leads to.
    serial: Serial,
    /// The origins newly appearing in this version.
    announced_origins: Vec<RouteOrigin>,
    /// The origins removed in this version.
    withdrawn_origins: Vec<RouteOrigin>,
}
impl PayloadDelta {
    /// Creates the delta between two snapshot builders.
    ///
    /// The resulting delta carries `serial` plus 1. Returns `None` if
    /// nothing changed between `current` and `next`.
    fn construct(
        current: &SnapshotBuilder, next: &SnapshotBuilder, serial: Serial
    ) -> Option<Self> {
        let announce = added_keys(&next.origins, &current.origins);
        let withdraw = added_keys(&current.origins, &next.origins);
        if !announce.is_empty() || !withdraw.is_empty() {
            Some(PayloadDelta {
                serial: serial.add(1),
                announced_origins: announce,
                withdrawn_origins: withdraw,
            })
        }
        else {
            None
        }
    }

    /// Creates an empty delta with the given serial.
    pub fn empty(serial: Serial) -> Self {
        PayloadDelta {
            serial,
            announced_origins: Vec::new(),
            withdrawn_origins: Vec::new(),
        }
    }

    /// Returns whether the delta contains no changes at all.
    pub fn is_empty(&self) -> bool {
        self.announced_origins.is_empty() && self.withdrawn_origins.is_empty()
    }

    /// Returns the serial of the data set this delta leads to.
    pub fn serial(&self) -> Serial {
        self.serial
    }

    /// Returns the newly announced origins.
    pub fn announced_origins(&self) -> &[RouteOrigin] {
        &self.announced_origins
    }

    /// Returns the withdrawn origins.
    pub fn withdrawn_origins(&self) -> &[RouteOrigin] {
        &self.withdrawn_origins
    }
}
/// An iterator over the VRPs of a delta: all announcements first, then
/// all withdrawals.
#[derive(Clone, Debug)]
pub struct DeltaVrpIter {
    /// The delta being iterated over.
    delta: Arc<PayloadDelta>,
    /// The cursor: `Ok(i)` indexes into the announced origins, `Err(i)`
    /// into the withdrawn origins.
    pos: Result<usize, usize>,
}

impl DeltaVrpIter {
    /// Creates a new iterator starting with the announcements.
    fn new(delta: Arc<PayloadDelta>) -> Self {
        DeltaVrpIter {
            delta,
            pos: Ok(0)
        }
    }
}
impl Iterator for DeltaVrpIter {
    type Item = (Action, Payload);

    fn next(&mut self) -> Option<Self::Item> {
        match self.pos {
            // First phase: announcements.
            Ok(pos) => {
                match self.delta.announced_origins.get(pos) {
                    Some(res) => {
                        self.pos = Ok(pos + 1);
                        Some((Action::Announce, res.to_payload()))
                    }
                    None => {
                        // Announcements exhausted: switch to the
                        // withdrawal phase and recurse exactly once.
                        self.pos = Err(0);
                        self.next()
                    }
                }
            }
            // Second phase: withdrawals.
            Err(pos) => {
                match self.delta.withdrawn_origins.get(pos) {
                    Some(res) => {
                        self.pos = Err(pos + 1);
                        Some((Action::Withdraw, res.to_payload()))
                    }
                    None => None
                }
            }
        }
    }
}
/// Accumulates a sequence of consecutive deltas into a single one.
#[derive(Clone, Debug, Default)]
struct DeltaMerger {
    /// The serial of the most recently merged delta.
    serial: Serial,
    /// The net set of announced origins.
    announced_origins: HashSet<RouteOrigin>,
    /// The net set of withdrawn origins.
    withdrawn_origins: HashSet<RouteOrigin>,
}
impl DeltaMerger {
    /// Merges all the deltas of an iterator, oldest first.
    fn from_iter<'a>(
        mut iter: impl Iterator<Item = &'a Arc<PayloadDelta>>
    ) -> Self {
        let mut res = match iter.next() {
            Some(delta) => Self::new(delta),
            None => return Self::default()
        };
        for delta in iter {
            res.merge(delta)
        }
        res
    }

    /// Starts a merger from a single delta.
    fn new(delta: &PayloadDelta) -> Self {
        DeltaMerger {
            serial: delta.serial,
            announced_origins:
                delta.announced_origins.iter().cloned().collect(),
            withdrawn_origins:
                delta.withdrawn_origins.iter().cloned().collect(),
        }
    }

    /// Folds a newer delta into the merger.
    fn merge(&mut self, delta: &PayloadDelta) {
        self.serial = delta.serial;
        // An announcement cancels a pending withdrawal of the same origin
        // and vice versa; otherwise it joins the net change set.
        for origin in &delta.announced_origins {
            if !self.withdrawn_origins.remove(origin) {
                self.announced_origins.insert(*origin);
            }
        }
        for origin in &delta.withdrawn_origins {
            if !self.announced_origins.remove(origin) {
                self.withdrawn_origins.insert(*origin);
            }
        }
    }

    /// Converts the merger into a shareable delta.
    fn into_delta(self) -> Arc<PayloadDelta> {
        Arc::new(PayloadDelta {
            serial: self.serial,
            announced_origins: self.announced_origins.into_iter().collect(),
            withdrawn_origins: self.withdrawn_origins.into_iter().collect(),
        })
    }
}
/// A single route origin authorization: AS number, prefix, max length.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct RouteOrigin {
    /// The authorized origin AS number.
    as_id: AsId,
    /// The authorized address prefix.
    prefix: AddressPrefix,
    /// The maximum length of more specific prefixes that may be announced.
    max_length: u8,
}
impl RouteOrigin {
    /// Creates a new origin from its components.
    pub fn new(
        as_id: AsId,
        prefix: AddressPrefix,
        max_length: u8,
    ) -> Self {
        RouteOrigin { as_id, prefix, max_length }
    }

    /// Creates an origin from a ROA's AS number and one of its prefixes.
    fn from_roa(as_id: AsId, prefix: FriendlyRoaIpAddress) -> Self {
        Self::new(as_id, prefix.into(), prefix.max_length())
    }

    /// Returns the origin AS number.
    pub fn as_id(self) -> AsId {
        self.as_id
    }

    /// Returns the address prefix.
    pub fn prefix(self) -> AddressPrefix {
        self.prefix
    }

    /// Returns the address of the prefix.
    pub fn address(self) -> IpAddr {
        self.prefix.address()
    }

    /// Returns the length of the prefix.
    pub fn address_length(self) -> u8 {
        self.prefix.address_length()
    }

    /// Returns the maximum allowed prefix length.
    pub fn max_length(self) -> u8 {
        self.max_length
    }

    /// Converts the origin into an RTR payload element.
    pub fn to_payload(self) -> Payload {
        match self.address() {
            IpAddr::V4(addr) => {
                Payload::V4(Ipv4Prefix {
                    prefix: addr,
                    prefix_len: self.address_length(),
                    max_len: self.max_length(),
                    asn: self.as_id().into(),
                })
            }
            IpAddr::V6(addr) => {
                Payload::V6(Ipv6Prefix {
                    prefix: addr,
                    prefix_len: self.address_length(),
                    max_len: self.max_length(),
                    asn: self.as_id().into(),
                })
            }
        }
    }
}
impl PartialOrd for RouteOrigin {
    /// Delegates to the total order below.
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for RouteOrigin {
    /// Orders by *descending* max length first, then by prefix and AS
    /// number in ascending order.
    fn cmp(&self, other: &Self) -> Ordering {
        // Swapping `self` and `other` in the max-length comparison
        // reproduces the descending order of the original match chain.
        other.max_length.cmp(&self.max_length)
            .then_with(|| self.prefix.cmp(&other.prefix))
            .then_with(|| self.as_id.cmp(&other.as_id))
    }
}
/// An IP address prefix: an address and a prefix length.
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct AddressPrefix {
    /// The network address.
    addr: IpAddr,
    /// The prefix length in bits.
    len: u8,
}
impl AddressPrefix {
    /// Creates a new prefix from an address and a length.
    ///
    /// Fails if the length exceeds the address family's bit size or if
    /// any host bits -- bits beyond the prefix length -- are set.
    pub fn new(addr: IpAddr, len: u8) -> Result<Self, AddressPrefixError> {
        if (addr.is_ipv4() && len > 32) || len > 128 {
            return Err(AddressPrefixError::LengthOverflow)
        }
        let res = AddressPrefix { addr, len };
        if !res.is_host_zero() {
            Err(AddressPrefixError::NonZeroHost)
        }
        else {
            Ok(res)
        }
    }

    /// Returns whether all bits beyond the prefix length are zero.
    fn is_host_zero(self) -> bool {
        // The host portion is zero iff the address has at least
        // (family bits - len) trailing zero bits.
        match self.addr {
            IpAddr::V4(addr) => {
                u32::from(addr).trailing_zeros()
                    >= 32u32.saturating_sub(self.len.into())
            }
            IpAddr::V6(addr) => {
                u128::from(addr).trailing_zeros()
                    >= 128u32.saturating_sub(self.len.into())
            }
        }
    }

    /// Returns whether this is an IPv4 prefix.
    pub fn is_v4(self) -> bool {
        self.addr.is_ipv4()
    }

    /// Returns whether this is an IPv6 prefix.
    pub fn is_v6(self) -> bool {
        self.addr.is_ipv6()
    }

    /// Returns the network address.
    pub fn address(self) -> IpAddr {
        self.addr
    }

    /// Returns the prefix length.
    pub fn address_length(self) -> u8 {
        self.len
    }

    /// Returns whether `self` covers `other`, i.e., `other` is equal to
    /// or more specific than `self`. Prefixes of different address
    /// families never cover each other.
    pub fn covers(self, other: Self) -> bool {
        match (self.addr, other.addr) {
            (IpAddr::V4(left), IpAddr::V4(right)) => {
                // Full-length prefixes are handled separately: shifting
                // a u32 by 32 below would overflow.
                if self.len > 31 && other.len > 31 {
                    left == right
                }
                else if self.len > other.len {
                    false
                }
                else {
                    // Compare only the network portions of the addresses.
                    let left = u32::from(left)
                        & !(::std::u32::MAX >> self.len);
                    let right = u32::from(right)
                        & !(::std::u32::MAX >> self.len);
                    left == right
                }
            }
            (IpAddr::V6(left), IpAddr::V6(right)) => {
                // Same special case as above, for a 128 bit shift.
                if self.len > 127 && other.len > 127 {
                    left == right
                }
                else if self.len > other.len {
                    false
                }
                else {
                    let left = u128::from(left)
                        & !(::std::u128::MAX >> self.len);
                    let right = u128::from(right)
                        & !(::std::u128::MAX >> self.len);
                    left == right
                }
            }
            _ => false
        }
    }
}
impl From<FriendlyRoaIpAddress> for AddressPrefix {
    /// Converts a ROA prefix, using its address and length only.
    ///
    /// NOTE(review): this bypasses the host-bits check performed by
    /// `AddressPrefix::new` -- presumably ROA prefixes are already in
    /// canonical form; confirm.
    fn from(addr: FriendlyRoaIpAddress) -> Self {
        AddressPrefix {
            addr: addr.address(),
            len: addr.address_length(),
        }
    }
}

impl From<AddressPrefix> for IpBlock {
    /// Converts the prefix into a resource block.
    fn from(src: AddressPrefix) -> Self {
        Prefix::new(src.addr, src.len).into()
    }
}
impl FromStr for AddressPrefix {
    type Err = ParsePrefixError;

    /// Parses a prefix from the usual `addr/len` notation.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        // Split into at most two parts at the first slash.
        let mut iter = s.splitn(2, '/');
        let addr = iter.next().ok_or(ParsePrefixError::Empty)?;
        if addr.is_empty() {
            return Err(ParsePrefixError::Empty)
        }
        let len = iter.next().ok_or(ParsePrefixError::MissingLen)?;
        let addr = IpAddr::from_str(addr).map_err(
            ParsePrefixError::InvalidAddr
        )?;
        let len = u8::from_str(len).map_err(
            ParsePrefixError::InvalidLen
        )?;
        // `new` performs the length and host-bits checks.
        AddressPrefix::new(addr, len).map_err(
            ParsePrefixError::InvalidPrefix
        )
    }
}
impl<'de> Deserialize<'de> for AddressPrefix {
    /// Deserializes a prefix from its string representation.
    fn deserialize<D: Deserializer<'de>>(
        deserializer: D
    ) -> Result<Self, D::Error> {
        // A visitor that simply forwards to the `FromStr` impl.
        struct Visitor;
        impl<'de> serde::de::Visitor<'de> for Visitor {
            type Value = AddressPrefix;
            fn expecting(
                &self, formatter: &mut fmt::Formatter
            ) -> fmt::Result {
                write!(formatter, "a string with a IPv4 or IPv6 prefix")
            }
            fn visit_str<E: serde::de::Error>(
                self, v: &str
            ) -> Result<Self::Value, E> {
                AddressPrefix::from_str(v).map_err(E::custom)
            }
        }
        deserializer.deserialize_str(Visitor)
    }
}
impl fmt::Display for AddressPrefix {
    /// Formats the prefix in the usual `addr/len` notation.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}/{}", self.addr, self.len)
    }
}
/// An error happened while creating an address prefix.
#[derive(Clone, Debug, PartialEq, Eq)]
#[non_exhaustive]
pub enum AddressPrefixError {
    /// The prefix length exceeds the address family's bit size.
    LengthOverflow,
    /// Bits beyond the prefix length are set.
    NonZeroHost,
}

impl fmt::Display for AddressPrefixError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str(match *self {
            AddressPrefixError::LengthOverflow => {
                "prefix length too large"
            }
            AddressPrefixError::NonZeroHost => {
                "non-zero host portion"
            }
        })
    }
}

impl error::Error for AddressPrefixError { }
/// An error happened while parsing an address prefix from a string.
#[derive(Clone, Debug, PartialEq, Eq)]
#[non_exhaustive]
pub enum ParsePrefixError {
    /// The input string was empty.
    Empty,
    /// The length portion after the slash was missing.
    MissingLen,
    /// The address portion could not be parsed.
    InvalidAddr(AddrParseError),
    /// The length portion could not be parsed.
    InvalidLen(ParseIntError),
    /// Address and length did not form a valid prefix.
    InvalidPrefix(AddressPrefixError),
}

impl fmt::Display for ParsePrefixError{
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            ParsePrefixError::Empty => f.write_str("empty string"),
            ParsePrefixError::MissingLen => {
                f.write_str("missing length portion")
            }
            ParsePrefixError::InvalidAddr(err) => {
                write!(f, "invalid address: {}", err)
            }
            ParsePrefixError::InvalidLen(err) => {
                write!(f, "invalid length: {}", err)
            }
            ParsePrefixError::InvalidPrefix(err) => err.fmt(f),
        }
    }
}

impl error::Error for ParsePrefixError { }
/// Information about the source(s) of a route origin.
///
/// Implemented as a singly linked list: each node holds either ROA info
/// (`Ok`) or local exception info (`Err`) plus an optional tail with
/// further sources for the same origin.
#[derive(Clone, Debug)]
pub struct OriginInfo {
    /// The source described by this node.
    head: Result<Arc<RoaInfo>, Arc<ExceptionInfo>>,
    /// Further sources for the same origin, if any.
    tail: Option<Box<OriginInfo>>,
}
impl OriginInfo {
    /// Adds another ROA source, inserting it right behind the head.
    fn add_roa(&mut self, info: Arc<RoaInfo>) {
        self.tail = Some(Box::new(OriginInfo {
            head: Ok(info),
            tail: self.tail.take()
        }));
    }

    /// Adds another local exception source behind the head.
    fn add_local(&mut self, info: Arc<ExceptionInfo>) {
        self.tail = Some(Box::new(OriginInfo {
            head: Err(info),
            tail: self.tail.take()
        }));
    }

    /// Returns an iterator over this and all further sources.
    pub fn iter(&self) -> OriginInfoIter {
        OriginInfoIter { info: Some(self) }
    }

    /// Returns the TAL name if the primary source is a ROA.
    pub fn tal_name(&self) -> Option<&str> {
        self.head.as_ref().map(|info| info.tal.name()).ok()
    }

    /// Returns the object URI if the primary source is a ROA with one.
    pub fn uri(&self) -> Option<&uri::Rsync> {
        self.head.as_ref().ok().and_then(|info| info.uri.as_ref())
    }

    /// Returns the ROA validity if the primary source is a ROA.
    pub fn validity(&self) -> Option<Validity> {
        self.head.as_ref().map(|info| info.roa_validity).ok()
    }

    /// Returns the ROA info of the primary source, if it is a ROA.
    pub fn roa_info(&self) -> Option<&RoaInfo> {
        match self.head {
            Ok(ref info) => Some(info),
            Err(_) => None
        }
    }

    /// Returns the exception info of the primary source, if it is one.
    pub fn exception_info(&self) -> Option<&ExceptionInfo> {
        match self.head {
            Ok(_) => None,
            Err(ref info) => Some(info),
        }
    }
}
impl From<Arc<RoaInfo>> for OriginInfo {
    /// Creates a single-node list from ROA info.
    fn from(src: Arc<RoaInfo>) -> Self {
        OriginInfo { head: Ok(src), tail: None }
    }
}

impl From<Arc<ExceptionInfo>> for OriginInfo {
    /// Creates a single-node list from local exception info.
    fn from(src: Arc<ExceptionInfo>) -> Self {
        OriginInfo { head: Err(src), tail: None }
    }
}

impl<'a> IntoIterator for &'a OriginInfo {
    type Item = &'a OriginInfo;
    type IntoIter = OriginInfoIter<'a>;

    fn into_iter(self) -> Self::IntoIter {
        self.iter()
    }
}
/// An iterator over the linked list of origin sources.
#[derive(Clone, Debug)]
pub struct OriginInfoIter<'a> {
    /// The next node to yield, if any.
    info: Option<&'a OriginInfo>,
}
impl<'a> Iterator for OriginInfoIter<'a> {
    type Item = &'a OriginInfo;

    /// Walks the linked list, yielding each node in turn.
    fn next(&mut self) -> Option<Self::Item> {
        let current = self.info.take()?;
        self.info = current.tail.as_deref();
        Some(current)
    }
}
/// Information about the ROA a route origin came from.
#[derive(Clone, Debug)]
pub struct RoaInfo {
    /// The TAL the ROA is derived from.
    pub tal: Arc<TalInfo>,
    /// The rsync URI of the signed object, if available.
    pub uri: Option<uri::Rsync>,
    /// The validity of the ROA's certificate itself.
    pub roa_validity: Validity,
    /// The validity trimmed to that of the issuing CA.
    pub chain_validity: Validity,
}
impl RoaInfo {
    /// Creates the info for a ROA from its certificate and the validity
    /// of the issuing CA.
    fn new(cert: &ResourceCert, ca_validity: Validity) -> Self {
        RoaInfo {
            tal: cert.tal().clone(),
            // NOTE(review): `unshare` presumably detaches the URI from
            // shared certificate data so the latter can be dropped --
            // confirm against the `uri` crate.
            uri: cert.signed_object().cloned().map(|mut uri| {
                uri.unshare(); uri
            }),
            roa_validity: cert.validity(),
            chain_validity: cert.validity().trim(ca_validity),
        }
    }
}
/// Returns all keys that are present in `this` but not in `other`.
///
/// Used to compute the announced and withdrawn origin sets between two
/// snapshot builders.
fn added_keys<K: Copy + Hash + Eq, V>(
    this: &HashMap<K, V>, other: &HashMap<K, V>
) -> Vec<K> {
    // `K: Copy`, so `copied` is the idiomatic choice over `cloned`
    // (clippy: clone_on_copy).
    this.keys().filter(|key| !other.contains_key(key)).copied().collect()
}
#[cfg(test)]
mod test {
    use super::*;

    /// Builds a prefix, panicking on invalid input.
    fn make_prefix(s: &str, l: u8) -> AddressPrefix {
        AddressPrefix::new(s.parse().unwrap(), l).unwrap()
    }

    /// Checks `covers` for IPv4 prefixes.
    #[test]
    fn address_prefix_covers_v4() {
        let outer = make_prefix("10.0.0.0", 16);
        let host_roa = make_prefix("10.0.0.0", 32);
        let sibling = make_prefix("10.1.0.0", 16);
        let inner_low = make_prefix("10.0.0.0", 24);
        let inner_mid = make_prefix("10.0.61.0", 24);
        let inner_hi = make_prefix("10.0.255.0", 24);
        let supernet = make_prefix("10.0.0.0", 8);
        assert!(!outer.covers(sibling));
        assert!(outer.covers(inner_low));
        assert!(outer.covers(inner_mid));
        assert!(outer.covers(inner_hi));
        assert!(!host_roa.covers(outer));
        assert!(!outer.covers(supernet));
    }

    /// Checks `covers` for IPv6 prefixes.
    #[test]
    fn address_prefix_covers_v6() {
        let outer = make_prefix("2001:db8::", 32);
        let host_roa = make_prefix("2001:db8::", 128);
        let sibling = make_prefix("2001:db9::", 32);
        let inner_low = make_prefix("2001:db8::", 48);
        let inner_mid = make_prefix("2001:db8:8000::", 48);
        let inner_hi = make_prefix("2001:db8:FFFF::", 48);
        let supernet = make_prefix("2001::", 24);
        assert!(!outer.covers(sibling));
        assert!(outer.covers(inner_low));
        assert!(outer.covers(inner_mid));
        assert!(outer.covers(inner_hi));
        assert!(!host_roa.covers(outer));
        assert!(!outer.covers(supernet));
    }

    /// Checks that delta construction finds announcements, withdrawals,
    /// increments the serial, and yields `None` on identical data.
    #[test]
    fn payload_delta_construct() {
        fn origin(as_id: u32, prefix: &str, max_len: u8) -> RouteOrigin {
            RouteOrigin::new(
                as_id.into(),
                AddressPrefix::from_str(prefix).unwrap(),
                max_len
            )
        }
        let o0 = origin(10, "10.0.0.0/10", 10);
        let o1 = origin(11, "10.0.0.0/11", 10);
        let o2 = origin(12, "10.0.0.0/12", 10);
        let o3 = origin(13, "10.0.0.0/13", 10);
        let o4 = origin(14, "10.0.0.0/14", 10);
        let info = OriginInfo::from(Arc::new(ExceptionInfo::default()));
        let mut current = SnapshotBuilder::default();
        current.origins.insert(o0, info.clone());
        current.origins.insert(o1, info.clone());
        current.origins.insert(o2, info.clone());
        current.origins.insert(o3, info.clone());
        let mut next = SnapshotBuilder::default();
        next.origins.insert(o0, info.clone());
        next.origins.insert(o2, info.clone());
        next.origins.insert(o4, info);
        let delta = PayloadDelta::construct(
            &current, &next, 12.into()
        ).unwrap();
        // The new delta must carry the incremented serial.
        assert_eq!(delta.serial, Serial::from(13));
        let mut add: HashSet<_> = delta.announced_origins.into_iter().collect();
        let mut sub: HashSet<_> = delta.withdrawn_origins.into_iter().collect();
        assert!(add.remove(&o4));
        assert!(add.is_empty());
        assert!(sub.remove(&o1));
        assert!(sub.remove(&o3));
        assert!(sub.is_empty());
        // Identical data must not produce a delta at all.
        assert!(
            PayloadDelta::construct(&current, &current, 10.into()).is_none()
        );
    }

    /// Checks the key-difference helper used by delta construction.
    #[test]
    fn fn_added_keys() {
        use std::iter::FromIterator;
        assert_eq!(
            added_keys(
                &HashMap::from_iter(
                    vec![(1, ()), (2, ()), (3, ()), (4, ())].into_iter()
                ),
                &HashMap::from_iter(
                    vec![(2, ()), (4, ()), (5, ())].into_iter()
                )
            ).into_iter().collect::<HashSet<_>>(),
            HashSet::from_iter(vec![1, 3].into_iter()),
        );
    }

    /// Checks parsing of valid and invalid prefix strings.
    #[test]
    fn prefix_from_str() {
        assert_eq!(
            AddressPrefix::from_str("0.0.0.0/0").unwrap(),
            AddressPrefix::new(
                IpAddr::from_str("0.0.0.0").unwrap(), 0
            ).unwrap()
        );
        assert_eq!(
            AddressPrefix::from_str("10.0.0.0/8").unwrap(),
            AddressPrefix::new(
                IpAddr::from_str("10.0.0.0").unwrap(), 8
            ).unwrap()
        );
        assert_eq!(
            AddressPrefix::from_str("10.0.0.0/32").unwrap(),
            AddressPrefix::new(
                IpAddr::from_str("10.0.0.0").unwrap(), 32
            ).unwrap()
        );
        assert_eq!(
            AddressPrefix::from_str("::/0").unwrap(),
            AddressPrefix::new(
                IpAddr::from_str("::").unwrap(), 0
            ).unwrap()
        );
        assert_eq!(
            AddressPrefix::from_str("2001:db8::/32").unwrap(),
            AddressPrefix::new(
                IpAddr::from_str("2001:db8::").unwrap(), 32
            ).unwrap()
        );
        assert_eq!(
            AddressPrefix::from_str("2001:db8::/128").unwrap(),
            AddressPrefix::new(
                IpAddr::from_str("2001:db8::").unwrap(), 128
            ).unwrap()
        );
        assert_eq!(
            AddressPrefix::from_str("").unwrap_err(),
            ParsePrefixError::Empty
        );
        assert_eq!(
            AddressPrefix::from_str("10.0.0.0").unwrap_err(),
            ParsePrefixError::MissingLen
        );
        assert_eq!(
            AddressPrefix::from_str("2001:db8::").unwrap_err(),
            ParsePrefixError::MissingLen
        );
        assert_eq!(
            AddressPrefix::from_str("10.0.0.0/33").unwrap_err(),
            ParsePrefixError::InvalidPrefix(AddressPrefixError::LengthOverflow)
        );
        assert_eq!(
            AddressPrefix::from_str("2001:db8::/129").unwrap_err(),
            ParsePrefixError::InvalidPrefix(AddressPrefixError::LengthOverflow)
        );
        assert_eq!(
            AddressPrefix::from_str("10.128.0.0/8").unwrap_err(),
            ParsePrefixError::InvalidPrefix(AddressPrefixError::NonZeroHost)
        );
        assert_eq!(
            AddressPrefix::from_str("10.0.0.1/8").unwrap_err(),
            ParsePrefixError::InvalidPrefix(AddressPrefixError::NonZeroHost)
        );
        assert_eq!(
            AddressPrefix::from_str("10.192.0.0/9").unwrap_err(),
            ParsePrefixError::InvalidPrefix(AddressPrefixError::NonZeroHost)
        );
        assert_eq!(
            AddressPrefix::from_str("2001:db8:8000::/32").unwrap_err(),
            ParsePrefixError::InvalidPrefix(AddressPrefixError::NonZeroHost)
        );
        assert_eq!(
            AddressPrefix::from_str("2001:db8::1/32").unwrap_err(),
            ParsePrefixError::InvalidPrefix(AddressPrefixError::NonZeroHost)
        );
        assert_eq!(
            AddressPrefix::from_str("2001:db8:4000::/33").unwrap_err(),
            ParsePrefixError::InvalidPrefix(AddressPrefixError::NonZeroHost)
        );
    }
}