tor_netdir/lib.rs
1#![cfg_attr(docsrs, feature(doc_auto_cfg, doc_cfg))]
2#![doc = include_str!("../README.md")]
3// @@ begin lint list maintained by maint/add_warning @@
4#![allow(renamed_and_removed_lints)] // @@REMOVE_WHEN(ci_arti_stable)
5#![allow(unknown_lints)] // @@REMOVE_WHEN(ci_arti_nightly)
6#![warn(missing_docs)]
7#![warn(noop_method_call)]
8#![warn(unreachable_pub)]
9#![warn(clippy::all)]
10#![deny(clippy::await_holding_lock)]
11#![deny(clippy::cargo_common_metadata)]
12#![deny(clippy::cast_lossless)]
13#![deny(clippy::checked_conversions)]
14#![warn(clippy::cognitive_complexity)]
15#![deny(clippy::debug_assert_with_mut_call)]
16#![deny(clippy::exhaustive_enums)]
17#![deny(clippy::exhaustive_structs)]
18#![deny(clippy::expl_impl_clone_on_copy)]
19#![deny(clippy::fallible_impl_from)]
20#![deny(clippy::implicit_clone)]
21#![deny(clippy::large_stack_arrays)]
22#![warn(clippy::manual_ok_or)]
23#![deny(clippy::missing_docs_in_private_items)]
24#![warn(clippy::needless_borrow)]
25#![warn(clippy::needless_pass_by_value)]
26#![warn(clippy::option_option)]
27#![deny(clippy::print_stderr)]
28#![deny(clippy::print_stdout)]
29#![warn(clippy::rc_buffer)]
30#![deny(clippy::ref_option_ref)]
31#![warn(clippy::semicolon_if_nothing_returned)]
32#![warn(clippy::trait_duplication_in_bounds)]
33#![deny(clippy::unchecked_duration_subtraction)]
34#![deny(clippy::unnecessary_wraps)]
35#![warn(clippy::unseparated_literal_suffix)]
36#![deny(clippy::unwrap_used)]
37#![deny(clippy::mod_module_files)]
38#![allow(clippy::let_unit_value)] // This can reasonably be done for explicitness
39#![allow(clippy::uninlined_format_args)]
40#![allow(clippy::significant_drop_in_scrutinee)] // arti/-/merge_requests/588/#note_2812945
41#![allow(clippy::result_large_err)] // temporary workaround for arti#587
42#![allow(clippy::needless_raw_string_hashes)] // complained-about code is fine, often best
43#![allow(clippy::needless_lifetimes)] // See arti#1765
44#![allow(mismatched_lifetime_syntaxes)] // temporary workaround for arti#2060
45//! <!-- @@ end lint list maintained by maint/add_warning @@ -->
46
47pub mod details;
48mod err;
49#[cfg(feature = "hs-common")]
50mod hsdir_params;
51#[cfg(feature = "hs-common")]
52mod hsdir_ring;
53pub mod params;
54mod weight;
55
56#[cfg(any(test, feature = "testing"))]
57pub mod testnet;
58#[cfg(feature = "testing")]
59pub mod testprovider;
60
61use async_trait::async_trait;
62#[cfg(feature = "hs-service")]
63use itertools::chain;
64use static_assertions::const_assert;
65use tor_error::warn_report;
66use tor_linkspec::{
67 ChanTarget, DirectChanMethodsHelper, HasAddrs, HasRelayIds, RelayIdRef, RelayIdType,
68};
69use tor_llcrypto as ll;
70use tor_llcrypto::pk::{ed25519::Ed25519Identity, rsa::RsaIdentity};
71use tor_netdoc::doc::microdesc::{MdDigest, Microdesc};
72use tor_netdoc::doc::netstatus::{self, MdConsensus, MdRouterStatus};
73#[cfg(feature = "hs-common")]
74use {hsdir_ring::HsDirRing, std::iter};
75
76use derive_more::{From, Into};
77use futures::{StreamExt, stream::BoxStream};
78use num_enum::{IntoPrimitive, TryFromPrimitive};
79use rand::seq::{IndexedRandom as _, SliceRandom as _, WeightError};
80use serde::Deserialize;
81use std::collections::HashMap;
82use std::net::IpAddr;
83use std::ops::Deref;
84use std::sync::Arc;
85use std::time::SystemTime;
86use strum::{EnumCount, EnumIter};
87use tracing::warn;
88use typed_index_collections::{TiSlice, TiVec};
89
90#[cfg(feature = "hs-common")]
91use {
92 itertools::Itertools,
93 std::collections::HashSet,
94 tor_error::{Bug, internal},
95 tor_hscrypto::{pk::HsBlindId, time::TimePeriod},
96};
97
98pub use err::Error;
99pub use weight::WeightRole;
100/// A Result using the Error type from the tor-netdir crate
101pub type Result<T> = std::result::Result<T, Error>;
102
103#[cfg(feature = "hs-common")]
104pub use err::OnionDirLookupError;
105
106use params::NetParameters;
107#[cfg(feature = "geoip")]
108use tor_geoip::{CountryCode, GeoipDb, HasCountryCode};
109
110#[cfg(feature = "hs-common")]
111#[cfg_attr(docsrs, doc(cfg(feature = "hs-common")))]
112pub use hsdir_params::HsDirParams;
113
114/// Index into the consensus relays
115///
116/// This is an index into the list of relays returned by
117/// [`.c_relays()`](ConsensusRelays::c_relays)
118/// (on the corresponding consensus or netdir).
119///
120/// This is just a `usize` inside, but using a newtype prevents getting a relay index
121/// confused with other kinds of slice indices or counts.
122///
123/// If you are in a part of the code which needs to work with multiple consensuses,
124/// the typechecking cannot tell if you try to index into the wrong consensus.
125#[derive(Debug, From, Into, Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash)]
126pub(crate) struct RouterStatusIdx(usize);
127
128/// Extension trait to provide index-type-safe `.c_relays()` method
129//
130 // TODO: Really it would be better to have MdConsensus::relays() return TiSlice,
131// but that would be an API break there.
132pub(crate) trait ConsensusRelays {
133 /// Obtain the list of relays in the consensus
134 //
135 fn c_relays(&self) -> &TiSlice<RouterStatusIdx, MdRouterStatus>;
136}
137impl ConsensusRelays for MdConsensus {
138 fn c_relays(&self) -> &TiSlice<RouterStatusIdx, MdRouterStatus> {
139 TiSlice::from_ref(MdConsensus::relays(self))
140 }
141}
142impl ConsensusRelays for NetDir {
143 fn c_relays(&self) -> &TiSlice<RouterStatusIdx, MdRouterStatus> {
144 self.consensus.c_relays()
145 }
146}
147
148/// Configuration for determining when two relays have addresses "too close" in
149/// the network.
150///
151/// Used by `Relay::low_level_details().in_same_subnet()`.
152#[derive(Deserialize, Debug, Clone, Copy, Eq, PartialEq)]
153#[serde(deny_unknown_fields)]
154pub struct SubnetConfig {
155 /// Consider IPv4 nodes in the same /x to be the same family.
156 ///
157 /// If this value is 0, all nodes with IPv4 addresses will be in the
158 /// same family. If this value is above 32, then no nodes will be
159 /// placed in the same family based on their IPv4 addresses.
160 subnets_family_v4: u8,
161 /// Consider IPv6 nodes in the same /x to be the same family.
162 ///
163 /// If this value is 0, all nodes with IPv6 addresses will be in the
164 /// same family. If this value is above 128, then no nodes will be
165 /// placed in the same family based on their IPv6 addresses.
166 subnets_family_v6: u8,
167}
168
169impl Default for SubnetConfig {
170 fn default() -> Self {
171 Self::new(16, 32)
172 }
173}
174
175impl SubnetConfig {
176 /// Construct a new SubnetConfig from a pair of bit prefix lengths.
177 ///
178 /// The values are clamped to the appropriate ranges if they are
179 /// out-of-bounds.
180 pub fn new(subnets_family_v4: u8, subnets_family_v6: u8) -> Self {
181 Self {
182 subnets_family_v4,
183 subnets_family_v6,
184 }
185 }
186
187 /// Construct a new SubnetConfig such that addresses are not in the same
188 /// family with anything--not even with themselves.
189 pub fn no_addresses_match() -> SubnetConfig {
190 SubnetConfig {
191 subnets_family_v4: 33,
192 subnets_family_v6: 129,
193 }
194 }
195
196 /// Return true if the two addresses are in the same subnet, according to this
197 /// configuration.
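///
/// # Example
///
/// A minimal sketch using the default configuration (a /16 for IPv4 and a
/// /32 for IPv6); illustrative only:
///
/// ```
/// use std::net::IpAddr;
/// use tor_netdir::SubnetConfig;
///
/// let cfg = SubnetConfig::default();
/// let a: IpAddr = "192.0.2.1".parse().unwrap();
/// let b: IpAddr = "192.0.2.200".parse().unwrap();
/// // Both addresses share the same /16 prefix (192.0.0.0/16).
/// assert!(cfg.addrs_in_same_subnet(&a, &b));
/// ```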
198 pub fn addrs_in_same_subnet(&self, a: &IpAddr, b: &IpAddr) -> bool {
199 match (a, b) {
200 (IpAddr::V4(a), IpAddr::V4(b)) => {
201 let bits = self.subnets_family_v4;
202 if bits > 32 {
203 return false;
204 }
205 let a = u32::from_be_bytes(a.octets());
206 let b = u32::from_be_bytes(b.octets());
207 (a >> (32 - bits)) == (b >> (32 - bits))
208 }
209 (IpAddr::V6(a), IpAddr::V6(b)) => {
210 let bits = self.subnets_family_v6;
211 if bits > 128 {
212 return false;
213 }
214 let a = u128::from_be_bytes(a.octets());
215 let b = u128::from_be_bytes(b.octets());
216 (a >> (128 - bits)) == (b >> (128 - bits))
217 }
218 _ => false,
219 }
220 }
221
222 /// Return true if any of the addresses in `a` shares a subnet with any of
223 /// the addresses in `b`, according to this configuration.
224 pub fn any_addrs_in_same_subnet<T, U>(&self, a: &T, b: &U) -> bool
225 where
226 T: tor_linkspec::HasAddrs,
227 U: tor_linkspec::HasAddrs,
228 {
229 a.addrs().iter().any(|aa| {
230 b.addrs()
231 .iter()
232 .any(|bb| self.addrs_in_same_subnet(&aa.ip(), &bb.ip()))
233 })
234 }
235
236 /// Return a new subnet configuration that is the union of `self` and
237 /// `other`.
238 ///
239 /// That is, return a subnet configuration that puts all addresses in the
240 /// same subnet if and only if at least one of `self` and `other` would put
241 /// them in the same subnet.
242 pub fn union(&self, other: &Self) -> Self {
243 use std::cmp::min;
244 Self {
245 subnets_family_v4: min(self.subnets_family_v4, other.subnets_family_v4),
246 subnets_family_v6: min(self.subnets_family_v6, other.subnets_family_v6),
247 }
248 }
249}
250
251/// Configuration for which listed family information to use when deciding
252/// whether relays belong to the same family.
253///
254/// Derived from network parameters.
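///
/// # Example
///
/// A brief sketch of building rules by hand (normally a `FamilyRules` is
/// derived from the consensus `NetParameters` via `From`); illustrative only:
///
/// ```
/// use tor_netdir::FamilyRules;
///
/// let mut rules = FamilyRules::all_family_info();
/// // Ignore listed family members, but keep using family IDs and certs.
/// rules.use_family_lists(false);
/// ```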
255#[derive(Clone, Copy, Debug)]
256pub struct FamilyRules {
257 /// If true, we use family information from lists of family members.
258 use_family_lists: bool,
259 /// If true, we use family information from lists of family IDs and from family certs.
260 use_family_ids: bool,
261}
262
263impl<'a> From<&'a NetParameters> for FamilyRules {
264 fn from(params: &'a NetParameters) -> Self {
265 FamilyRules {
266 use_family_lists: bool::from(params.use_family_lists),
267 use_family_ids: bool::from(params.use_family_ids),
268 }
269 }
270}
271
272impl FamilyRules {
273 /// Return a `FamilyRules` that will use all recognized kinds of family information.
274 pub fn all_family_info() -> Self {
275 Self {
276 use_family_lists: true,
277 use_family_ids: true,
278 }
279 }
280
281 /// Return a `FamilyRules` that will ignore all family information declared by relays.
282 pub fn ignore_declared_families() -> Self {
283 Self {
284 use_family_lists: false,
285 use_family_ids: false,
286 }
287 }
288
289 /// Configure this `FamilyRules` to use (or not use) family information from
290 /// lists of family members.
291 pub fn use_family_lists(&mut self, val: bool) -> &mut Self {
292 self.use_family_lists = val;
293 self
294 }
295
296 /// Configure this `FamilyRules` to use (or not use) family information from
297 /// family IDs and family certs.
298 pub fn use_family_ids(&mut self, val: bool) -> &mut Self {
299 self.use_family_ids = val;
300 self
301 }
302
303 /// Return a `FamilyRules` that will look at every source of information
304 /// requested by `self` or by `other`.
305 pub fn union(&self, other: &Self) -> Self {
306 Self {
307 use_family_lists: self.use_family_lists || other.use_family_lists,
308 use_family_ids: self.use_family_ids || other.use_family_ids,
309 }
310 }
311}
312
313/// An opaque type representing the weight with which a relay or set of
314/// relays will be selected for a given role.
315///
316/// Most users should ignore this type, and just use pick_relay instead.
317#[derive(
318 Copy,
319 Clone,
320 Debug,
321 derive_more::Add,
322 derive_more::Sum,
323 derive_more::AddAssign,
324 Eq,
325 PartialEq,
326 Ord,
327 PartialOrd,
328)]
329pub struct RelayWeight(u64);
330
331impl RelayWeight {
332 /// Try to divide this weight by `rhs`.
333 ///
334 /// Return a ratio on success, or None on division-by-zero.
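///
/// A brief sketch of the division-by-zero behavior:
///
/// ```
/// use tor_netdir::RelayWeight;
///
/// let a = RelayWeight::from(40);
/// let b = RelayWeight::from(80);
/// assert_eq!(a.checked_div(b), Some(0.5));
/// assert_eq!(a.checked_div(RelayWeight::from(0)), None);
/// ```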
335 pub fn checked_div(&self, rhs: RelayWeight) -> Option<f64> {
336 if rhs.0 == 0 {
337 None
338 } else {
339 Some((self.0 as f64) / (rhs.0 as f64))
340 }
341 }
342
343 /// Compute a ratio `frac` of this weight.
344 ///
345 /// Return None if frac is less than zero, since negative weights
346 /// are impossible.
347 pub fn ratio(&self, frac: f64) -> Option<RelayWeight> {
348 let product = (self.0 as f64) * frac;
349 if product >= 0.0 && product.is_finite() {
350 Some(RelayWeight(product as u64))
351 } else {
352 None
353 }
354 }
355}
356
357impl From<u64> for RelayWeight {
358 fn from(val: u64) -> Self {
359 RelayWeight(val)
360 }
361}
362
363/// An operation for which we might be requesting a hidden service directory.
364#[derive(Copy, Clone, Debug, PartialEq)]
365// TODO: make this pub(crate) once NetDir::hs_dirs is removed
366#[non_exhaustive]
367pub enum HsDirOp {
368 /// Uploading an onion service descriptor.
369 #[cfg(feature = "hs-service")]
370 Upload,
371 /// Downloading an onion service descriptor.
372 Download,
373}
374
375/// A view of the Tor directory, suitable for use in building circuits.
376///
377/// Abstractly, a [`NetDir`] is a set of usable public [`Relay`]s, each of which
378/// has its own properties, identity, and correct weighted probability for use
379/// under different circumstances.
380///
381/// A [`NetDir`] is constructed by making a [`PartialNetDir`] from a consensus
382/// document, and then adding enough microdescriptors to that `PartialNetDir` so
383/// that it can be used to build paths. (Thus, if you have a NetDir, it is
384/// definitely adequate to build paths.)
385///
386/// # "Usable" relays
387///
388/// Many methods on NetDir are defined in terms of <a name="usable">"Usable"</a> relays. Unless
389/// otherwise stated, a relay is "usable" if it is listed in the consensus,
390/// if we have full directory information for that relay (including a
391/// microdescriptor), and if that relay does not have any flags indicating that
392/// we should never use it. (Currently, `NoEdConsensus` is the only such flag.)
393///
394/// # Limitations
395///
396/// The current NetDir implementation assumes fairly strongly that every relay
397/// has an Ed25519 identity and an RSA identity, that the consensus is indexed
398/// by RSA identities, and that the Ed25519 identities are stored in
399/// microdescriptors.
400///
401/// If these assumptions someday change, then we'll have to revise the
402/// implementation.
403#[derive(Debug, Clone)]
404pub struct NetDir {
405 /// A microdescriptor consensus that lists the members of the network,
406 /// and maps each one to a 'microdescriptor' that has more information
407 /// about it
408 consensus: Arc<MdConsensus>,
409 /// A map from keys to integer values, distributed in the consensus,
410 /// and clamped to certain defaults.
411 params: NetParameters,
412 /// Map from routerstatus index to that routerstatus's microdescriptor (if we have one).
413 mds: TiVec<RouterStatusIdx, Option<Arc<Microdesc>>>,
414 /// Map from SHA256 of _missing_ microdescriptors to the index of their
415 /// corresponding routerstatus.
416 rsidx_by_missing: HashMap<MdDigest, RouterStatusIdx>,
417 /// Map from ed25519 identity to index of the routerstatus.
418 ///
419 /// Note that we don't know the ed25519 identity of a relay until
420 /// we get the microdescriptor for it, so this won't be filled in
421 /// until we get the microdescriptors.
422 ///
423 /// # Implementation note
424 ///
425 /// For this field, and for `rsidx_by_rsa`,
426 /// it might be cool to have references instead.
427 /// But that would make this into a self-referential structure,
428 /// which isn't possible in safe rust.
429 rsidx_by_ed: HashMap<Ed25519Identity, RouterStatusIdx>,
430 /// Map from RSA identity to index of the routerstatus.
431 ///
432 /// This is constructed at the same time as the NetDir object, so it
433 /// can be immutable.
434 rsidx_by_rsa: Arc<HashMap<RsaIdentity, RouterStatusIdx>>,
435
436 /// Hash ring(s) describing the onion service directory.
437 ///
438 /// This is empty in a PartialNetDir, and is filled in before the NetDir is
439 /// built.
440 //
441 // TODO hs: It is ugly to have this exist in a partially constructed state
442 // in a PartialNetDir.
443 // Ideally, a PartialNetDir would contain only an HsDirs<HsDirParams>,
444 // or perhaps nothing at all, here.
445 #[cfg(feature = "hs-common")]
446 hsdir_rings: Arc<HsDirs<HsDirRing>>,
447
448 /// Weight values to apply to a given relay when deciding how frequently
449 /// to choose it for a given role.
450 weights: weight::WeightSet,
451
452 #[cfg(feature = "geoip")]
453 /// Country codes for each router in our consensus.
454 ///
455 /// This is indexed by the `RouterStatusIdx` (i.e. a router idx of zero has
456 /// the country code at position zero in this array).
457 country_codes: Vec<Option<CountryCode>>,
458}
459
460/// Collection of hidden service directories (or parameters for them)
461///
462/// In [`NetDir`] this is used to store the actual hash rings.
463 /// (But in the `NetDir` inside a [`PartialNetDir`], it contains [`HsDirRing`]s
464/// where only the `params` are populated, and the `ring` is empty.)
465///
466/// This same generic type is used as the return type from
467/// [`HsDirParams::compute`](HsDirParams::compute),
468/// where it contains the *parameters* for the primary and secondary rings.
469#[derive(Debug, Clone)]
470#[cfg(feature = "hs-common")]
471pub(crate) struct HsDirs<D> {
472 /// The current ring
473 ///
474 /// It corresponds to the time period containing the `valid-after` time in
475 /// the consensus. Its SRV is whatever SRV was most current at the time when
476 /// that time period began.
477 ///
478 /// This is the hash ring that we should use whenever we are fetching an
479 /// onion service descriptor.
480 current: D,
481
482 /// Secondary rings (based on the parameters for the previous and next time periods)
483 ///
484 /// Onion services upload to positions on these rings as well, based on how
485 /// far into the current time period this directory is, so that
486 /// not-synchronized clients can still find their descriptor.
487 ///
488 /// Note that with the current (2023) network parameters, with
489 /// `hsdir_interval = SRV lifetime = 24 hours` at most one of these
490 /// secondary rings will be active at a time. We have two here in order
491 /// to conform with a more flexible regime in proposal 342.
492 //
493 // TODO: hs clients never need this; so I've made it not-present for them.
494 // But does that risk too much with respect to side channels?
495 //
496 // TODO: Perhaps we should refactor this so that it is clear that these
497 // are immutable? On the other hand, the documentation for this type
498 // declares that it is immutable, so we are likely okay.
499 //
500 // TODO: this `Vec` is only ever 0,1,2 elements.
501 // Maybe it should be an ArrayVec or something.
502 #[cfg(feature = "hs-service")]
503 secondary: Vec<D>,
504}
505
506#[cfg(feature = "hs-common")]
507impl<D> HsDirs<D> {
508 /// Convert an `HsDirs<D>` to `HsDirs<D2>` by mapping each contained `D`
509 pub(crate) fn map<D2>(self, mut f: impl FnMut(D) -> D2) -> HsDirs<D2> {
510 HsDirs {
511 current: f(self.current),
512 #[cfg(feature = "hs-service")]
513 secondary: self.secondary.into_iter().map(f).collect(),
514 }
515 }
516
517 /// Iterate over some of the contained hsdirs, according to `secondary`
518 ///
519 /// The current ring is always included.
520 /// Secondary rings are included iff `secondary` is true and the `hs-service` feature is enabled.
521 fn iter_filter_secondary(&self, secondary: bool) -> impl Iterator<Item = &D> {
522 let i = iter::once(&self.current);
523
524 // With "hs-service" disabled, there are no secondary rings,
525 // so we don't care.
526 let _ = secondary;
527
528 #[cfg(feature = "hs-service")]
529 let i = chain!(i, self.secondary.iter().filter(move |_| secondary));
530
531 i
532 }
533
534 /// Iterate over all the contained hsdirs
535 pub(crate) fn iter(&self) -> impl Iterator<Item = &D> {
536 self.iter_filter_secondary(true)
537 }
538
539 /// Iterate over the hsdirs relevant for `op`
540 pub(crate) fn iter_for_op(&self, op: HsDirOp) -> impl Iterator<Item = &D> {
541 self.iter_filter_secondary(match op {
542 #[cfg(feature = "hs-service")]
543 HsDirOp::Upload => true,
544 HsDirOp::Download => false,
545 })
546 }
547}
548
549 /// An event that a [`NetDirProvider`] can broadcast to indicate a change in
550/// the status of its directory.
551#[derive(
552 Debug, Clone, Copy, PartialEq, Eq, EnumIter, EnumCount, IntoPrimitive, TryFromPrimitive,
553)]
554#[non_exhaustive]
555#[repr(u16)]
556pub enum DirEvent {
557 /// A new consensus has been received, and has enough information to be
558 /// used.
559 ///
560 /// This event is also broadcast when a new set of consensus parameters is
561 /// available, even if that set of parameters comes from a configuration
562 /// change rather than from the latest consensus.
563 NewConsensus,
564
565 /// New descriptors have been received for the current consensus.
566 ///
567 /// (This event is _not_ broadcast when receiving new descriptors for a
568 /// consensus which is not yet ready to replace the current consensus.)
569 NewDescriptors,
570
571 /// We have received updated recommendations and requirements
572 /// for which subprotocols we should have in order to use the network.
573 NewProtocolRecommendation,
574}
575
576/// The network directory provider is shutting down without giving us the
577/// netdir we asked for.
578#[derive(Clone, Copy, Debug, thiserror::Error)]
579#[error("Network directory provider is shutting down")]
580#[non_exhaustive]
581pub struct NetdirProviderShutdown;
582
583impl tor_error::HasKind for NetdirProviderShutdown {
584 fn kind(&self) -> tor_error::ErrorKind {
585 tor_error::ErrorKind::ArtiShuttingDown
586 }
587}
588
589/// How "timely" must a network directory be?
590///
591/// This enum is used as an argument when requesting a [`NetDir`] object from
592/// [`NetDirProvider`] and other APIs, to specify how recent the information
593/// must be in order to be useful.
594#[derive(Copy, Clone, Eq, PartialEq, Debug)]
595#[allow(clippy::exhaustive_enums)]
596pub enum Timeliness {
597 /// The network directory must be strictly timely.
598 ///
599 /// That is, it must be based on a consensus that is valid right now, with no
600 /// tolerance for skew or consensus problems.
601 ///
602 /// Avoid using this option if you could use [`Timeliness::Timely`] instead.
603 Strict,
604 /// The network directory must be roughly timely.
605 ///
606 /// That is, it must be based on a consensus that is not _too_ far in the
607 /// future, and not _too_ far in the past.
608 ///
609 /// (The tolerances for "too far" will depend on configuration.)
610 ///
611 /// This is almost always the option that you want to use.
612 Timely,
613 /// Any network directory is permissible, regardless of how untimely.
614 ///
615 /// Avoid using this option if you could use [`Timeliness::Timely`] instead.
616 Unchecked,
617}
618
619/// An object that can provide [`NetDir`]s, as well as inform consumers when
620/// they might have changed.
621///
622/// It is the responsibility of the implementor of `NetDirProvider`
623/// to try to obtain an up-to-date `NetDir`,
624/// and continuously to maintain and update it.
625///
626/// In usual configurations, Arti uses `tor_dirmgr::DirMgr`
627/// as its `NetDirProvider`.
628#[async_trait]
629pub trait NetDirProvider: UpcastArcNetDirProvider + Send + Sync {
630 /// Return a network directory that's live according to the provided
631 /// `timeliness`.
632 fn netdir(&self, timeliness: Timeliness) -> Result<Arc<NetDir>>;
633
634 /// Return a reasonable netdir for general usage.
635 ///
636 /// This is an alias for
637 /// [`NetDirProvider::netdir`]`(`[`Timeliness::Timely`]`)`.
638 fn timely_netdir(&self) -> Result<Arc<NetDir>> {
639 self.netdir(Timeliness::Timely)
640 }
641
642 /// Return a new asynchronous stream that will receive notification
643 /// whenever the consensus has changed.
644 ///
645 /// Multiple events may be batched up into a single item: each time
646 /// this stream yields an event, all you can assume is that the event has
647 /// occurred at least once.
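///
/// A sketch of the intended consumption pattern, assuming `provider`
/// implements [`NetDirProvider`] (illustrative, not compiled here):
///
/// ```ignore
/// let mut events = provider.events();
/// while let Some(_event) = events.next().await {
///     // Something changed at least once; re-check the directory to see what.
///     let _netdir = provider.netdir(Timeliness::Timely);
/// }
/// ```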
648 fn events(&self) -> BoxStream<'static, DirEvent>;
649
650 /// Return the latest network parameters.
651 ///
652 /// If we have no directory, return a reasonable set of defaults.
653 fn params(&self) -> Arc<dyn AsRef<NetParameters>>;
654
655 /// Get a NetDir from this provider, waiting until one exists.
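///
/// A sketch of the intended call pattern, assuming `provider` implements
/// [`NetDirProvider`] (illustrative, not compiled here):
///
/// ```ignore
/// let netdir = provider.wait_for_netdir(Timeliness::Timely).await?;
/// ```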
656 async fn wait_for_netdir(
657 &self,
658 timeliness: Timeliness,
659 ) -> std::result::Result<Arc<NetDir>, NetdirProviderShutdown> {
660 if let Ok(nd) = self.netdir(timeliness) {
661 return Ok(nd);
662 }
663
664 let mut stream = self.events();
665 loop {
666 // We need to retry `self.netdir()` before waiting for any stream events, to
667 // avoid deadlock.
668 //
669 // We ignore all errors here: they can all potentially be fixed by
670 // getting a fresh consensus, and they will all get warned about
671 // by the NetDirProvider itself.
672 if let Ok(nd) = self.netdir(timeliness) {
673 return Ok(nd);
674 }
675 match stream.next().await {
676 Some(_) => {}
677 None => {
678 return Err(NetdirProviderShutdown);
679 }
680 }
681 }
682 }
683
684 /// Wait until this provider lists `target`.
685 ///
686 /// NOTE: This might potentially wait indefinitely, if `target` never actually
687 /// becomes listed in the directory. It will exit if the `NetDirProvider` shuts down.
688 async fn wait_for_netdir_to_list(
689 &self,
690 target: &tor_linkspec::RelayIds,
691 timeliness: Timeliness,
692 ) -> std::result::Result<(), NetdirProviderShutdown> {
693 let mut events = self.events();
694 loop {
695 // See if the desired relay is in the netdir.
696 //
697 // We do this before waiting for any events, to avoid race conditions.
698 {
699 let netdir = self.wait_for_netdir(timeliness).await?;
700 if netdir.ids_listed(target) == Some(true) {
701 return Ok(());
702 }
703 // If we reach this point, then ids_listed returned `Some(false)`,
704 // meaning "This relay is definitely not in the current directory";
705 // or it returned `None`, meaning "waiting for more information
706 // about this network directory."
707 // In both cases, it's reasonable to just wait for another netdir
708 // event and try again.
709 }
710 // We didn't find the relay; wait for the provider to have a new netdir
711 // or more netdir information.
712 if events.next().await.is_none() {
713 // The event stream is closed; the provider has shut down.
714 return Err(NetdirProviderShutdown);
715 }
716 }
717 }
718
719 /// Return the latest set of recommended and required protocols, if there is one.
720 ///
721 /// This may be more recent (or more available) than this provider's associated NetDir.
722 fn protocol_statuses(&self) -> Option<(SystemTime, Arc<netstatus::ProtoStatuses>)>;
723}
724
725impl<T> NetDirProvider for Arc<T>
726where
727 T: NetDirProvider,
728{
729 fn netdir(&self, timeliness: Timeliness) -> Result<Arc<NetDir>> {
730 self.deref().netdir(timeliness)
731 }
732
733 fn timely_netdir(&self) -> Result<Arc<NetDir>> {
734 self.deref().timely_netdir()
735 }
736
737 fn events(&self) -> BoxStream<'static, DirEvent> {
738 self.deref().events()
739 }
740
741 fn params(&self) -> Arc<dyn AsRef<NetParameters>> {
742 self.deref().params()
743 }
744
745 fn protocol_statuses(&self) -> Option<(SystemTime, Arc<netstatus::ProtoStatuses>)> {
746 self.deref().protocol_statuses()
747 }
748}
749
750 /// Helper trait: allows any `Arc<X>` to be upcast to an `Arc<dyn
751/// NetDirProvider>` if X is an implementation or supertrait of NetDirProvider.
752///
753/// This trait exists to work around a limitation in rust: when trait upcasting
754/// coercion is stable, this will be unnecessary.
755///
756/// The Rust tracking issue is <https://github.com/rust-lang/rust/issues/65991>.
757pub trait UpcastArcNetDirProvider {
758 /// Return a view of this object as an `Arc<dyn NetDirProvider>`
759 fn upcast_arc<'a>(self: Arc<Self>) -> Arc<dyn NetDirProvider + 'a>
760 where
761 Self: 'a;
762}
763
764impl<T> UpcastArcNetDirProvider for T
765where
766 T: NetDirProvider + Sized,
767{
768 fn upcast_arc<'a>(self: Arc<Self>) -> Arc<dyn NetDirProvider + 'a>
769 where
770 Self: 'a,
771 {
772 self
773 }
774}
775
776impl AsRef<NetParameters> for NetDir {
777 fn as_ref(&self) -> &NetParameters {
778 self.params()
779 }
780}
781
782 /// A partially built NetDir -- it can't be unwrapped until it has
783/// enough information to build safe paths.
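///
/// # Example
///
/// A sketch of the intended workflow, assuming `consensus` is an `MdConsensus`
/// and `mds` yields the corresponding `Microdesc`s (illustrative, not compiled
/// here):
///
/// ```ignore
/// let mut partial = PartialNetDir::new(consensus, None);
/// for md in mds {
///     partial.add_microdesc(md);
/// }
/// match partial.unwrap_if_sufficient() {
///     Ok(netdir) => { /* enough information to build circuits */ }
///     Err(partial) => { /* keep fetching microdescriptors */ }
/// }
/// ```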
784#[derive(Debug, Clone)]
785pub struct PartialNetDir {
786 /// The netdir that's under construction.
787 netdir: NetDir,
788
789 /// The previous netdir, if we had one
790 ///
791 /// Used as a cache, so we can reuse information
792 #[cfg(feature = "hs-common")]
793 prev_netdir: Option<Arc<NetDir>>,
794}
795
796/// A view of a relay on the Tor network, suitable for building circuits.
797// TODO: This should probably be a more specific struct, with a trait
798// that implements it.
799#[derive(Clone)]
800pub struct Relay<'a> {
801 /// A routerstatus entry for this relay.
802 rs: &'a netstatus::MdRouterStatus,
803 /// A microdescriptor for this relay.
804 md: &'a Microdesc,
805 /// The country code this relay is in, if we know one.
806 #[cfg(feature = "geoip")]
807 cc: Option<CountryCode>,
808}
809
810/// A relay that we haven't checked for validity or usability in
811/// routing.
812#[derive(Debug)]
813pub struct UncheckedRelay<'a> {
814 /// A routerstatus entry for this relay.
815 rs: &'a netstatus::MdRouterStatus,
816 /// A microdescriptor for this relay, if there is one.
817 md: Option<&'a Microdesc>,
818 /// The country code this relay is in, if we know one.
819 #[cfg(feature = "geoip")]
820 cc: Option<CountryCode>,
821}
822
823/// A partial or full network directory that we can download
824/// microdescriptors for.
825pub trait MdReceiver {
826 /// Return an iterator over the digests for all of the microdescriptors
827 /// that this netdir is missing.
828 fn missing_microdescs(&self) -> Box<dyn Iterator<Item = &MdDigest> + '_>;
829 /// Add a microdescriptor to this netdir, if it was wanted.
830 ///
831 /// Return true if it was indeed wanted.
832 fn add_microdesc(&mut self, md: Microdesc) -> bool;
833 /// Return the number of missing microdescriptors.
834 fn n_missing(&self) -> usize;
835}
836
837impl PartialNetDir {
838 /// Create a new PartialNetDir with a given consensus, and no
839 /// microdescriptors loaded.
840 ///
841 /// If `replacement_params` is provided, override network parameters from
842 /// the consensus with those from `replacement_params`.
843 pub fn new(
844 consensus: MdConsensus,
845 replacement_params: Option<&netstatus::NetParams<i32>>,
846 ) -> Self {
847 Self::new_inner(
848 consensus,
849 replacement_params,
850 #[cfg(feature = "geoip")]
851 None,
852 )
853 }
854
855 /// Create a new PartialNetDir with GeoIP support.
856 ///
857 /// This does the same thing as `new()`, except the provided GeoIP database is used to add
858 /// country codes to relays.
859 #[cfg(feature = "geoip")]
860 #[cfg_attr(docsrs, doc(cfg(feature = "geoip")))]
861 pub fn new_with_geoip(
862 consensus: MdConsensus,
863 replacement_params: Option<&netstatus::NetParams<i32>>,
864 geoip_db: &GeoipDb,
865 ) -> Self {
866 Self::new_inner(consensus, replacement_params, Some(geoip_db))
867 }
868
869 /// Implementation of the `new()` functions.
870 fn new_inner(
871 consensus: MdConsensus,
872 replacement_params: Option<&netstatus::NetParams<i32>>,
873 #[cfg(feature = "geoip")] geoip_db: Option<&GeoipDb>,
874 ) -> Self {
875 let mut params = NetParameters::default();
876
877 // (We ignore unrecognized options here, since they come from
878 // the consensus, and we don't expect to recognize everything
879 // there.)
880 let _ = params.saturating_update(consensus.params().iter());
881
882 // Now see if the user has any parameters to override.
883 // (We have to do this now, or else changes won't be reflected in our
884 // weights.)
885 if let Some(replacement) = replacement_params {
886 for u in params.saturating_update(replacement.iter()) {
887 warn!("Unrecognized option: override_net_params.{}", u);
888 }
889 }
890
891 // Compute the weights we'll want to use for these relays.
892 let weights = weight::WeightSet::from_consensus(&consensus, ¶ms);
893
894 let n_relays = consensus.c_relays().len();
895
896 let rsidx_by_missing = consensus
897 .c_relays()
898 .iter_enumerated()
899 .map(|(rsidx, rs)| (*rs.md_digest(), rsidx))
900 .collect();
901
902 let rsidx_by_rsa = consensus
903 .c_relays()
904 .iter_enumerated()
905 .map(|(rsidx, rs)| (*rs.rsa_identity(), rsidx))
906 .collect();
907
908 #[cfg(feature = "geoip")]
909 let country_codes = if let Some(db) = geoip_db {
910 consensus
911 .c_relays()
912 .iter()
913 .map(|rs| {
914 db.lookup_country_code_multi(rs.addrs().iter().map(|x| x.ip()))
915 .cloned()
916 })
917 .collect()
918 } else {
919 Default::default()
920 };
921
922 #[cfg(feature = "hs-common")]
923 let hsdir_rings = Arc::new({
924 let params = HsDirParams::compute(&consensus, ¶ms).expect("Invalid consensus!");
925 // TODO: It's a bit ugly to use expect above, but this function does
926 // not return a Result. On the other hand, the error conditions under which
927 // HsDirParams::compute can return Err are _very_ narrow and hard to
928 // hit; see documentation in that function. As such, we probably
929 // don't need to have this return a Result.
930
931 params.map(HsDirRing::empty_from_params)
932 });
933
934 let netdir = NetDir {
935 consensus: Arc::new(consensus),
936 params,
937 mds: vec![None; n_relays].into(),
938 rsidx_by_missing,
939 rsidx_by_rsa: Arc::new(rsidx_by_rsa),
940 rsidx_by_ed: HashMap::with_capacity(n_relays),
941 #[cfg(feature = "hs-common")]
942 hsdir_rings,
943 weights,
944 #[cfg(feature = "geoip")]
945 country_codes,
946 };
947
948 PartialNetDir {
949 netdir,
950 #[cfg(feature = "hs-common")]
951 prev_netdir: None,
952 }
953 }
954
955 /// Return the declared lifetime of this PartialNetDir.
956 pub fn lifetime(&self) -> &netstatus::Lifetime {
957 self.netdir.lifetime()
958 }
959
960 /// Record a previous netdir, which can be used for reusing cached information
961 //
962 // Fills in as many missing microdescriptors as possible in this
963 // netdir, using the microdescriptors from the previous netdir.
964 //
965 // With HS enabled, stores the netdir for reuse of relay hash ring index values.
966 #[allow(clippy::needless_pass_by_value)] // prev might, or might not, be stored
967 pub fn fill_from_previous_netdir(&mut self, prev: Arc<NetDir>) {
968 for md in prev.mds.iter().flatten() {
969 self.netdir.add_arc_microdesc(md.clone());
970 }
971
972 #[cfg(feature = "hs-common")]
973 {
974 self.prev_netdir = Some(prev);
975 }
976 }
977
978 /// Compute the hash ring(s) for this NetDir
979 #[cfg(feature = "hs-common")]
980 fn compute_rings(&mut self) {
981 let params = HsDirParams::compute(&self.netdir.consensus, &self.netdir.params)
982 .expect("Invalid consensus");
983 // TODO: see TODO by similar expect in new()
984
985 self.netdir.hsdir_rings =
986 Arc::new(params.map(|params| {
987 HsDirRing::compute(params, &self.netdir, self.prev_netdir.as_deref())
988 }));
989 }
990
991 /// Return true if there is enough information in this directory
992 /// to build multihop paths.
993 pub fn have_enough_paths(&self) -> bool {
994 self.netdir.have_enough_paths()
995 }
996 /// If this directory has enough information to build multihop
997 /// circuits, return it.
998 pub fn unwrap_if_sufficient(
999 #[allow(unused_mut)] mut self,
1000 ) -> std::result::Result<NetDir, PartialNetDir> {
1001 if self.netdir.have_enough_paths() {
1002 #[cfg(feature = "hs-common")]
1003 self.compute_rings();
1004 Ok(self.netdir)
1005 } else {
1006 Err(self)
1007 }
1008 }
1009}
1010
1011impl MdReceiver for PartialNetDir {
1012 fn missing_microdescs(&self) -> Box<dyn Iterator<Item = &MdDigest> + '_> {
1013 self.netdir.missing_microdescs()
1014 }
1015 fn add_microdesc(&mut self, md: Microdesc) -> bool {
1016 self.netdir.add_microdesc(md)
1017 }
1018 fn n_missing(&self) -> usize {
1019 self.netdir.n_missing()
1020 }
1021}
1022
1023impl NetDir {
1024 /// Return the declared lifetime of this NetDir.
1025 pub fn lifetime(&self) -> &netstatus::Lifetime {
1026 self.consensus.lifetime()
1027 }
1028
1029 /// Add `md` to this NetDir.
1030 ///
1031 /// Return true if we wanted it, and false otherwise.
1032 fn add_arc_microdesc(&mut self, md: Arc<Microdesc>) -> bool {
1033 if let Some(rsidx) = self.rsidx_by_missing.remove(md.digest()) {
1034 assert_eq!(self.c_relays()[rsidx].md_digest(), md.digest());
1035
1036 // There should never be two approved MDs in the same
1037 // consensus listing the same ID... but if there is,
1038 // we'll let the most recent one win.
1039 self.rsidx_by_ed.insert(*md.ed25519_id(), rsidx);
1040
1041 // Happy path: we did indeed want this one.
1042 self.mds[rsidx] = Some(md);
1043
1044 // Save some space in the missing-descriptor list.
1045 if self.rsidx_by_missing.len() < self.rsidx_by_missing.capacity() / 4 {
1046 self.rsidx_by_missing.shrink_to_fit();
1047 }
1048
1049 return true;
1050 }
1051
1052 // Either we already had it, or we never wanted it at all.
1053 false
1054 }
1055
1056 /// Construct a (possibly invalid) Relay object from a routerstatus and its
1057 /// index within the consensus.
1058 fn relay_from_rs_and_rsidx<'a>(
1059 &'a self,
1060 rs: &'a netstatus::MdRouterStatus,
1061 rsidx: RouterStatusIdx,
1062 ) -> UncheckedRelay<'a> {
1063 debug_assert_eq!(self.c_relays()[rsidx].rsa_identity(), rs.rsa_identity());
1064 let md = self.mds[rsidx].as_deref();
1065 if let Some(md) = md {
1066 debug_assert_eq!(rs.md_digest(), md.digest());
1067 }
1068
1069 UncheckedRelay {
1070 rs,
1071 md,
1072 #[cfg(feature = "geoip")]
1073 cc: self.country_codes.get(rsidx.0).copied().flatten(),
1074 }
1075 }
1076
1077 /// Return the value of the hsdir_n_replicas param.
1078 #[cfg(feature = "hs-common")]
1079 fn n_replicas(&self) -> u8 {
1080 self.params
1081 .hsdir_n_replicas
1082 .get()
1083 .try_into()
1084 .expect("BoundedInt did not enforce bounds")
1085 }
1086
1087 /// Return the spread parameter for the specified `op`.
1088 #[cfg(feature = "hs-common")]
1089 fn spread(&self, op: HsDirOp) -> usize {
1090 let spread = match op {
1091 HsDirOp::Download => self.params.hsdir_spread_fetch,
1092 #[cfg(feature = "hs-service")]
1093 HsDirOp::Upload => self.params.hsdir_spread_store,
1094 };
1095
1096 spread
1097 .get()
1098 .try_into()
1099 .expect("BoundedInt did not enforce bounds!")
1100 }
1101
1102 /// Select `spread` hsdir relays for the specified `hsid` from a given `ring`.
1103 ///
1104 /// Algorithm:
1105 ///
1106 /// for idx in 1..=n_replicas:
1107 /// - let H = hsdir_ring::onion_service_index(id, replica, rand,
1108 /// period).
1109 /// - Find the position of H within hsdir_ring.
1110 /// - Take elements from hsdir_ring starting at that position,
1111 /// adding them to Dirs until we have added `spread` new elements
1112 /// that were not there before.
1113 #[cfg(feature = "hs-common")]
1114 fn select_hsdirs<'h, 'r: 'h>(
1115 &'r self,
1116 hsid: HsBlindId,
1117 ring: &'h HsDirRing,
1118 spread: usize,
1119 ) -> impl Iterator<Item = Relay<'r>> + 'h {
1120 let n_replicas = self.n_replicas();
1121
1122 (1..=n_replicas) // 1-indexed !
1123 .flat_map({
1124 let mut selected_nodes = HashSet::new();
1125
1126 move |replica: u8| {
1127 let hsdir_idx = hsdir_ring::service_hsdir_index(&hsid, replica, ring.params());
1128
1129 ring.ring_items_at(hsdir_idx, spread, |(hsdir_idx, _)| {
1130 // According to rend-spec 2.2.3:
1131 // ... If any of those
1132 // nodes have already been selected for a lower-numbered replica of the
1133 // service, any nodes already chosen are disregarded (i.e. skipped over)
1134 // when choosing a replica's hsdir_spread_store nodes.
1135 selected_nodes.insert(*hsdir_idx)
1136 })
1137 .collect::<Vec<_>>()
1138 }
1139 })
1140 .filter_map(move |(_hsdir_idx, rs_idx)| {
1141 // This ought not to be None but let's not panic or bail if it is
1142 self.relay_by_rs_idx(*rs_idx)
1143 })
1144 }
1145
1146 /// Replace the overridden parameters in this netdir with `new_replacement`.
1147 ///
1148 /// After this function is done, the netdir's parameters will be those in
1149 /// the consensus, overridden by settings from `new_replacement`. Any
1150 /// settings in the old replacement parameters will be discarded.
1151 pub fn replace_overridden_parameters(&mut self, new_replacement: &netstatus::NetParams<i32>) {
1152 // TODO(nickm): This is largely duplicate code from PartialNetDir::new().
1153 let mut new_params = NetParameters::default();
1154 let _ = new_params.saturating_update(self.consensus.params().iter());
1155 for u in new_params.saturating_update(new_replacement.iter()) {
1156 warn!("Unrecognized option: override_net_params.{}", u);
1157 }
1158
1159 self.params = new_params;
1160 }
1161
1162 /// Return an iterator over all Relay objects, including invalid ones
1163 /// that we can't use.
1164 pub fn all_relays(&self) -> impl Iterator<Item = UncheckedRelay<'_>> {
1165 // TODO: I'd like it if we could memoize this so we don't have to
1166 // do so many hashtable lookups.
1167 self.c_relays()
1168 .iter_enumerated()
1169 .map(move |(rsidx, rs)| self.relay_from_rs_and_rsidx(rs, rsidx))
1170 }
1171 /// Return an iterator over all [usable](NetDir#usable) Relays.
1172 pub fn relays(&self) -> impl Iterator<Item = Relay<'_>> {
1173 self.all_relays().filter_map(UncheckedRelay::into_relay)
1174 }
1175
1176 /// Look up a relay's [`Microdesc`] by its [`RouterStatusIdx`]
1177 #[cfg_attr(not(feature = "hs-common"), allow(dead_code))]
1178 pub(crate) fn md_by_rsidx(&self, rsidx: RouterStatusIdx) -> Option<&Microdesc> {
1179 self.mds.get(rsidx)?.as_deref()
1180 }
1181
1182 /// Return a relay matching a given identity, if we have a
1183 /// _usable_ relay with that key.
1184 ///
1185 /// (Does not return [unusable](NetDir#usable) relays.)
1186 ///
1188 /// Note that a `None` answer is not always permanent: if a microdescriptor
1189 /// is subsequently added for a relay with this ID, the ID may become usable
1190 /// even if it was not usable before.
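///
/// # Example
///
/// A sketch of a lookup by Ed25519 identity, assuming `netdir` is a
/// [`NetDir`] and `ed_id` is an `Ed25519Identity` (illustrative only):
///
/// ```ignore
/// if let Some(relay) = netdir.by_id(&ed_id) {
///     // The relay is listed and usable.
/// }
/// ```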
1191 pub fn by_id<'a, T>(&self, id: T) -> Option<Relay<'_>>
1192 where
1193 T: Into<RelayIdRef<'a>>,
1194 {
1195 let id = id.into();
1196 let answer = match id {
1197 RelayIdRef::Ed25519(ed25519) => {
1198 let rsidx = *self.rsidx_by_ed.get(ed25519)?;
1199 let rs = self.c_relays().get(rsidx).expect("Corrupt index");
1200
1201 self.relay_from_rs_and_rsidx(rs, rsidx).into_relay()?
1202 }
1203 RelayIdRef::Rsa(rsa) => self
1204 .by_rsa_id_unchecked(rsa)
1205 .and_then(UncheckedRelay::into_relay)?,
1206 other_type => self.relays().find(|r| r.has_identity(other_type))?,
1207 };
1208 assert!(answer.has_identity(id));
1209 Some(answer)
1210 }
1211
1212 /// Obtain a `Relay` given a `RouterStatusIdx`
1213 ///
1214 /// Differs from `relay_from_rs_and_rsidx` as follows:
1215 /// * That function expects the caller to already have an `MdRouterStatus`;
1216 /// it checks with `debug_assert` that the relay in the netdir matches.
1217 /// * That function panics if the `RouterStatusIdx` is invalid; this one returns `None`.
1218 /// * That function returns an `UncheckedRelay`; this one a `Relay`.
1219 ///
1220 /// `None` could be returned here, even with a valid `rs_idx`,
1221 /// if `rs_idx` refers to an [unusable](NetDir#usable) relay.
1222 #[cfg_attr(not(feature = "hs-common"), allow(dead_code))]
1223 pub(crate) fn relay_by_rs_idx(&self, rs_idx: RouterStatusIdx) -> Option<Relay<'_>> {
1224 let rs = self.c_relays().get(rs_idx)?;
1225 let md = self.mds.get(rs_idx)?.as_deref();
1226 UncheckedRelay {
1227 rs,
1228 md,
1229 #[cfg(feature = "geoip")]
1230 cc: self.country_codes.get(rs_idx.0).copied().flatten(),
1231 }
1232 .into_relay()
1233 }
1234
1235 /// Return a relay with the same identities as those in `target`, if one
1236 /// exists.
1237 ///
1238 /// Does not return [unusable](NetDir#usable) relays.
1239 ///
1240 /// Note that a negative result from this method is not necessarily permanent:
1241 /// it may be the case that a relay exists,
1242 /// but we don't yet have enough information about it to know all of its IDs.
1243 /// To test whether a relay is *definitely* absent,
1244 /// use [`by_ids_detailed`](Self::by_ids_detailed)
1245 /// or [`ids_listed`](Self::ids_listed).
1246 ///
1247 /// # Limitations
1248 ///
1249 /// This will be very slow if `target` does not have an Ed25519 or RSA
1250 /// identity.
1251 pub fn by_ids<T>(&self, target: &T) -> Option<Relay<'_>>
1252 where
1253 T: HasRelayIds + ?Sized,
1254 {
1255 let mut identities = target.identities();
1256 // Don't try if there are no identities.
1257 let first_id = identities.next()?;
1258
1259 // Since there is at most one relay with each given ID type,
1260 // we only need to check the first relay we find.
1261 let candidate = self.by_id(first_id)?;
1262 if identities.all(|wanted_id| candidate.has_identity(wanted_id)) {
1263 Some(candidate)
1264 } else {
1265 None
1266 }
1267 }
1268
1269 /// Check whether there is a relay that has at least one identity from
1270 /// `target`, and which _could_ have every identity from `target`.
1271 /// If so, return such a relay.
1272 ///
1273 /// Return `Ok(None)` if we did not find a relay with any identity from `target`.
1274 ///
1275 /// Return `RelayLookupError::Impossible` if we found a relay with at least
1276 /// one identity from `target`, but that relay's other identities contradict
1277 /// what we learned from `target`.
1278 ///
1279 /// Does not return [unusable](NetDir#usable) relays.
1280 ///
1281 /// (This function is only useful if you need to distinguish the
1282 /// "impossible" case from the "no such relay known" case.)
1283 ///
1284 /// # Limitations
1285 ///
1286 /// This will be very slow if `target` does not have an Ed25519 or RSA
1287 /// identity.
1288 //
1289 // TODO HS: This function could use a better name.
1290 //
1291 // TODO: We could remove the feature restriction here once we think this API is
1292 // stable.
1293 #[cfg(feature = "hs-common")]
1294 pub fn by_ids_detailed<T>(
1295 &self,
1296 target: &T,
1297 ) -> std::result::Result<Option<Relay<'_>>, RelayLookupError>
1298 where
1299 T: HasRelayIds + ?Sized,
1300 {
1301 let candidate = target
1302 .identities()
1303 // Find all the relays that share any identity with this set of identities.
1304 .filter_map(|id| self.by_id(id))
1305 // We might find the same relay more than once under a different
1306 // identity, so we remove the duplicates.
1307 //
1308 // Since there is at most one relay per rsa identity per consensus,
1309 // this is a true uniqueness check under current construction rules.
1310 .unique_by(|r| r.rs.rsa_identity())
1311 // If we find two or more distinct relays, then we have a contradiction.
1312 .at_most_one()
1313 .map_err(|_| RelayLookupError::Impossible)?;
1314
1315 // If we have no candidate, return None early.
1316 let candidate = match candidate {
1317 Some(relay) => relay,
1318 None => return Ok(None),
1319 };
1320
1321 // Now we know we have a single candidate. Make sure that it does not have any
1322 // identity that does not match the target.
1323 if target
1324 .identities()
1325 .all(|wanted_id| match candidate.identity(wanted_id.id_type()) {
1326 None => true,
1327 Some(id) => id == wanted_id,
1328 })
1329 {
1330 Ok(Some(candidate))
1331 } else {
1332 Err(RelayLookupError::Impossible)
1333 }
1334 }
1335
1336 /// Tell whether this consensus definitely has (or definitely does not have) a
1337 /// relay matching the listed identities.
1338 ///
1339 /// `Some(true)` indicates that the relay exists.
1340 /// `Some(false)` indicates that the relay definitely does not exist.
1341 /// `None` indicates that we can't yet tell whether such a relay exists,
1342 /// due to missing information.
1343 fn id_pair_listed(&self, ed_id: &Ed25519Identity, rsa_id: &RsaIdentity) -> Option<bool> {
1344 let r = self.by_rsa_id_unchecked(rsa_id);
1345 match r {
1346 Some(unchecked) => {
1347 if !unchecked.rs.ed25519_id_is_usable() {
1348 return Some(false);
1349 }
1350 // If md is present, then it's listed iff we have the right
1351 // ed id. Otherwise we don't know if it's listed.
1352 unchecked.md.map(|md| md.ed25519_id() == ed_id)
1353 }
1354 None => {
1355 // Definitely not listed.
1356 Some(false)
1357 }
1358 }
1359 }
1360
1361 /// Check whether a relay exists (or may exist)
1362 /// with the same identities as those in `target`.
1363 ///
1364 /// `Some(true)` indicates that the relay exists.
1365 /// `Some(false)` indicates that the relay definitely does not exist.
1366 /// `None` indicates that we can't yet tell whether such a relay exists,
1367 /// due to missing information.
1368 pub fn ids_listed<T>(&self, target: &T) -> Option<bool>
1369 where
1370 T: HasRelayIds + ?Sized,
1371 {
1372 let rsa_id = target.rsa_identity();
1373 let ed25519_id = target.ed_identity();
1374
1375 // TODO: If we later support more identity key types, this will
1376 // become incorrect. This assertion might help us recognize that case.
1377 const_assert!(RelayIdType::COUNT == 2);
1378
1379 match (rsa_id, ed25519_id) {
1380 (Some(r), Some(e)) => self.id_pair_listed(e, r),
1381 (Some(r), None) => Some(self.rsa_id_is_listed(r)),
1382 (None, Some(e)) => {
1383 if self.rsidx_by_ed.contains_key(e) {
1384 Some(true)
1385 } else {
1386 None
1387 }
1388 }
1389 (None, None) => None,
1390 }
1391 }
1392
1393 /// Return a (possibly [unusable](NetDir#usable)) relay with a given RSA identity.
1394 ///
1395 /// This API can be used to find information about a relay that is listed in
1396 /// the current consensus, even if we don't yet have enough information
1397 /// (like a microdescriptor) about the relay to use it.
1398 #[cfg_attr(feature = "experimental-api", visibility::make(pub))]
1399 #[cfg_attr(docsrs, doc(cfg(feature = "experimental-api")))]
1400 fn by_rsa_id_unchecked(&self, rsa_id: &RsaIdentity) -> Option<UncheckedRelay<'_>> {
1401 let rsidx = *self.rsidx_by_rsa.get(rsa_id)?;
1402 let rs = self.c_relays().get(rsidx).expect("Corrupt index");
1403 assert_eq!(rs.rsa_identity(), rsa_id);
1404 Some(self.relay_from_rs_and_rsidx(rs, rsidx))
1405 }
1406 /// Return the relay with a given RSA identity, if we have one
1407 /// and it is [usable](NetDir#usable).
1408 fn by_rsa_id(&self, rsa_id: &RsaIdentity) -> Option<Relay<'_>> {
1409 self.by_rsa_id_unchecked(rsa_id)?.into_relay()
1410 }
1411 /// Return true if `rsa_id` is listed in this directory, even if it isn't
1412 /// currently usable.
1413 ///
1414 /// (An "[unusable](NetDir#usable)" relay in this context is one for which we don't have full
1415 /// directory information.)
1416 #[cfg_attr(feature = "experimental-api", visibility::make(pub))]
1417 #[cfg_attr(docsrs, doc(cfg(feature = "experimental-api")))]
1418 fn rsa_id_is_listed(&self, rsa_id: &RsaIdentity) -> bool {
1419 self.by_rsa_id_unchecked(rsa_id).is_some()
1420 }
1421
1422 /// List the hsdirs in this NetDir, that should be in the HSDir rings
1423 ///
1424 /// The results are not returned in any particular order.
1425 #[cfg(feature = "hs-common")]
1426 fn all_hsdirs(&self) -> impl Iterator<Item = (RouterStatusIdx, Relay<'_>)> {
1427 self.c_relays().iter_enumerated().filter_map(|(rsidx, rs)| {
1428 let relay = self.relay_from_rs_and_rsidx(rs, rsidx);
1429 relay.is_hsdir_for_ring().then_some(())?;
1430 let relay = relay.into_relay()?;
1431 Some((rsidx, relay))
1432 })
1433 }
1434
1435 /// Return the parameters from the consensus, clamped to the
1436 /// correct ranges, with defaults filled in.
1437 ///
1438 /// NOTE: that unsupported parameters aren't returned here; only those
1439 /// values configured in the `params` module are available.
1440 pub fn params(&self) -> &NetParameters {
1441 &self.params
1442 }
1443
1444 /// Return a [`ProtoStatus`](netstatus::ProtoStatus) that lists the
1445 /// network's current requirements and recommendations for the list of
1446 /// protocols that every relay must implement.
1447 //
1448 // TODO HS: I am not sure this is the right API; other alternatives would be:
1449 // * To expose the _required_ relay protocol list instead (since that's all that
1450 // onion service implementations need).
1451 // * To expose the client protocol list as well (for symmetry).
1452 // * To expose the MdConsensus instead (since that's more general, although
1453 // it restricts the future evolution of this API).
1454 //
1455 // I think that this is a reasonably good compromise for now, but I'm going
1456 // to put it behind the `hs-common` feature to give us time to consider more.
1457 #[cfg(feature = "hs-common")]
1458 pub fn relay_protocol_status(&self) -> &netstatus::ProtoStatus {
1459 self.consensus.relay_protocol_status()
1460 }
1461
1462 /// Return a [`ProtoStatus`](netstatus::ProtoStatus) that lists the
1463 /// network's current requirements and recommendations for the list of
1464 /// protocols that every relay must implement.
1465 //
1466 // TODO HS: See notes on relay_protocol_status above.
1467 #[cfg(feature = "hs-common")]
1468 pub fn client_protocol_status(&self) -> &netstatus::ProtoStatus {
1469 self.consensus.client_protocol_status()
1470 }
1471
1472 /// Return the weighted fraction of relays we can use. We only
1473 /// consider relays that match the predicate `usable`. We weight
1474 /// this bandwidth according to the provided `role`.
1475 ///
1476 /// If _no_ matching relays in the consensus have a nonzero
1477 /// weighted bandwidth value, we fall back to looking at the
1478 /// unweighted fraction of matching relays.
1479 ///
1480 /// If there are no matching relays in the consensus, we return 0.0.
1481 fn frac_for_role<'a, F>(&'a self, role: WeightRole, usable: F) -> f64
1482 where
1483 F: Fn(&UncheckedRelay<'a>) -> bool,
1484 {
1485 let mut total_weight = 0_u64;
1486 let mut have_weight = 0_u64;
1487 let mut have_count = 0_usize;
1488 let mut total_count = 0_usize;
1489
1490 for r in self.all_relays() {
1491 if !usable(&r) {
1492 continue;
1493 }
1494 let w = self.weights.weight_rs_for_role(r.rs, role);
1495 total_weight += w;
1496 total_count += 1;
1497 if r.is_usable() {
1498 have_weight += w;
1499 have_count += 1;
1500 }
1501 }
1502
1503 if total_weight > 0 {
1504 // The consensus lists some weighted bandwidth so return the
1505 // fraction of the weighted bandwidth for which we have
1506 // descriptors.
1507 (have_weight as f64) / (total_weight as f64)
1508 } else if total_count > 0 {
1509 // The consensus lists no weighted bandwidth for these relays,
1510 // but at least it does list relays. Return the fraction of
1511 // relays for which it we have descriptors.
1512 (have_count as f64) / (total_count as f64)
1513 } else {
1514 // There are no relays of this kind in the consensus. Return
1515 // 0.0, to avoid dividing by zero and giving NaN.
1516 0.0
1517 }
1518 }
1519 /// Return the estimated fraction of possible paths that we have
1520 /// enough microdescriptors to build.
1521 fn frac_usable_paths(&self) -> f64 {
1522 // TODO #504, TODO SPEC: We may want to add a set of is_flagged_fast() and/or
1523 // is_flagged_stable() checks here. This will require spec clarification.
1524 let f_g = self.frac_for_role(WeightRole::Guard, |u| {
1525 u.low_level_details().is_suitable_as_guard()
1526 });
1527 let f_m = self.frac_for_role(WeightRole::Middle, |_| true);
1528 let f_e = if self.all_relays().any(|u| u.rs.is_flagged_exit()) {
1529 self.frac_for_role(WeightRole::Exit, |u| u.rs.is_flagged_exit())
1530 } else {
1531 // If there are no exits at all, we use f_m here.
1532 f_m
1533 };
1534 f_g * f_m * f_e
1535 }
1536 /// Return true if there is enough information in this NetDir to build
1537 /// multihop circuits.
1538 fn have_enough_paths(&self) -> bool {
1539 // TODO-A001: This should check for our guards as well, and
1540 // make sure that if they're listed in the consensus, we have
1541 // the descriptors for them.
1542
1543 // If we can build a randomly chosen path with at least this
1544 // probability, we know enough information to participate
1545 // on the network.
1546
1547 let min_frac_paths: f64 = self.params().min_circuit_path_threshold.as_fraction();
1548
1549 // What fraction of paths can we build?
1550 let available = self.frac_usable_paths();
1551
1552 available >= min_frac_paths
1553 }
1554 /// Choose a relay at random.
1555 ///
1556 /// Each relay is chosen with probability proportional to its weight
1557 /// in the role `role`, and is only selected if the predicate `usable`
1558 /// returns true for it.
1559 ///
1560 /// This function returns None if (and only if) there are no relays
1561 /// with nonzero weight where `usable` returned true.
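///
/// # Example
///
/// A sketch of picking a middle relay, assuming `netdir` is a [`NetDir`] and
/// `rng` implements `rand::Rng` (illustrative only):
///
/// ```ignore
/// let relay = netdir.pick_relay(&mut rng, WeightRole::Middle, |_| true);
/// ```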
1562 //
1563 // TODO this API, with the `usable` closure, invites mistakes where we fail to
1564 // check conditions that are implied by the role we have selected for the relay:
1565 // call sites must include a call to `Relay::is_polarity_inverter()` or whatever.
1566 // IMO the `WeightRole` ought to imply a condition (and it should therefore probably
1567 // be renamed.) -Diziet
1568 pub fn pick_relay<'a, R, P>(
1569 &'a self,
1570 rng: &mut R,
1571 role: WeightRole,
1572 usable: P,
1573 ) -> Option<Relay<'a>>
1574 where
1575 R: rand::Rng,
1576 P: FnMut(&Relay<'a>) -> bool,
1577 {
1578 let relays: Vec<_> = self.relays().filter(usable).collect();
1579 // This algorithm uses rand::distr::WeightedIndex, and uses
1580 // gives O(n) time and space to build the index, plus O(log n)
1581 // sampling time.
1582 //
1583 // We might be better off building a WeightedIndex in advance
1584 // for each `role`, and then sampling it repeatedly until we
1585 // get a relay that satisfies `usable`. Or we might not --
1586 // that depends heavily on the actual particulars of our
1587 // inputs. We probably shouldn't make any changes there
1588 // unless profiling tells us that this function is in a hot
1589 // path.
1590 //
1591 // The C Tor sampling implementation goes through some trouble
1592 // here to try to make its path selection constant-time. I
1593 // believe that there is no actual remotely exploitable
1594 // side-channel here however. It could be worth analyzing in
1595 // the future.
1596 //
1597 // This code will give the wrong result if the total of all weights
1598 // can exceed u64::MAX. We make sure that can't happen when we
1599 // set up `self.weights`.
1600 match relays[..].choose_weighted(rng, |r| self.weights.weight_rs_for_role(r.rs, role)) {
1601 Ok(relay) => Some(relay.clone()),
1602 Err(WeightError::InsufficientNonZero) => {
1603 if relays.is_empty() {
1604 None
1605 } else {
1606 warn!(?self.weights, ?role,
1607 "After filtering, all {} relays had zero weight. Choosing one at random. See bug #1907.",
1608 relays.len());
1609 relays.choose(rng).cloned()
1610 }
1611 }
1612 Err(e) => {
1613 warn_report!(e, "Unexpected error while sampling a relay");
1614 None
1615 }
1616 }
1617 }
1618
1619 /// Choose `n` relays at random.
1620 ///
1621 /// Each relay is chosen with probability proportional to its weight
1622 /// in the role `role`, and is only selected if the predicate `usable`
1623 /// returns true for it.
1624 ///
1625 /// Relays are chosen without replacement: no relay will be
1626 /// returned twice. Therefore, the resulting vector may be smaller
1627 /// than `n` if we happen to have fewer than `n` appropriate relays.
1628 ///
1629 /// This function returns an empty vector if (and only if) there
1630 /// are no relays with nonzero weight where `usable` returned
1631 /// true.
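///
/// # Example
///
/// An illustrative sketch (not compiled as a doctest), with the same
/// assumptions as the [`pick_relay`](NetDir::pick_relay) example:
///
/// ```ignore
/// // Ask for up to 3 distinct exit-weighted relays, with no filtering
/// // beyond the usual usability requirements.
/// let picked = netdir.pick_n_relays(&mut rng, 3, WeightRole::Exit, |_| true);
/// assert!(picked.len() <= 3);
/// ```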
1632 #[allow(clippy::cognitive_complexity)] // all due to tracing crate.
1633 pub fn pick_n_relays<'a, R, P>(
1634 &'a self,
1635 rng: &mut R,
1636 n: usize,
1637 role: WeightRole,
1638 usable: P,
1639 ) -> Vec<Relay<'a>>
1640 where
1641 R: rand::Rng,
1642 P: FnMut(&Relay<'a>) -> bool,
1643 {
1644 let relays: Vec<_> = self.relays().filter(usable).collect();
1645 // NOTE: See discussion in pick_relay().
1646 let mut relays = match relays[..].choose_multiple_weighted(rng, n, |r| {
1647 self.weights.weight_rs_for_role(r.rs, role) as f64
1648 }) {
1649 Err(WeightError::InsufficientNonZero) => {
1650 // Too few relays had nonzero weights: return all of those that are okay.
1651 // (This behavior used to come up with rand 0.9; it no longer does,
1652 // but we still detect it.)
1653 let remaining: Vec<_> = relays
1654 .iter()
1655 .filter(|r| self.weights.weight_rs_for_role(r.rs, role) > 0)
1656 .cloned()
1657 .collect();
1658 if remaining.is_empty() {
1659 warn!(?self.weights, ?role,
1660 "After filtering, all {} relays had zero weight! Picking some at random. See bug #1907.",
1661 relays.len());
1662 if relays.len() >= n {
1663 relays.choose_multiple(rng, n).cloned().collect()
1664 } else {
1665 relays
1666 }
1667 } else {
1668 warn!(?self.weights, ?role,
1669 "After filtering, only had {}/{} relays with nonzero weight. Returning them all. See bug #1907.",
1670 remaining.len(), relays.len());
1671 remaining
1672 }
1673 }
1674 Err(e) => {
1675 warn_report!(e, "Unexpected error while sampling a set of relays");
1676 Vec::new()
1677 }
1678 Ok(iter) => {
1679 let selection: Vec<_> = iter.map(Relay::clone).collect();
1680 if selection.len() < n && selection.len() < relays.len() {
1681 warn!(?self.weights, ?role,
1682 "choose_multiple_weighted returned only {returned}, despite requesting {n}, \
1683 and having {filtered_len} available after filtering. See bug #1907.",
1684 returned=selection.len(), filtered_len=relays.len());
1685 }
1686 selection
1687 }
1688 };
1689 relays.shuffle(rng);
1690 relays
1691 }
1692
1693 /// Compute the weight with which `relay` will be selected for a given
1694 /// `role`.
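///
/// # Example
///
/// An illustrative sketch (not compiled as a doctest); `netdir: &NetDir` and
/// a usable `relay: Relay<'_>` from that netdir are assumed to be in scope:
///
/// ```ignore
/// let w = netdir.relay_weight(&relay, WeightRole::Guard);
/// let total = netdir.total_weight(WeightRole::Guard, |u| u.is_usable());
/// // `checked_div` returns None when the total weight is zero.
/// if let Some(frac) = w.checked_div(total) {
///     // `frac` is this relay's share of the usable guard-weighted bandwidth.
///     assert!(frac <= 1.0);
/// }
/// ```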
1695 pub fn relay_weight<'a>(&'a self, relay: &Relay<'a>, role: WeightRole) -> RelayWeight {
1696 RelayWeight(self.weights.weight_rs_for_role(relay.rs, role))
1697 }
1698
1699 /// Compute the total weight with which any relay matching `usable`
1700 /// will be selected for a given `role`.
1701 ///
1702 /// Note: because this function is used to assess the total
1703 /// properties of the consensus, the `usable` predicate takes an
1704 /// [`UncheckedRelay`] rather than a [`Relay`].
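///
/// # Example
///
/// An illustrative sketch (not compiled as a doctest), with `netdir: &NetDir`
/// assumed to be in scope:
///
/// ```ignore
/// // Total guard-role weight of everything listed in the consensus...
/// let all = netdir.total_weight(WeightRole::Guard, |_| true);
/// // ...versus only the relays that look suitable as guards.
/// let guards = netdir.total_weight(WeightRole::Guard, |u| {
///     u.low_level_details().is_suitable_as_guard()
/// });
/// assert!(guards.checked_div(all).unwrap_or(0.0) <= 1.0);
/// ```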
1705 pub fn total_weight<P>(&self, role: WeightRole, usable: P) -> RelayWeight
1706 where
1707 P: Fn(&UncheckedRelay<'_>) -> bool,
1708 {
1709 self.all_relays()
1710 .filter_map(|unchecked| {
1711 if usable(&unchecked) {
1712 Some(RelayWeight(
1713 self.weights.weight_rs_for_role(unchecked.rs, role),
1714 ))
1715 } else {
1716 None
1717 }
1718 })
1719 .sum()
1720 }
1721
1722 /// Compute the weight with which a relay with ID `rsa_id` would be
1723 /// selected for a given `role`.
1724 ///
1725 /// Note that the weight returned by this function assumes that the
1726 /// relay with that ID is actually [usable](NetDir#usable); if it isn't usable,
1727 /// then other weight-related functions will treat its weight as zero.
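///
/// # Example
///
/// An illustrative sketch (not compiled as a doctest); the RSA identity used
/// here is a made-up placeholder:
///
/// ```ignore
/// use tor_llcrypto::pk::rsa::RsaIdentity;
/// let rsa_id: RsaIdentity = [33; 20].into();
/// // `None` means that no relay with this RSA identity is listed at all.
/// let _weight = netdir.weight_by_rsa_id(&rsa_id, WeightRole::Guard);
/// ```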
1728 pub fn weight_by_rsa_id(&self, rsa_id: &RsaIdentity, role: WeightRole) -> Option<RelayWeight> {
1729 self.by_rsa_id_unchecked(rsa_id)
1730 .map(|unchecked| RelayWeight(self.weights.weight_rs_for_role(unchecked.rs, role)))
1731 }
1732
1733 /// Return all relays in this NetDir known to be in the same family as
1734 /// `relay`.
1735 ///
1736 /// This list of members will **not** necessarily include `relay` itself.
1737 ///
1738 /// # Limitations
1739 ///
1740 /// Two relays only belong to the same family if _each_ relay
1741 /// claims to share a family with the other. But if we are
1742 /// missing a microdescriptor for one of the relays listed by this
1743 /// relay, we cannot know whether it acknowledges family
1744 /// membership with this relay or not. Therefore, this function
1745 /// can omit family members for which there is not (as yet) any
1746 /// Relay object.
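///
/// # Example
///
/// An illustrative sketch (not compiled as a doctest); `netdir: &NetDir` and
/// a `relay: Relay<'_>` from that netdir are assumed to be in scope:
///
/// ```ignore
/// // Collect the Ed25519 identities of every mutually-acknowledged family
/// // member for which we currently hold a microdescriptor.
/// let family: Vec<_> = netdir
///     .known_family_members(&relay)
///     .map(|member| *member.id())
///     .collect();
/// ```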
1747 pub fn known_family_members<'a>(
1748 &'a self,
1749 relay: &'a Relay<'a>,
1750 ) -> impl Iterator<Item = Relay<'a>> {
1751 let relay_rsa_id = relay.rsa_id();
1752 relay.md.family().members().filter_map(move |other_rsa_id| {
1753 self.by_rsa_id(other_rsa_id)
1754 .filter(|other_relay| other_relay.md.family().contains(relay_rsa_id))
1755 })
1756 }
1757
1758 /// Return the current hidden service directory "time period".
1759 ///
1760 /// Specifically, this returns the time period that contains the beginning
1761 /// of the validity period of this `NetDir`'s consensus. That time period
1762 /// is the one we use when acting as a hidden service client.
1763 #[cfg(feature = "hs-common")]
1764 pub fn hs_time_period(&self) -> TimePeriod {
1765 self.hsdir_rings.current.time_period()
1766 }
1767
1768 /// Return the [`HsDirParams`] of all the relevant hidden service directory "time periods"
1769 ///
1770 /// This includes the current time period (as from
1771 /// [`.hs_time_period`](NetDir::hs_time_period))
1772 /// plus additional time periods that we publish descriptors for when we are
1773 /// acting as a hidden service.
1774 #[cfg(feature = "hs-service")]
1775 pub fn hs_all_time_periods(&self) -> Vec<HsDirParams> {
1776 self.hsdir_rings
1777 .iter()
1778 .map(|r| r.params().clone())
1779 .collect()
1780 }
1781
1782 /// Return the relays in this network directory that will be used as hidden service directories
1783 ///
1784 /// These are suitable to retrieve a given onion service's descriptor at a given time period.
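///
/// # Example
///
/// An illustrative sketch (not compiled as a doctest); it assumes a
/// `netdir: &NetDir`, the service's blinded identity `hsid: HsBlindId`, and an
/// `rng` implementing `rand::Rng` are already in scope:
///
/// ```ignore
/// // Ask for the HsDirs to query for the *current* time period.
/// let period = netdir.hs_time_period();
/// let hsdirs = netdir
///     .hs_dirs_download(hsid, period, &mut rng)
///     .expect("ring/time-period mismatch");
/// // The returned list is already shuffled; try the relays in order.
/// for dir in &hsdirs {
///     // ... attempt to fetch the descriptor from `dir` ...
/// }
/// ```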
1785 #[cfg(feature = "hs-common")]
1786 pub fn hs_dirs_download<'r, R>(
1787 &'r self,
1788 hsid: HsBlindId,
1789 period: TimePeriod,
1790 rng: &mut R,
1791 ) -> std::result::Result<Vec<Relay<'r>>, Bug>
1792 where
1793 R: rand::Rng,
1794 {
1795 // Algorithm:
1796 //
1797 // 1. Determine which HsDirRing to use, based on the time period.
1798 // 2. Find the shared random value that's associated with that HsDirRing.
1799 // 3. Choose spread = the parameter `hsdir_spread_fetch`
1800 // 4. Let n_replicas = the parameter `hsdir_n_replicas`.
1801 // 5. Initialize Dirs = []
1802 // 6. for idx in 1..=n_replicas:
1803 // - let H = hsdir_ring::onion_service_index(id, replica, rand,
1804 // period).
1805 // - Find the position of H within hsdir_ring.
1806 // - Take elements from hsdir_ring starting at that position,
1807 // adding them to Dirs until we have added `spread` new elements
1808 // that were not there before.
1809 // 7. Shuffle Dirs
1810 // 8. return Dirs.
1811
1812 let spread = self.spread(HsDirOp::Download);
1813
1814 // When downloading, only look at relays on current ring.
1815 let ring = &self.hsdir_rings.current;
1816
1817 if ring.params().time_period != period {
1818 return Err(internal!(
1819 "our current ring is not associated with the requested time period!"
1820 ));
1821 }
1822
1823 let mut hs_dirs = self.select_hsdirs(hsid, ring, spread).collect_vec();
1824
1825 // When downloading, the order of the returned relays is random.
1826 hs_dirs.shuffle(rng);
1827
1828 Ok(hs_dirs)
1829 }
1830
1831 /// Return the relays in this network directory that will be used as hidden service directories
1832 ///
1833 /// Returns the relays that are suitable for storing a given onion service's descriptors at the
1834 /// given time period.
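///
/// # Example
///
/// An illustrative sketch (not compiled as a doctest) of a service picking the
/// HsDirs for every relevant time period. `blinded_id_for` is a hypothetical
/// helper standing in for however the caller derives the blinded ID for each
/// period; the `?` assumes a surrounding function that returns a `Result`.
///
/// ```ignore
/// for params in netdir.hs_all_time_periods() {
///     let period = params.time_period();
///     let hsid = blinded_id_for(period); // hypothetical helper
///     let hsdirs: Vec<_> = netdir.hs_dirs_upload(hsid, period)?.collect();
///     // ... upload the descriptor for `period` to each relay in `hsdirs` ...
/// }
/// ```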
1835 #[cfg(feature = "hs-service")]
1836 pub fn hs_dirs_upload(
1837 &self,
1838 hsid: HsBlindId,
1839 period: TimePeriod,
1840 ) -> std::result::Result<impl Iterator<Item = Relay<'_>>, Bug> {
1841 // Algorithm:
1842 //
1843 // 1. Choose spread = the parameter `hsdir_spread_store`
1844 // 2. Determine which HsDirRing to use, based on the time period.
1845 // 3. Find the shared random value that's associated with that HsDirRing.
1846 // 4. Let n_replicas = the parameter `hsdir_n_replicas`.
1847 // 5. Initialize Dirs = []
1848 // 6. for idx in 1..=n_replicas:
1849 // - let H = hsdir_ring::onion_service_index(id, replica, rand,
1850 // period).
1851 // - Find the position of H within hsdir_ring.
1852 // - Take elements from hsdir_ring starting at that position,
1853 // adding them to Dirs until we have added `spread` new elements
1854 // that were not there before.
1855 // 7. return Dirs.
1856 let spread = self.spread(HsDirOp::Upload);
1857
1858 // For each HsBlindId, determine which HsDirRing to use.
1859 let rings = self
1860 .hsdir_rings
1861 .iter()
1862 .filter_map(move |ring| {
1863 // Make sure the ring matches the TP of the hsid it's matched with.
1864 (ring.params().time_period == period).then_some((ring, hsid, period))
1865 })
1866 .collect::<Vec<_>>();
1867
1868 // The specified period should have an associated ring.
1869 if !rings.iter().any(|(_, _, tp)| *tp == period) {
1870 return Err(internal!(
1871 "the specified time period does not have an associated ring"
1872 ));
1873 };
1874
1875 // Now that we've matched each `hsid` with the ring associated with its TP, we can start
1876 // selecting replicas from each ring.
1877 Ok(rings.into_iter().flat_map(move |(ring, hsid, period)| {
1878 assert_eq!(period, ring.params().time_period());
1879 self.select_hsdirs(hsid, ring, spread)
1880 }))
1881 }
1882
1883 /// Return the relays in this network directory that will be used as hidden service directories
1884 ///
1885 /// Depending on `op`,
1886 /// these are suitable to either store, or retrieve, a
1887 /// given onion service's descriptor at a given time period.
1888 ///
1889 /// When `op` is `Download`, the order is random.
1890 /// When `op` is `Upload`, the order is not specified.
1891 ///
1892 /// Return an error if the time period is not one returned by
1893 /// `onion_service_time_period` or `onion_service_secondary_time_periods`.
1894 //
1895 // TODO: make HsDirOp pub(crate) once this is removed
1896 #[cfg(feature = "hs-common")]
1897 #[deprecated(note = "Use hs_dirs_upload or hs_dirs_download instead")]
1898 pub fn hs_dirs<'r, R>(&'r self, hsid: &HsBlindId, op: HsDirOp, rng: &mut R) -> Vec<Relay<'r>>
1899 where
1900 R: rand::Rng,
1901 {
1902 // Algorithm:
1903 //
1904 // 1. Determine which HsDirRing to use, based on the time period.
1905 // 2. Find the shared random value that's associated with that HsDirRing.
1906 // 3. Choose spread = the parameter `hsdir_spread_store` or
1907 // `hsdir_spread_fetch` based on `op`.
1908 // 4. Let n_replicas = the parameter `hsdir_n_replicas`.
1909 // 5. Initialize Dirs = []
1910 // 6. for idx in 1..=n_replicas:
1911 // - let H = hsdir_ring::onion_service_index(id, replica, rand,
1912 // period).
1913 // - Find the position of H within hsdir_ring.
1914 // - Take elements from hsdir_ring starting at that position,
1915 // adding them to Dirs until we have added `spread` new elements
1916 // that were not there before.
1917 // 7. return Dirs.
1918 let n_replicas = self
1919 .params
1920 .hsdir_n_replicas
1921 .get()
1922 .try_into()
1923 .expect("BoundedInt did not enforce bounds");
1924
1925 let spread = match op {
1926 HsDirOp::Download => self.params.hsdir_spread_fetch,
1927 #[cfg(feature = "hs-service")]
1928 HsDirOp::Upload => self.params.hsdir_spread_store,
1929 };
1930
1931 let spread = spread
1932 .get()
1933 .try_into()
1934 .expect("BoundedInt did not enforce bounds!");
1935
1936 // TODO: I may be wrong here, but I suspect that this function may
1937 // need refactoring so that it does not look at _all_ of the HsDirRings,
1938 // but only at the ones that correspond to time periods for which
1939 // HsBlindId is valid. Or I could be mistaken, in which case we should
1940 // have a comment to explain why I am, since the logic is subtle.
1941 // (For clients, there is only one ring.) -nickm
1942 //
1943 // (Actually, there is no need to follow through with the above TODO,
1944 // since this function is deprecated, and not used anywhere but the
1945 // tests.)
1946
1947 let mut hs_dirs = self
1948 .hsdir_rings
1949 .iter_for_op(op)
1950 .cartesian_product(1..=n_replicas) // 1-indexed !
1951 .flat_map({
1952 let mut selected_nodes = HashSet::new();
1953
1954 move |(ring, replica): (&HsDirRing, u8)| {
1955 let hsdir_idx = hsdir_ring::service_hsdir_index(hsid, replica, ring.params());
1956
1957 ring.ring_items_at(hsdir_idx, spread, |(hsdir_idx, _)| {
1958 // According to rend-spec 2.2.3:
1959 // ... If any of those
1960 // nodes have already been selected for a lower-numbered replica of the
1961 // service, any nodes already chosen are disregarded (i.e. skipped over)
1962 // when choosing a replica's hsdir_spread_store nodes.
1963 selected_nodes.insert(*hsdir_idx)
1964 })
1965 .collect::<Vec<_>>()
1966 }
1967 })
1968 .filter_map(|(_hsdir_idx, rs_idx)| {
1969 // This ought not to be None but let's not panic or bail if it is
1970 self.relay_by_rs_idx(*rs_idx)
1971 })
1972 .collect_vec();
1973
1974 match op {
1975 HsDirOp::Download => {
1976 // When `op` is `Download`, the order is random.
1977 hs_dirs.shuffle(rng);
1978 }
1979 #[cfg(feature = "hs-service")]
1980 HsDirOp::Upload => {
1981 // When `op` is `Upload`, the order is not specified.
1982 }
1983 }
1984
1985 hs_dirs
1986 }
1987}
1988
1989impl MdReceiver for NetDir {
1990 fn missing_microdescs(&self) -> Box<dyn Iterator<Item = &MdDigest> + '_> {
1991 Box::new(self.rsidx_by_missing.keys())
1992 }
1993 fn add_microdesc(&mut self, md: Microdesc) -> bool {
1994 self.add_arc_microdesc(Arc::new(md))
1995 }
1996 fn n_missing(&self) -> usize {
1997 self.rsidx_by_missing.len()
1998 }
1999}
2000
2001impl<'a> UncheckedRelay<'a> {
2002 /// Return an [`UncheckedRelayDetails`](details::UncheckedRelayDetails) for this relay.
2003 ///
2004 /// Callers should generally avoid using this information directly if they can;
2005 /// it's better to use a higher-level function that exposes semantic information
2006 /// rather than these properties.
2007 pub fn low_level_details(&self) -> details::UncheckedRelayDetails<'_> {
2008 details::UncheckedRelayDetails(self)
2009 }
2010
2011 /// Return true if this relay is valid and [usable](NetDir#usable).
2012 ///
2013 /// This function should return `true` for every Relay we expose
2014 /// to the user.
2015 pub fn is_usable(&self) -> bool {
2016 // No need to check for 'valid' or 'running': they are implicit.
2017 self.md.is_some() && self.rs.ed25519_id_is_usable()
2018 }
2019 /// If this is [usable](NetDir#usable), return a corresponding Relay object.
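///
/// # Example
///
/// An illustrative sketch (not compiled as a doctest), with `netdir: &NetDir`
/// assumed to be in scope: filtering the full consensus down to usable relays
/// by hand, which is essentially what [`NetDir::relays`] does for you.
///
/// ```ignore
/// let usable: Vec<_> = netdir
///     .all_relays()
///     .filter_map(|unchecked| unchecked.into_relay())
///     .collect();
/// ```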
2020 pub fn into_relay(self) -> Option<Relay<'a>> {
2021 if self.is_usable() {
2022 Some(Relay {
2023 rs: self.rs,
2024 md: self.md?,
2025 #[cfg(feature = "geoip")]
2026 cc: self.cc,
2027 })
2028 } else {
2029 None
2030 }
2031 }
2032
2033 /// Return true if this relay is a hidden service directory
2034 ///
2035 /// I.e., if it is to be included in the hsdir ring.
2036 #[cfg(feature = "hs-common")]
2037 pub(crate) fn is_hsdir_for_ring(&self) -> bool {
2038 // TODO: are there any other flags we should check?
2039 // rend-spec-v3 2.2.3 says just
2040 // "each node listed in the current consensus with the HSDir flag"
2041 // Do we need to check ed25519_id_is_usable ?
2042 // See also https://gitlab.torproject.org/tpo/core/arti/-/issues/504
2043 self.rs.is_flagged_hsdir()
2044 }
2045}
2046
2047impl<'a> Relay<'a> {
2048 /// Return a [`RelayDetails`](details::RelayDetails) for this relay.
2049 ///
2050 /// Callers should generally avoid using this information directly if they can;
2051 /// it's better to use a higher-level function that exposes semantic information
2052 /// rather than these properties.
2053 pub fn low_level_details(&self) -> details::RelayDetails<'_> {
2054 details::RelayDetails(self)
2055 }
2056
2057 /// Return the Ed25519 ID for this relay.
2058 pub fn id(&self) -> &Ed25519Identity {
2059 self.md.ed25519_id()
2060 }
2061 /// Return the RsaIdentity for this relay.
2062 pub fn rsa_id(&self) -> &RsaIdentity {
2063 self.rs.rsa_identity()
2064 }
2065
2066 /// Return a reference to this relay's "router status" entry in
2067 /// the consensus.
2068 ///
2069 /// The router status entry contains information about the relay
2070 /// that the authorities voted on directly. For most use cases,
2071 /// you shouldn't need it.
2072 ///
2073 /// This function is only available if the crate was built with
2074 /// its `experimental-api` feature.
2075 #[cfg(feature = "experimental-api")]
2076 pub fn rs(&self) -> &netstatus::MdRouterStatus {
2077 self.rs
2078 }
2079 /// Return a reference to this relay's "microdescriptor" entry in
2080 /// the consensus.
2081 ///
2082 /// A "microdescriptor" is a synopsis of the information about a relay,
2083 /// used to determine its capabilities and route traffic through it.
2084 /// For most use cases, you shouldn't need it.
2085 ///
2086 /// This function is only available if the crate was built with
2087 /// its `experimental-api` feature.
2088 #[cfg(feature = "experimental-api")]
2089 pub fn md(&self) -> &Microdesc {
2090 self.md
2091 }
2092}
2093
2094/// An error value returned from [`NetDir::by_ids_detailed`].
2095#[cfg(feature = "hs-common")]
2096#[derive(Clone, Debug, thiserror::Error)]
2097#[non_exhaustive]
2098pub enum RelayLookupError {
2099 /// We found a relay whose presence indicates that the provided set of
2100 /// identities is impossible to resolve.
2101 #[error("Provided set of identities is impossible according to consensus.")]
2102 Impossible,
2103}
2104
2105impl<'a> HasAddrs for Relay<'a> {
2106 fn addrs(&self) -> &[std::net::SocketAddr] {
2107 self.rs.addrs()
2108 }
2109}
2110#[cfg(feature = "geoip")]
2111#[cfg_attr(docsrs, doc(cfg(feature = "geoip")))]
2112impl<'a> HasCountryCode for Relay<'a> {
2113 fn country_code(&self) -> Option<CountryCode> {
2114 self.cc
2115 }
2116}
2117impl<'a> tor_linkspec::HasRelayIdsLegacy for Relay<'a> {
2118 fn ed_identity(&self) -> &Ed25519Identity {
2119 self.id()
2120 }
2121 fn rsa_identity(&self) -> &RsaIdentity {
2122 self.rsa_id()
2123 }
2124}
2125
2126impl<'a> HasRelayIds for UncheckedRelay<'a> {
2127 fn identity(&self, key_type: RelayIdType) -> Option<RelayIdRef<'_>> {
2128 match key_type {
2129 RelayIdType::Ed25519 if self.rs.ed25519_id_is_usable() => {
2130 self.md.map(|m| m.ed25519_id().into())
2131 }
2132 RelayIdType::Rsa => Some(self.rs.rsa_identity().into()),
2133 _ => None,
2134 }
2135 }
2136}
2137#[cfg(feature = "geoip")]
2138impl<'a> HasCountryCode for UncheckedRelay<'a> {
2139 fn country_code(&self) -> Option<CountryCode> {
2140 self.cc
2141 }
2142}
2143
2144impl<'a> DirectChanMethodsHelper for Relay<'a> {}
2145impl<'a> ChanTarget for Relay<'a> {}
2146
2147impl<'a> tor_linkspec::CircTarget for Relay<'a> {
2148 fn ntor_onion_key(&self) -> &ll::pk::curve25519::PublicKey {
2149 self.md.ntor_key()
2150 }
2151 fn protovers(&self) -> &tor_protover::Protocols {
2152 self.rs.protovers()
2153 }
2154}
2155
2156#[cfg(test)]
2157mod test {
2158 // @@ begin test lint list maintained by maint/add_warning @@
2159 #![allow(clippy::bool_assert_comparison)]
2160 #![allow(clippy::clone_on_copy)]
2161 #![allow(clippy::dbg_macro)]
2162 #![allow(clippy::mixed_attributes_style)]
2163 #![allow(clippy::print_stderr)]
2164 #![allow(clippy::print_stdout)]
2165 #![allow(clippy::single_char_pattern)]
2166 #![allow(clippy::unwrap_used)]
2167 #![allow(clippy::unchecked_duration_subtraction)]
2168 #![allow(clippy::useless_vec)]
2169 #![allow(clippy::needless_pass_by_value)]
2170 //! <!-- @@ end test lint list maintained by maint/add_warning @@ -->
2171 #![allow(clippy::cognitive_complexity)]
2172 use super::*;
2173 use crate::testnet::*;
2174 use float_eq::assert_float_eq;
2175 use std::collections::HashSet;
2176 use std::time::Duration;
2177 use tor_basic_utils::test_rng::{self, testing_rng};
2178 use tor_linkspec::{RelayIdType, RelayIds};
2179
2180 #[cfg(feature = "hs-common")]
2181 fn dummy_hs_blind_id() -> HsBlindId {
2182 let hsid = [2, 1, 1, 1].iter().cycle().take(32).cloned().collect_vec();
2183 let hsid = Ed25519Identity::new(hsid[..].try_into().unwrap());
2184 HsBlindId::from(hsid)
2185 }
2186
2187 // Basic functionality for a partial netdir: Add microdescriptors,
2188 // then you have a netdir.
2189 #[test]
2190 fn partial_netdir() {
2191 let (consensus, microdescs) = construct_network().unwrap();
2192 let dir = PartialNetDir::new(consensus, None);
2193
2194 // Check the lifetime
2195 let lifetime = dir.lifetime();
2196 assert_eq!(
2197 lifetime
2198 .valid_until()
2199 .duration_since(lifetime.valid_after())
2200 .unwrap(),
2201 Duration::new(86400, 0)
2202 );
2203
2204 // No microdescriptors, so we don't have enough paths, and can't
2205 // advance.
2206 assert!(!dir.have_enough_paths());
2207 let mut dir = match dir.unwrap_if_sufficient() {
2208 Ok(_) => panic!(),
2209 Err(d) => d,
2210 };
2211
2212 let missing: HashSet<_> = dir.missing_microdescs().collect();
2213 assert_eq!(missing.len(), 40);
2214 assert_eq!(missing.len(), dir.netdir.c_relays().len());
2215 for md in &microdescs {
2216 assert!(missing.contains(md.digest()));
2217 }
2218
2219 // Now add all the mds and try again.
2220 for md in microdescs {
2221 let wanted = dir.add_microdesc(md);
2222 assert!(wanted);
2223 }
2224
2225 let missing: HashSet<_> = dir.missing_microdescs().collect();
2226 assert!(missing.is_empty());
2227 assert!(dir.have_enough_paths());
2228 let _complete = match dir.unwrap_if_sufficient() {
2229 Ok(d) => d,
2230 Err(_) => panic!(),
2231 };
2232 }
2233
2234 #[test]
2235 fn override_params() {
2236 let (consensus, _microdescs) = construct_network().unwrap();
2237 let override_p = "bwweightscale=2 doesnotexist=77 circwindow=500"
2238 .parse()
2239 .unwrap();
2240 let dir = PartialNetDir::new(consensus.clone(), Some(&override_p));
2241 let params = &dir.netdir.params;
2242 assert_eq!(params.bw_weight_scale.get(), 2);
2243 assert_eq!(params.circuit_window.get(), 500_i32);
2244
2245 // try again without the override.
2246 let dir = PartialNetDir::new(consensus, None);
2247 let params = &dir.netdir.params;
2248 assert_eq!(params.bw_weight_scale.get(), 1_i32);
2249 assert_eq!(params.circuit_window.get(), 1000_i32);
2250 }
2251
2252 #[test]
2253 fn fill_from_previous() {
2254 let (consensus, microdescs) = construct_network().unwrap();
2255
2256 let mut dir = PartialNetDir::new(consensus.clone(), None);
2257 for md in microdescs.iter().skip(2) {
2258 let wanted = dir.add_microdesc(md.clone());
2259 assert!(wanted);
2260 }
2261 let dir1 = dir.unwrap_if_sufficient().unwrap();
2262 assert_eq!(dir1.missing_microdescs().count(), 2);
2263
2264 let mut dir = PartialNetDir::new(consensus, None);
2265 assert_eq!(dir.missing_microdescs().count(), 40);
2266 dir.fill_from_previous_netdir(Arc::new(dir1));
2267 assert_eq!(dir.missing_microdescs().count(), 2);
2268 }
2269
2270 #[test]
2271 fn path_count() {
2272 let low_threshold = "min_paths_for_circs_pct=64".parse().unwrap();
2273 let high_threshold = "min_paths_for_circs_pct=65".parse().unwrap();
2274
2275 let (consensus, microdescs) = construct_network().unwrap();
2276
2277 let mut dir = PartialNetDir::new(consensus.clone(), Some(&low_threshold));
2278 for (pos, md) in microdescs.iter().enumerate() {
2279 if pos % 7 == 2 {
2280 continue; // skip a few relays.
2281 }
2282 dir.add_microdesc(md.clone());
2283 }
2284 let dir = dir.unwrap_if_sufficient().unwrap();
2285
2286 // We have 40 relays that we know about from the consensus.
2287 assert_eq!(dir.all_relays().count(), 40);
2288
2289 // But only 34 are usable.
2290 assert_eq!(dir.relays().count(), 34);
2291
2292 // For guards: mds 20..=39 correspond to Guard relays.
2293 // Their bandwidth is 2*(1000+2000+...10000) = 110_000.
2294 // We skipped 23, 30, and 37. They have bandwidth
2295 // 4000 + 1000 + 8000 = 13_000. So our fractional bandwidth
2296 // should be (110-13)/110.
2297 let f = dir.frac_for_role(WeightRole::Guard, |u| u.rs.is_flagged_guard());
2298 assert!(((97.0 / 110.0) - f).abs() < 0.000001);
2299
2300 // For exits: mds 10..=19 and 30..=39 correspond to Exit relays.
2301 // We skipped 16, 30, and 37. Per above our fractional bandwidth is
2302 // (110-16)/110.
2303 let f = dir.frac_for_role(WeightRole::Exit, |u| u.rs.is_flagged_exit());
2304 assert!(((94.0 / 110.0) - f).abs() < 0.000001);
2305
2306 // For middles: all relays are middles. We skipped 2, 9, 16,
2307 // 23, 30, and 37. Per above our fractional bandwidth is
2308 // (220-33)/220
2309 let f = dir.frac_for_role(WeightRole::Middle, |_| true);
2310 assert!(((187.0 / 220.0) - f).abs() < 0.000001);
2311
2312 // Multiplying those together, we get the fraction of paths we can
2313 // build at ~0.64052066, which is above the threshold we set above for
2314 // MinPathsForCircsPct.
2315 let f = dir.frac_usable_paths();
2316 assert!((f - 0.64052066).abs() < 0.000001);
2317
2318 // But if we try again with a slightly higher threshold...
2319 let mut dir = PartialNetDir::new(consensus, Some(&high_threshold));
2320 for (pos, md) in microdescs.into_iter().enumerate() {
2321 if pos % 7 == 2 {
2322 continue; // skip a few relays.
2323 }
2324 dir.add_microdesc(md);
2325 }
2326 assert!(dir.unwrap_if_sufficient().is_err());
2327 }
2328
2329 /// Return a 3-tuple for use by `test_pick_*()` of an Rng, a number of
2330 /// iterations, and a tolerance.
2331 ///
2332 /// If the Rng is deterministic (the default), we can use a faster setup,
2333 /// with a higher tolerance and fewer iterations. But if you've explicitly
2334 /// opted into randomization (or are replaying a seed from an earlier
2335 /// randomized test), we give you more iterations and a tighter tolerance.
2336 fn testing_rng_with_tolerances() -> (impl rand::Rng, usize, f64) {
2337 // Use a deterministic RNG if none is specified, since this is slow otherwise.
2338 let config = test_rng::Config::from_env().unwrap_or(test_rng::Config::Deterministic);
2339 let (iters, tolerance) = match config {
2340 test_rng::Config::Deterministic => (5000, 0.02),
2341 _ => (50000, 0.01),
2342 };
2343 (config.into_rng(), iters, tolerance)
2344 }
2345
2346 #[test]
2347 fn test_pick() {
2348 let (consensus, microdescs) = construct_network().unwrap();
2349 let mut dir = PartialNetDir::new(consensus, None);
2350 for md in microdescs.into_iter() {
2351 let wanted = dir.add_microdesc(md.clone());
2352 assert!(wanted);
2353 }
2354 let dir = dir.unwrap_if_sufficient().unwrap();
2355
2356 let (mut rng, total, tolerance) = testing_rng_with_tolerances();
2357
2358 let mut picked = [0_isize; 40];
2359 for _ in 0..total {
2360 let r = dir.pick_relay(&mut rng, WeightRole::Middle, |r| {
2361 r.low_level_details().supports_exit_port_ipv4(80)
2362 });
2363 let r = r.unwrap();
2364 let id_byte = r.identity(RelayIdType::Rsa).unwrap().as_bytes()[0];
2365 picked[id_byte as usize] += 1;
2366 }
2367 // non-exits should never get picked.
2368 picked[0..10].iter().for_each(|x| assert_eq!(*x, 0));
2369 picked[20..30].iter().for_each(|x| assert_eq!(*x, 0));
2370
2371 let picked_f: Vec<_> = picked.iter().map(|x| *x as f64 / total as f64).collect();
2372
2373 // We didn't set any non-default weights, so the other relays get
2374 // weighted proportionally to their bandwidth.
2375 assert_float_eq!(picked_f[19], (10.0 / 110.0), abs <= tolerance);
2376 assert_float_eq!(picked_f[38], (9.0 / 110.0), abs <= tolerance);
2377 assert_float_eq!(picked_f[39], (10.0 / 110.0), abs <= tolerance);
2378 }
2379
2380 #[test]
2381 fn test_pick_multiple() {
2382 // This is mostly a copy of test_pick, except that it uses
2383 // pick_n_relays to pick several relays at once.
2384
2385 let dir = construct_netdir().unwrap_if_sufficient().unwrap();
2386
2387 let (mut rng, total, tolerance) = testing_rng_with_tolerances();
2388
2389 let mut picked = [0_isize; 40];
2390 for _ in 0..total / 4 {
2391 let relays = dir.pick_n_relays(&mut rng, 4, WeightRole::Middle, |r| {
2392 r.low_level_details().supports_exit_port_ipv4(80)
2393 });
2394 assert_eq!(relays.len(), 4);
2395 for r in relays {
2396 let id_byte = r.identity(RelayIdType::Rsa).unwrap().as_bytes()[0];
2397 picked[id_byte as usize] += 1;
2398 }
2399 }
2400 // non-exits should never get picked.
2401 picked[0..10].iter().for_each(|x| assert_eq!(*x, 0));
2402 picked[20..30].iter().for_each(|x| assert_eq!(*x, 0));
2403
2404 let picked_f: Vec<_> = picked.iter().map(|x| *x as f64 / total as f64).collect();
2405
2406 // We didn't set any non-default weights, so the other relays get
2407 // weighted proportionally to their bandwidth.
2408 assert_float_eq!(picked_f[19], (10.0 / 110.0), abs <= tolerance);
2409 assert_float_eq!(picked_f[36], (7.0 / 110.0), abs <= tolerance);
2410 assert_float_eq!(picked_f[39], (10.0 / 110.0), abs <= tolerance);
2411 }
2412
2413 #[test]
2414 fn subnets() {
2415 let cfg = SubnetConfig::default();
2416
2417 fn same_net(cfg: &SubnetConfig, a: &str, b: &str) -> bool {
2418 cfg.addrs_in_same_subnet(&a.parse().unwrap(), &b.parse().unwrap())
2419 }
2420
2421 assert!(same_net(&cfg, "127.15.3.3", "127.15.9.9"));
2422 assert!(!same_net(&cfg, "127.15.3.3", "127.16.9.9"));
2423
2424 assert!(!same_net(&cfg, "127.15.3.3", "127::"));
2425
2426 assert!(same_net(&cfg, "ffff:ffff:90:33::", "ffff:ffff:91:34::"));
2427 assert!(!same_net(&cfg, "ffff:ffff:90:33::", "ffff:fffe:91:34::"));
2428
2429 let cfg = SubnetConfig {
2430 subnets_family_v4: 32,
2431 subnets_family_v6: 128,
2432 };
2433 assert!(!same_net(&cfg, "127.15.3.3", "127.15.9.9"));
2434 assert!(!same_net(&cfg, "ffff:ffff:90:33::", "ffff:ffff:91:34::"));
2435
2436 assert!(same_net(&cfg, "127.0.0.1", "127.0.0.1"));
2437 assert!(!same_net(&cfg, "127.0.0.1", "127.0.0.2"));
2438 assert!(same_net(&cfg, "ffff:ffff:90:33::", "ffff:ffff:90:33::"));
2439
2440 let cfg = SubnetConfig {
2441 subnets_family_v4: 33,
2442 subnets_family_v6: 129,
2443 };
2444 assert!(!same_net(&cfg, "127.0.0.1", "127.0.0.1"));
2445 assert!(!same_net(&cfg, "::", "::"));
2446 }
2447
2448 #[test]
2449 fn subnet_union() {
2450 let cfg1 = SubnetConfig {
2451 subnets_family_v4: 16,
2452 subnets_family_v6: 64,
2453 };
2454 let cfg2 = SubnetConfig {
2455 subnets_family_v4: 24,
2456 subnets_family_v6: 32,
2457 };
2458 let a1 = "1.2.3.4".parse().unwrap();
2459 let a2 = "1.2.10.10".parse().unwrap();
2460
2461 let a3 = "ffff:ffff::7".parse().unwrap();
2462 let a4 = "ffff:ffff:1234::8".parse().unwrap();
2463
2464 assert_eq!(cfg1.addrs_in_same_subnet(&a1, &a2), true);
2465 assert_eq!(cfg2.addrs_in_same_subnet(&a1, &a2), false);
2466
2467 assert_eq!(cfg1.addrs_in_same_subnet(&a3, &a4), false);
2468 assert_eq!(cfg2.addrs_in_same_subnet(&a3, &a4), true);
2469
2470 let cfg_u = cfg1.union(&cfg2);
2471 assert_eq!(
2472 cfg_u,
2473 SubnetConfig {
2474 subnets_family_v4: 16,
2475 subnets_family_v6: 32,
2476 }
2477 );
2478 assert_eq!(cfg_u.addrs_in_same_subnet(&a1, &a2), true);
2479 assert_eq!(cfg_u.addrs_in_same_subnet(&a3, &a4), true);
2480
2481 assert_eq!(cfg1.union(&cfg1), cfg1);
2482
2483 assert_eq!(cfg1.union(&SubnetConfig::no_addresses_match()), cfg1);
2484 }
2485
2486 #[test]
2487 fn relay_funcs() {
2488 let (consensus, microdescs) = construct_custom_network(
2489 |pos, nb, _| {
2490 if pos == 15 {
2491 nb.rs.add_or_port("[f0f0::30]:9001".parse().unwrap());
2492 } else if pos == 20 {
2493 nb.rs.add_or_port("[f0f0::3131]:9001".parse().unwrap());
2494 }
2495 },
2496 None,
2497 )
2498 .unwrap();
2499 let subnet_config = SubnetConfig::default();
2500 let all_family_info = FamilyRules::all_family_info();
2501 let mut dir = PartialNetDir::new(consensus, None);
2502 for md in microdescs.into_iter() {
2503 let wanted = dir.add_microdesc(md.clone());
2504 assert!(wanted);
2505 }
2506 let dir = dir.unwrap_if_sufficient().unwrap();
2507
2508 // Pick out a few relays by ID.
2509 let k0 = Ed25519Identity::from([0; 32]);
2510 let k1 = Ed25519Identity::from([1; 32]);
2511 let k2 = Ed25519Identity::from([2; 32]);
2512 let k3 = Ed25519Identity::from([3; 32]);
2513 let k10 = Ed25519Identity::from([10; 32]);
2514 let k15 = Ed25519Identity::from([15; 32]);
2515 let k20 = Ed25519Identity::from([20; 32]);
2516
2517 let r0 = dir.by_id(&k0).unwrap();
2518 let r1 = dir.by_id(&k1).unwrap();
2519 let r2 = dir.by_id(&k2).unwrap();
2520 let r3 = dir.by_id(&k3).unwrap();
2521 let r10 = dir.by_id(&k10).unwrap();
2522 let r15 = dir.by_id(&k15).unwrap();
2523 let r20 = dir.by_id(&k20).unwrap();
2524
2525 assert_eq!(r0.id(), &[0; 32].into());
2526 assert_eq!(r0.rsa_id(), &[0; 20].into());
2527 assert_eq!(r1.id(), &[1; 32].into());
2528 assert_eq!(r1.rsa_id(), &[1; 20].into());
2529
2530 assert!(r0.same_relay_ids(&r0));
2531 assert!(r1.same_relay_ids(&r1));
2532 assert!(!r1.same_relay_ids(&r0));
2533
2534 assert!(r0.low_level_details().is_dir_cache());
2535 assert!(!r1.low_level_details().is_dir_cache());
2536 assert!(r2.low_level_details().is_dir_cache());
2537 assert!(!r3.low_level_details().is_dir_cache());
2538
2539 assert!(!r0.low_level_details().supports_exit_port_ipv4(80));
2540 assert!(!r1.low_level_details().supports_exit_port_ipv4(80));
2541 assert!(!r2.low_level_details().supports_exit_port_ipv4(80));
2542 assert!(!r3.low_level_details().supports_exit_port_ipv4(80));
2543
2544 assert!(!r0.low_level_details().policies_allow_some_port());
2545 assert!(!r1.low_level_details().policies_allow_some_port());
2546 assert!(!r2.low_level_details().policies_allow_some_port());
2547 assert!(!r3.low_level_details().policies_allow_some_port());
2548 assert!(r10.low_level_details().policies_allow_some_port());
2549
2550 assert!(r0.low_level_details().in_same_family(&r0, all_family_info));
2551 assert!(r0.low_level_details().in_same_family(&r1, all_family_info));
2552 assert!(r1.low_level_details().in_same_family(&r0, all_family_info));
2553 assert!(r1.low_level_details().in_same_family(&r1, all_family_info));
2554 assert!(!r0.low_level_details().in_same_family(&r2, all_family_info));
2555 assert!(!r2.low_level_details().in_same_family(&r0, all_family_info));
2556 assert!(r2.low_level_details().in_same_family(&r2, all_family_info));
2557 assert!(r2.low_level_details().in_same_family(&r3, all_family_info));
2558
2559 assert!(r0.low_level_details().in_same_subnet(&r10, &subnet_config));
2560 assert!(r10.low_level_details().in_same_subnet(&r10, &subnet_config));
2561 assert!(r0.low_level_details().in_same_subnet(&r0, &subnet_config));
2562 assert!(r1.low_level_details().in_same_subnet(&r1, &subnet_config));
2563 assert!(!r1.low_level_details().in_same_subnet(&r2, &subnet_config));
2564 assert!(!r2.low_level_details().in_same_subnet(&r3, &subnet_config));
2565
2566 // Make sure IPv6 families work.
2567 let subnet_config = SubnetConfig {
2568 subnets_family_v4: 128,
2569 subnets_family_v6: 96,
2570 };
2571 assert!(r15.low_level_details().in_same_subnet(&r20, &subnet_config));
2572 assert!(!r15.low_level_details().in_same_subnet(&r1, &subnet_config));
2573
2574 // Make sure that subnet configs can be disabled.
2575 let subnet_config = SubnetConfig {
2576 subnets_family_v4: 255,
2577 subnets_family_v6: 255,
2578 };
2579 assert!(!r15.low_level_details().in_same_subnet(&r20, &subnet_config));
2580 }
2581
2582 #[test]
2583 fn test_badexit() {
2584 // make a netdir where relays 10-19 are badexit, and everybody
2585 // exits to 443 on IPv6.
2586 use tor_netdoc::doc::netstatus::RelayFlags;
2587 let netdir = construct_custom_netdir(|pos, nb, _| {
2588 if (10..20).contains(&pos) {
2589 nb.rs.add_flags(RelayFlags::BAD_EXIT);
2590 }
2591 nb.md.parse_ipv6_policy("accept 443").unwrap();
2592 })
2593 .unwrap()
2594 .unwrap_if_sufficient()
2595 .unwrap();
2596
2597 let e12 = netdir.by_id(&Ed25519Identity::from([12; 32])).unwrap();
2598 let e32 = netdir.by_id(&Ed25519Identity::from([32; 32])).unwrap();
2599
2600 assert!(!e12.low_level_details().supports_exit_port_ipv4(80));
2601 assert!(e32.low_level_details().supports_exit_port_ipv4(80));
2602
2603 assert!(!e12.low_level_details().supports_exit_port_ipv6(443));
2604 assert!(e32.low_level_details().supports_exit_port_ipv6(443));
2605 assert!(!e32.low_level_details().supports_exit_port_ipv6(555));
2606
2607 assert!(!e12.low_level_details().policies_allow_some_port());
2608 assert!(e32.low_level_details().policies_allow_some_port());
2609
2610 assert!(!e12.low_level_details().ipv4_policy().allows_some_port());
2611 assert!(!e12.low_level_details().ipv6_policy().allows_some_port());
2612 assert!(e32.low_level_details().ipv4_policy().allows_some_port());
2613 assert!(e32.low_level_details().ipv6_policy().allows_some_port());
2614
2615 assert!(
2616 e12.low_level_details()
2617 .ipv4_declared_policy()
2618 .allows_some_port()
2619 );
2620 assert!(
2621 e12.low_level_details()
2622 .ipv6_declared_policy()
2623 .allows_some_port()
2624 );
2625 }
2626
2627 #[cfg(feature = "experimental-api")]
2628 #[test]
2629 fn test_accessors() {
2630 let netdir = construct_netdir().unwrap_if_sufficient().unwrap();
2631
2632 let r4 = netdir.by_id(&Ed25519Identity::from([4; 32])).unwrap();
2633 let r16 = netdir.by_id(&Ed25519Identity::from([16; 32])).unwrap();
2634
2635 assert!(!r4.md().ipv4_policy().allows_some_port());
2636 assert!(r16.md().ipv4_policy().allows_some_port());
2637
2638 assert!(!r4.rs().is_flagged_exit());
2639 assert!(r16.rs().is_flagged_exit());
2640 }
2641
2642 #[test]
2643 fn test_by_id() {
2644 // Make a netdir that omits the microdescriptor for relay 13 (0x0D0D0D...).
2645 let netdir = construct_custom_netdir(|pos, nb, _| {
2646 nb.omit_md = pos == 13;
2647 })
2648 .unwrap();
2649
2650 let netdir = netdir.unwrap_if_sufficient().unwrap();
2651
2652 let r = netdir.by_id(&Ed25519Identity::from([0; 32])).unwrap();
2653 assert_eq!(r.id().as_bytes(), &[0; 32]);
2654
2655 assert!(netdir.by_id(&Ed25519Identity::from([13; 32])).is_none());
2656
2657 let r = netdir.by_rsa_id(&[12; 20].into()).unwrap();
2658 assert_eq!(r.rsa_id().as_bytes(), &[12; 20]);
2659 assert!(netdir.rsa_id_is_listed(&[12; 20].into()));
2660
2661 assert!(netdir.by_rsa_id(&[13; 20].into()).is_none());
2662
2663 assert!(netdir.by_rsa_id_unchecked(&[99; 20].into()).is_none());
2664 assert!(!netdir.rsa_id_is_listed(&[99; 20].into()));
2665
2666 let r = netdir.by_rsa_id_unchecked(&[13; 20].into()).unwrap();
2667 assert_eq!(r.rs.rsa_identity().as_bytes(), &[13; 20]);
2668 assert!(netdir.rsa_id_is_listed(&[13; 20].into()));
2669
2670 let pair_13_13 = RelayIds::builder()
2671 .ed_identity([13; 32].into())
2672 .rsa_identity([13; 20].into())
2673 .build()
2674 .unwrap();
2675 let pair_14_14 = RelayIds::builder()
2676 .ed_identity([14; 32].into())
2677 .rsa_identity([14; 20].into())
2678 .build()
2679 .unwrap();
2680 let pair_14_99 = RelayIds::builder()
2681 .ed_identity([14; 32].into())
2682 .rsa_identity([99; 20].into())
2683 .build()
2684 .unwrap();
2685
2686 let r = netdir.by_ids(&pair_13_13);
2687 assert!(r.is_none());
2688 let r = netdir.by_ids(&pair_14_14).unwrap();
2689 assert_eq!(r.identity(RelayIdType::Rsa).unwrap().as_bytes(), &[14; 20]);
2690 assert_eq!(
2691 r.identity(RelayIdType::Ed25519).unwrap().as_bytes(),
2692 &[14; 32]
2693 );
2694 let r = netdir.by_ids(&pair_14_99);
2695 assert!(r.is_none());
2696
2697 assert_eq!(
2698 netdir.id_pair_listed(&[13; 32].into(), &[13; 20].into()),
2699 None
2700 );
2701 assert_eq!(
2702 netdir.id_pair_listed(&[15; 32].into(), &[15; 20].into()),
2703 Some(true)
2704 );
2705 assert_eq!(
2706 netdir.id_pair_listed(&[15; 32].into(), &[99; 20].into()),
2707 Some(false)
2708 );
2709 }
2710
2711 #[test]
2712 #[cfg(feature = "hs-common")]
2713 fn test_by_ids_detailed() {
2714 // Make a netdir that omits the microdescriptor for relay 13 (0x0D0D0D...).
2715 let netdir = construct_custom_netdir(|pos, nb, _| {
2716 nb.omit_md = pos == 13;
2717 })
2718 .unwrap();
2719
2720 let netdir = netdir.unwrap_if_sufficient().unwrap();
2721
2722 let id13_13 = RelayIds::builder()
2723 .ed_identity([13; 32].into())
2724 .rsa_identity([13; 20].into())
2725 .build()
2726 .unwrap();
2727 let id15_15 = RelayIds::builder()
2728 .ed_identity([15; 32].into())
2729 .rsa_identity([15; 20].into())
2730 .build()
2731 .unwrap();
2732 let id15_99 = RelayIds::builder()
2733 .ed_identity([15; 32].into())
2734 .rsa_identity([99; 20].into())
2735 .build()
2736 .unwrap();
2737 let id99_15 = RelayIds::builder()
2738 .ed_identity([99; 32].into())
2739 .rsa_identity([15; 20].into())
2740 .build()
2741 .unwrap();
2742 let id99_99 = RelayIds::builder()
2743 .ed_identity([99; 32].into())
2744 .rsa_identity([99; 20].into())
2745 .build()
2746 .unwrap();
2747 let id15_xx = RelayIds::builder()
2748 .ed_identity([15; 32].into())
2749 .build()
2750 .unwrap();
2751 let idxx_15 = RelayIds::builder()
2752 .rsa_identity([15; 20].into())
2753 .build()
2754 .unwrap();
2755
2756 assert!(matches!(netdir.by_ids_detailed(&id13_13), Ok(None)));
2757 assert!(matches!(netdir.by_ids_detailed(&id15_15), Ok(Some(_))));
2758 assert!(matches!(
2759 netdir.by_ids_detailed(&id15_99),
2760 Err(RelayLookupError::Impossible)
2761 ));
2762 assert!(matches!(
2763 netdir.by_ids_detailed(&id99_15),
2764 Err(RelayLookupError::Impossible)
2765 ));
2766 assert!(matches!(netdir.by_ids_detailed(&id99_99), Ok(None)));
2767 assert!(matches!(netdir.by_ids_detailed(&id15_xx), Ok(Some(_))));
2768 assert!(matches!(netdir.by_ids_detailed(&idxx_15), Ok(Some(_))));
2769 }
2770
2771 #[test]
2772 fn weight_type() {
2773 let r0 = RelayWeight(0);
2774 let r100 = RelayWeight(100);
2775 let r200 = RelayWeight(200);
2776 let r300 = RelayWeight(300);
2777 assert_eq!(r100 + r200, r300);
2778 assert_eq!(r100.checked_div(r200), Some(0.5));
2779 assert!(r100.checked_div(r0).is_none());
2780 assert_eq!(r200.ratio(0.5), Some(r100));
2781 assert!(r200.ratio(-1.0).is_none());
2782 }
2783
2784 #[test]
2785 fn weight_accessors() {
2786 // Make a default testing netdir (nothing omitted).
2787 let netdir = construct_netdir().unwrap_if_sufficient().unwrap();
2788
2789 let g_total = netdir.total_weight(WeightRole::Guard, |r| r.rs.is_flagged_guard());
2790 // This is just the total guard weight, since all our Wxy = 1.
2791 assert_eq!(g_total, RelayWeight(110_000));
2792
2793 let g_total = netdir.total_weight(WeightRole::Guard, |_| false);
2794 assert_eq!(g_total, RelayWeight(0));
2795
2796 let relay = netdir.by_id(&Ed25519Identity::from([35; 32])).unwrap();
2797 assert!(relay.rs.is_flagged_guard());
2798 let w = netdir.relay_weight(&relay, WeightRole::Guard);
2799 assert_eq!(w, RelayWeight(6_000));
2800
2801 let w = netdir
2802 .weight_by_rsa_id(&[33; 20].into(), WeightRole::Guard)
2803 .unwrap();
2804 assert_eq!(w, RelayWeight(4_000));
2805
2806 assert!(
2807 netdir
2808 .weight_by_rsa_id(&[99; 20].into(), WeightRole::Guard)
2809 .is_none()
2810 );
2811 }
2812
2813 #[test]
2814 fn family_list() {
2815 let netdir = construct_custom_netdir(|pos, n, _| {
2816 if pos == 0x0a {
2817 n.md.family(
2818 "$0B0B0B0B0B0B0B0B0B0B0B0B0B0B0B0B0B0B0B0B \
2819 $0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C \
2820 $0D0D0D0D0D0D0D0D0D0D0D0D0D0D0D0D0D0D0D0D"
2821 .parse()
2822 .unwrap(),
2823 );
2824 } else if pos == 0x0c {
2825 n.md.family("$0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A".parse().unwrap());
2826 }
2827 })
2828 .unwrap()
2829 .unwrap_if_sufficient()
2830 .unwrap();
2831
2832 // In the testing netdir, adjacent members are in the same family by default...
2833 let r0 = netdir.by_id(&Ed25519Identity::from([0; 32])).unwrap();
2834 let family: Vec<_> = netdir.known_family_members(&r0).collect();
2835 assert_eq!(family.len(), 1);
2836 assert_eq!(family[0].id(), &Ed25519Identity::from([1; 32]));
2837
2838 // But we've made this relay claim membership with several others.
2839 let r10 = netdir.by_id(&Ed25519Identity::from([10; 32])).unwrap();
2840 let family: HashSet<_> = netdir.known_family_members(&r10).map(|r| *r.id()).collect();
2841 assert_eq!(family.len(), 2);
2842 assert!(family.contains(&Ed25519Identity::from([11; 32])));
2843 assert!(family.contains(&Ed25519Identity::from([12; 32])));
2844 // Note that 13 doesn't get put in, even though it's listed, since it doesn't claim
2845 // membership with 10.
2846 }
2847 #[test]
2848 #[cfg(feature = "geoip")]
2849 fn relay_has_country_code() {
2850 let src_v6 = r#"
2851 fe80:dead:beef::,fe80:dead:ffff::,US
2852 fe80:feed:eeee::1,fe80:feed:eeee::2,AT
2853 fe80:feed:eeee::2,fe80:feed:ffff::,DE
2854 "#;
2855 let db = GeoipDb::new_from_legacy_format("", src_v6).unwrap();
2856
2857 let netdir = construct_custom_netdir_with_geoip(
2858 |pos, n, _| {
2859 if pos == 0x01 {
2860 n.rs.add_or_port("[fe80:dead:beef::1]:42".parse().unwrap());
2861 }
2862 if pos == 0x02 {
2863 n.rs.add_or_port("[fe80:feed:eeee::1]:42".parse().unwrap());
2864 n.rs.add_or_port("[fe80:feed:eeee::2]:42".parse().unwrap());
2865 }
2866 if pos == 0x03 {
2867 n.rs.add_or_port("[fe80:dead:beef::1]:42".parse().unwrap());
2868 n.rs.add_or_port("[fe80:dead:beef::2]:42".parse().unwrap());
2869 }
2870 },
2871 &db,
2872 )
2873 .unwrap()
2874 .unwrap_if_sufficient()
2875 .unwrap();
2876
2877 // No GeoIP data available -> None
2878 let r0 = netdir.by_id(&Ed25519Identity::from([0; 32])).unwrap();
2879 assert_eq!(r0.cc, None);
2880
2881 // Exactly one match -> Some
2882 let r1 = netdir.by_id(&Ed25519Identity::from([1; 32])).unwrap();
2883 assert_eq!(r1.cc.as_ref().map(|x| x.as_ref()), Some("US"));
2884
2885 // Conflicting matches -> None
2886 let r2 = netdir.by_id(&Ed25519Identity::from([2; 32])).unwrap();
2887 assert_eq!(r2.cc, None);
2888
2889 // Multiple agreeing matches -> Some
2890 let r3 = netdir.by_id(&Ed25519Identity::from([3; 32])).unwrap();
2891 assert_eq!(r3.cc.as_ref().map(|x| x.as_ref()), Some("US"));
2892 }
2893
2894 #[test]
2895 #[cfg(feature = "hs-common")]
2896 #[allow(deprecated)]
2897 fn hs_dirs_selection() {
2898 use tor_basic_utils::test_rng::testing_rng;
2899
2900 const HSDIR_SPREAD_STORE: i32 = 6;
2901 const HSDIR_SPREAD_FETCH: i32 = 2;
2902 const PARAMS: [(&str, i32); 2] = [
2903 ("hsdir_spread_store", HSDIR_SPREAD_STORE),
2904 ("hsdir_spread_fetch", HSDIR_SPREAD_FETCH),
2905 ];
2906
2907 let netdir: Arc<NetDir> =
2908 crate::testnet::construct_custom_netdir_with_params(|_, _, _| {}, PARAMS, None)
2909 .unwrap()
2910 .unwrap_if_sufficient()
2911 .unwrap()
2912 .into();
2913 let hsid = dummy_hs_blind_id();
2914
2915 const OP_RELAY_COUNT: &[(HsDirOp, usize)] = &[
2916 // We can't upload to (hsdir_n_replicas * hsdir_spread_store) = 12 relays, because there
2917 // are only 10 relays with the HsDir flag in the consensus.
2918 #[cfg(feature = "hs-service")]
2919 (HsDirOp::Upload, 10),
2920 (HsDirOp::Download, 4),
2921 ];
2922
2923 for (op, relay_count) in OP_RELAY_COUNT {
2924 let relays = netdir.hs_dirs(&hsid, *op, &mut testing_rng());
2925
2926 assert_eq!(relays.len(), *relay_count);
2927
2928 // There should be no duplicates (the filtering function passed to
2929 // HsDirRing::ring_items_at() ensures the relays that are already in use for
2930 // lower-numbered replicas aren't considered a second time for a higher-numbered
2931 // replica).
2932 let unique = relays
2933 .iter()
2934 .map(|relay| relay.ed_identity())
2935 .collect::<HashSet<_>>();
2936 assert_eq!(unique.len(), relays.len());
2937 }
2938
2939 // TODO: come up with a test that checks that HsDirRing::ring_items_at() skips over the
2940 // expected relays.
2941 //
2942 // For example, let's say we have the following hsdir ring:
2943 //
2944 // A - B
2945 // / \
2946 // F C
2947 // \ /
2948 // E - D
2949 //
2950 // Let's also assume that:
2951 //
2952 // * hsdir_spread_store = 3
2953 // * the ordering of the relays on the ring is [A, B, C, D, E, F]
2954 //
2955 // If we use relays [A, B, C] for replica 1, and hs_index(2) = E, then replica 2 _must_ get
2956 // relays [E, F, D]. We should have a test that checks this.
2957 }
2958
2959 #[test]
2960 fn zero_weights() {
2961 // Here we check the behavior of IndexedRandom::{choose_weighted, choose_multiple_weighted}
2962 // in the presence of items whose weight is 0.
2963 //
2964 // We think that the behavior is:
2965 // - An item with weight 0 is never returned.
2966 // - If all items have weight 0, choose_weighted returns an error.
2967 // - If all items have weight 0, choose_multiple_weighted returns an empty list.
2968 // - If we request n items from choose_multiple_weighted,
2969 // but only m<n items have nonzero weight, we return all m of those items.
2970 // - If the request for n items can't be completely satisfied with n items of weight > 0,
2971 // we get InsufficientNonZero.
2972 let items = vec![1, 2, 3];
2973 let mut rng = testing_rng();
2974
2975 let a = items.choose_weighted(&mut rng, |_| 0);
2976 assert!(matches!(a, Err(WeightError::InsufficientNonZero)));
2977
2978 let x = items.choose_multiple_weighted(&mut rng, 2, |_| 0);
2979 let xs: Vec<_> = x.unwrap().collect();
2980 assert!(xs.is_empty());
2981
2982 let only_one = |n: &i32| if *n == 1 { 1 } else { 0 };
2983 let x = items.choose_multiple_weighted(&mut rng, 2, only_one);
2984 let xs: Vec<_> = x.unwrap().collect();
2985 assert_eq!(&xs[..], &[&1]);
2986
2987 for _ in 0..100 {
2988 let a = items.choose_weighted(&mut rng, only_one);
2989 assert_eq!(a.unwrap(), &1);
2990
2991 let x = items
2992 .choose_multiple_weighted(&mut rng, 1, only_one)
2993 .unwrap()
2994 .collect::<Vec<_>>();
2995 assert_eq!(x, vec![&1]);
2996 }
2997 }
2998
2999 #[test]
3000 fn insufficient_but_nonzero() {
3001 // Here we check IndexedRandom::choose_multiple_weighted when there are no zero values,
3002 // but there are insufficient values.
3003 // (If this behavior changes, we need to change our usage.)
3004
3005 let items = vec![1, 2, 3];
3006 let mut rng = testing_rng();
3007 let mut a = items
3008 .choose_multiple_weighted(&mut rng, 10, |_| 1)
3009 .unwrap()
3010 .copied()
3011 .collect::<Vec<_>>();
3012 a.sort();
3013 assert_eq!(a, items);
3014 }
3015}