// tor_netdir/lib.rs
1#![cfg_attr(docsrs, feature(doc_cfg))]
2#![doc = include_str!("../README.md")]
3// @@ begin lint list maintained by maint/add_warning @@
4#![allow(renamed_and_removed_lints)] // @@REMOVE_WHEN(ci_arti_stable)
5#![allow(unknown_lints)] // @@REMOVE_WHEN(ci_arti_nightly)
6#![warn(missing_docs)]
7#![warn(noop_method_call)]
8#![warn(unreachable_pub)]
9#![warn(clippy::all)]
10#![deny(clippy::await_holding_lock)]
11#![deny(clippy::cargo_common_metadata)]
12#![deny(clippy::cast_lossless)]
13#![deny(clippy::checked_conversions)]
14#![warn(clippy::cognitive_complexity)]
15#![deny(clippy::debug_assert_with_mut_call)]
16#![deny(clippy::exhaustive_enums)]
17#![deny(clippy::exhaustive_structs)]
18#![deny(clippy::expl_impl_clone_on_copy)]
19#![deny(clippy::fallible_impl_from)]
20#![deny(clippy::implicit_clone)]
21#![deny(clippy::large_stack_arrays)]
22#![warn(clippy::manual_ok_or)]
23#![deny(clippy::missing_docs_in_private_items)]
24#![warn(clippy::needless_borrow)]
25#![warn(clippy::needless_pass_by_value)]
26#![warn(clippy::option_option)]
27#![deny(clippy::print_stderr)]
28#![deny(clippy::print_stdout)]
29#![warn(clippy::rc_buffer)]
30#![deny(clippy::ref_option_ref)]
31#![warn(clippy::semicolon_if_nothing_returned)]
32#![warn(clippy::trait_duplication_in_bounds)]
33#![deny(clippy::unchecked_time_subtraction)]
34#![deny(clippy::unnecessary_wraps)]
35#![warn(clippy::unseparated_literal_suffix)]
36#![deny(clippy::unwrap_used)]
37#![deny(clippy::mod_module_files)]
38#![allow(clippy::let_unit_value)] // This can reasonably be done for explicitness
39#![allow(clippy::uninlined_format_args)]
40#![allow(clippy::significant_drop_in_scrutinee)] // arti/-/merge_requests/588/#note_2812945
41#![allow(clippy::result_large_err)] // temporary workaround for arti#587
42#![allow(clippy::needless_raw_string_hashes)] // complained-about code is fine, often best
43#![allow(clippy::needless_lifetimes)] // See arti#1765
44#![allow(mismatched_lifetime_syntaxes)] // temporary workaround for arti#2060
45#![allow(clippy::collapsible_if)] // See arti#2342
46#![deny(clippy::unused_async)]
47//! <!-- @@ end lint list maintained by maint/add_warning @@ -->
48
49pub mod details;
50mod err;
51#[cfg(feature = "hs-common")]
52mod hsdir_params;
53#[cfg(feature = "hs-common")]
54mod hsdir_ring;
55pub mod params;
56mod weight;
57
58#[cfg(any(test, feature = "testing"))]
59pub mod testnet;
60#[cfg(feature = "testing")]
61pub mod testprovider;
62
63use async_trait::async_trait;
64#[cfg(feature = "hs-service")]
65use itertools::chain;
66use tor_error::warn_report;
67#[cfg(feature = "hs-common")]
68use tor_linkspec::OwnedCircTarget;
69use tor_linkspec::{
70 ChanTarget, DirectChanMethodsHelper, HasAddrs, HasRelayIds, RelayIdRef, RelayIdType,
71};
72use tor_llcrypto as ll;
73use tor_llcrypto::pk::{ed25519::Ed25519Identity, rsa::RsaIdentity};
74use tor_netdoc::doc::microdesc::{MdDigest, Microdesc};
75use tor_netdoc::doc::netstatus::{self, MdConsensus, MdRouterStatus};
76#[cfg(feature = "hs-common")]
77use {hsdir_ring::HsDirRing, std::iter};
78
79use derive_more::{From, Into};
80use futures::{StreamExt, stream::BoxStream};
81use num_enum::{IntoPrimitive, TryFromPrimitive};
82use rand::seq::{IndexedRandom as _, SliceRandom as _, WeightError};
83use serde::Deserialize;
84use std::collections::HashMap;
85use std::net::IpAddr;
86use std::ops::Deref;
87use std::sync::Arc;
88use std::time::SystemTime;
89use strum::{EnumCount, EnumIter};
90use tracing::warn;
91use typed_index_collections::{TiSlice, TiVec};
92
93#[cfg(feature = "hs-common")]
94use {
95 itertools::Itertools,
96 std::collections::HashSet,
97 std::result::Result as StdResult,
98 tor_error::{Bug, internal},
99 tor_hscrypto::{pk::HsBlindId, time::TimePeriod},
100 tor_linkspec::{OwnedChanTargetBuilder, verbatim::VerbatimLinkSpecCircTarget},
101 tor_llcrypto::pk::curve25519,
102};
103
104pub use err::Error;
105pub use weight::WeightRole;
106/// A Result using the Error type from the tor-netdir crate
107pub type Result<T> = std::result::Result<T, Error>;
108
109#[cfg(feature = "hs-common")]
110pub use err::{OnionDirLookupError, VerbatimCircTargetDecodeError};
111
112use params::NetParameters;
113#[cfg(feature = "geoip")]
114use tor_geoip::{CountryCode, GeoipDb, HasCountryCode};
115
116#[cfg(feature = "hs-common")]
117pub use hsdir_params::HsDirParams;
118
/// Index into the consensus relays
///
/// This is an index into the list of relays returned by
/// [`.c_relays()`](ConsensusRelays::c_relays)
/// (on the corresponding consensus or netdir).
///
/// This is just a `usize` inside, but using a newtype prevents getting a relay index
/// confused with other kinds of slice indices or counts.
///
/// Note that if you are in a part of the code which needs to work with multiple
/// consensuses, the typechecking cannot tell if you try to index into the wrong consensus.
#[derive(Debug, From, Into, Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash)]
pub(crate) struct RouterStatusIdx(usize);
132
/// Extension trait to provide index-type-safe `.c_relays()` method
//
// TODO: Really it would be better to have MdConsensus::relays() return TiSlice,
// but that would be an API break there.
pub(crate) trait ConsensusRelays {
    /// Obtain the list of relays in the consensus, indexed by [`RouterStatusIdx`].
    //
    fn c_relays(&self) -> &TiSlice<RouterStatusIdx, MdRouterStatus>;
}
impl ConsensusRelays for MdConsensus {
    fn c_relays(&self) -> &TiSlice<RouterStatusIdx, MdRouterStatus> {
        // `TiSlice::from_ref` is a zero-cost view of the plain slice as an
        // index-typed slice.
        TiSlice::from_ref(MdConsensus::relays(self))
    }
}
impl ConsensusRelays for NetDir {
    fn c_relays(&self) -> &TiSlice<RouterStatusIdx, MdRouterStatus> {
        // A NetDir's relay list is exactly that of its underlying consensus.
        self.consensus.c_relays()
    }
}
152
/// Configuration for determining when two relays have addresses "too close" in
/// the network.
///
/// Used by `Relay::low_level_details().in_same_subnet()`.
#[derive(Deserialize, Debug, Clone, Copy, Eq, PartialEq)]
#[serde(deny_unknown_fields)]
pub struct SubnetConfig {
    /// Consider IPv4 nodes in the same /x to be the same family.
    ///
    /// If this value is 0, all nodes with IPv4 addresses will be in the
    /// same family. If this value is above 32, then no nodes will be
    /// placed in the same family based on their IPv4 addresses.
    subnets_family_v4: u8,
    /// Consider IPv6 nodes in the same /x to be the same family.
    ///
    /// If this value is 0, all nodes with IPv6 addresses will be in the
    /// same family. If this value is above 128, then no nodes will be
    /// placed in the same family based on their IPv6 addresses.
    subnets_family_v6: u8,
}
173
impl Default for SubnetConfig {
    fn default() -> Self {
        // Default to a /16 prefix for IPv4 and a /32 prefix for IPv6.
        Self::new(16, 32)
    }
}
179
180impl SubnetConfig {
181 /// Construct a new SubnetConfig from a pair of bit prefix lengths.
182 ///
183 /// The values are clamped to the appropriate ranges if they are
184 /// out-of-bounds.
185 pub fn new(subnets_family_v4: u8, subnets_family_v6: u8) -> Self {
186 Self {
187 subnets_family_v4,
188 subnets_family_v6,
189 }
190 }
191
192 /// Construct a new SubnetConfig such that addresses are not in the same
193 /// family with anything--not even with themselves.
194 pub fn no_addresses_match() -> SubnetConfig {
195 SubnetConfig {
196 subnets_family_v4: 33,
197 subnets_family_v6: 129,
198 }
199 }
200
201 /// Return true if the two addresses in the same subnet, according to this
202 /// configuration.
203 pub fn addrs_in_same_subnet(&self, a: &IpAddr, b: &IpAddr) -> bool {
204 match (a, b) {
205 (IpAddr::V4(a), IpAddr::V4(b)) => {
206 let bits = self.subnets_family_v4;
207 if bits > 32 {
208 return false;
209 }
210 let a = u32::from_be_bytes(a.octets());
211 let b = u32::from_be_bytes(b.octets());
212 (a >> (32 - bits)) == (b >> (32 - bits))
213 }
214 (IpAddr::V6(a), IpAddr::V6(b)) => {
215 let bits = self.subnets_family_v6;
216 if bits > 128 {
217 return false;
218 }
219 let a = u128::from_be_bytes(a.octets());
220 let b = u128::from_be_bytes(b.octets());
221 (a >> (128 - bits)) == (b >> (128 - bits))
222 }
223 _ => false,
224 }
225 }
226
227 /// Return true if any of the addresses in `a` shares a subnet with any of
228 /// the addresses in `b`, according to this configuration.
229 pub fn any_addrs_in_same_subnet<T, U>(&self, a: &T, b: &U) -> bool
230 where
231 T: tor_linkspec::HasAddrs,
232 U: tor_linkspec::HasAddrs,
233 {
234 a.addrs().any(|aa| {
235 b.addrs()
236 .any(|bb| self.addrs_in_same_subnet(&aa.ip(), &bb.ip()))
237 })
238 }
239
240 /// Return a new subnet configuration that is the union of `self` and
241 /// `other`.
242 ///
243 /// That is, return a subnet configuration that puts all addresses in the
244 /// same subnet if and only if at least one of `self` and `other` would put
245 /// them in the same subnet.
246 pub fn union(&self, other: &Self) -> Self {
247 use std::cmp::min;
248 Self {
249 subnets_family_v4: min(self.subnets_family_v4, other.subnets_family_v4),
250 subnets_family_v6: min(self.subnets_family_v6, other.subnets_family_v6),
251 }
252 }
253}
254
/// Configuration for which listed family information to use when deciding
/// whether relays belong to the same family.
///
/// Derived from network parameters.
#[derive(Clone, Copy, Debug)]
pub struct FamilyRules {
    /// If true, we use family information from lists of family members.
    ///
    /// (Set from `NetParameters::use_family_lists` when converting from [`NetParameters`].)
    use_family_lists: bool,
    /// If true, we use family information from lists of family IDs and from family certs.
    ///
    /// (Set from `NetParameters::use_family_ids` when converting from [`NetParameters`].)
    use_family_ids: bool,
}
266
267impl<'a> From<&'a NetParameters> for FamilyRules {
268 fn from(params: &'a NetParameters) -> Self {
269 FamilyRules {
270 use_family_lists: bool::from(params.use_family_lists),
271 use_family_ids: bool::from(params.use_family_ids),
272 }
273 }
274}
275
276impl FamilyRules {
277 /// Return a `FamilyRules` that will use all recognized kinds of family information.
278 pub fn all_family_info() -> Self {
279 Self {
280 use_family_lists: true,
281 use_family_ids: true,
282 }
283 }
284
285 /// Return a `FamilyRules` that will ignore all family information declared by relays.
286 pub fn ignore_declared_families() -> Self {
287 Self {
288 use_family_lists: false,
289 use_family_ids: false,
290 }
291 }
292
293 /// Configure this `FamilyRules` to use (or not use) family information from
294 /// lists of family members.
295 pub fn use_family_lists(&mut self, val: bool) -> &mut Self {
296 self.use_family_lists = val;
297 self
298 }
299
300 /// Configure this `FamilyRules` to use (or not use) family information from
301 /// family IDs and family certs.
302 pub fn use_family_ids(&mut self, val: bool) -> &mut Self {
303 self.use_family_ids = val;
304 self
305 }
306
307 /// Return a `FamilyRules` that will look at every source of information
308 /// requested by `self` or by `other`.
309 pub fn union(&self, other: &Self) -> Self {
310 Self {
311 use_family_lists: self.use_family_lists || other.use_family_lists,
312 use_family_ids: self.use_family_ids || other.use_family_ids,
313 }
314 }
315}
316
/// An opaque type representing the weight with which a relay or set of
/// relays will be selected for a given role.
///
/// Most users should ignore this type, and just use pick_relay instead.
///
/// (Internally this is a unitless `u64`; weights can be added, summed, and
/// compared, per the derives below.)
#[derive(
    Copy,
    Clone,
    Debug,
    derive_more::Add,
    derive_more::Sum,
    derive_more::AddAssign,
    Eq,
    PartialEq,
    Ord,
    PartialOrd,
)]
pub struct RelayWeight(u64);
334
335impl RelayWeight {
336 /// Try to divide this weight by `rhs`.
337 ///
338 /// Return a ratio on success, or None on division-by-zero.
339 pub fn checked_div(&self, rhs: RelayWeight) -> Option<f64> {
340 if rhs.0 == 0 {
341 None
342 } else {
343 Some((self.0 as f64) / (rhs.0 as f64))
344 }
345 }
346
347 /// Compute a ratio `frac` of this weight.
348 ///
349 /// Return None if frac is less than zero, since negative weights
350 /// are impossible.
351 pub fn ratio(&self, frac: f64) -> Option<RelayWeight> {
352 let product = (self.0 as f64) * frac;
353 if product >= 0.0 && product.is_finite() {
354 Some(RelayWeight(product as u64))
355 } else {
356 None
357 }
358 }
359}
360
361impl From<u64> for RelayWeight {
362 fn from(val: u64) -> Self {
363 RelayWeight(val)
364 }
365}
366
/// An operation for which we might be requesting a hidden service directory.
#[derive(Copy, Clone, Debug, PartialEq)]
// TODO: make this pub(crate) once NetDir::hs_dirs is removed
#[non_exhaustive]
pub enum HsDirOp {
    /// Uploading an onion service descriptor.
    ///
    /// (Only present when the `hs-service` feature is enabled.)
    #[cfg(feature = "hs-service")]
    Upload,
    /// Downloading an onion service descriptor.
    Download,
}
378
/// A view of the Tor directory, suitable for use in building circuits.
///
/// Abstractly, a [`NetDir`] is a set of usable public [`Relay`]s, each of which
/// has its own properties, identity, and correct weighted probability for use
/// under different circumstances.
///
/// A [`NetDir`] is constructed by making a [`PartialNetDir`] from a consensus
/// document, and then adding enough microdescriptors to that `PartialNetDir` so
/// that it can be used to build paths. (Thus, if you have a NetDir, it is
/// definitely adequate to build paths.)
///
/// # "Usable" relays
///
/// Many methods on NetDir are defined in terms of <a name="usable">"Usable"</a> relays. Unless
/// otherwise stated, a relay is "usable" if it is listed in the consensus,
/// if we have full directory information for that relay (including a
/// microdescriptor), and if that relay does not have any flags indicating that
/// we should never use it. (Currently, `NoEdConsensus` is the only such flag.)
///
/// # Limitations
///
/// The current NetDir implementation assumes fairly strongly that every relay
/// has an Ed25519 identity and an RSA identity, that the consensus is indexed
/// by RSA identities, and that the Ed25519 identities are stored in
/// microdescriptors.
///
/// If these assumptions someday change, then we'll have to revise the
/// implementation.
#[derive(Debug, Clone)]
pub struct NetDir {
    /// A microdescriptor consensus that lists the members of the network,
    /// and maps each one to a 'microdescriptor' that has more information
    /// about it.
    consensus: Arc<MdConsensus>,
    /// A map from keys to integer values, distributed in the consensus,
    /// and clamped to certain defaults.
    params: NetParameters,
    /// Map from routerstatus index, to that routerstatus's microdescriptor (if we have one).
    mds: TiVec<RouterStatusIdx, Option<Arc<Microdesc>>>,
    /// Map from SHA256 of _missing_ microdescriptors to the index of their
    /// corresponding routerstatus.
    rsidx_by_missing: HashMap<MdDigest, RouterStatusIdx>,
    /// Map from ed25519 identity to index of the routerstatus.
    ///
    /// Note that we don't know the ed25519 identity of a relay until
    /// we get the microdescriptor for it, so this won't be filled in
    /// until we get the microdescriptors.
    ///
    /// # Implementation note
    ///
    /// For this field, and for `rsidx_by_rsa`,
    /// it might be cool to have references instead.
    /// But that would make this into a self-referential structure,
    /// which isn't possible in safe rust.
    rsidx_by_ed: HashMap<Ed25519Identity, RouterStatusIdx>,
    /// Map from RSA identity to index of the routerstatus.
    ///
    /// This is constructed at the same time as the NetDir object, so it
    /// can be immutable.
    rsidx_by_rsa: Arc<HashMap<RsaIdentity, RouterStatusIdx>>,

    /// Hash ring(s) describing the onion service directory.
    ///
    /// This is empty in a PartialNetDir, and is filled in before the NetDir is
    /// built.
    //
    // TODO hs: It is ugly to have this exist in a partially constructed state
    // in a PartialNetDir.
    // Ideally, a PartialNetDir would contain only an HsDirs<HsDirParams>,
    // or perhaps nothing at all, here.
    #[cfg(feature = "hs-common")]
    hsdir_rings: Arc<HsDirs<HsDirRing>>,

    /// Weight values to apply to a given relay when deciding how frequently
    /// to choose it for a given role.
    weights: weight::WeightSet,

    #[cfg(feature = "geoip")]
    /// Country codes for each router in our consensus.
    ///
    /// This is indexed by the `RouterStatusIdx` (i.e. a router idx of zero has
    /// the country code at position zero in this array).
    country_codes: Vec<Option<CountryCode>>,
}
463
/// Collection of hidden service directories (or parameters for them)
///
/// In [`NetDir`] this is used to store the actual hash rings.
/// (But, in a NetDir in a [`PartialNetDir`], it contains [`HsDirRing`]s
/// where only the `params` are populated, and the `ring` is empty.)
///
/// This same generic type is used as the return type from
/// [`HsDirParams::compute`](HsDirParams::compute),
/// where it contains the *parameters* for the primary and secondary rings.
#[derive(Debug, Clone)]
#[cfg(feature = "hs-common")]
pub(crate) struct HsDirs<D> {
    /// The current ring
    ///
    /// It corresponds to the time period containing the `valid-after` time in
    /// the consensus. Its SRV is whatever SRV was most current at the time when
    /// that time period began.
    ///
    /// This is the hash ring that we should use whenever we are fetching an
    /// onion service descriptor.
    current: D,

    /// Secondary rings (based on the parameters for the previous and next time periods)
    ///
    /// Onion services upload to positions on these rings as well, based on how
    /// far into the current time period this directory is, so that
    /// not-synchronized clients can still find their descriptor.
    ///
    /// Note that with the current (2023) network parameters, with
    /// `hsdir_interval = SRV lifetime = 24 hours` at most one of these
    /// secondary rings will be active at a time. We have two here in order
    /// to conform with a more flexible regime in proposal 342.
    //
    // TODO: hs clients never need this; so I've made it not-present for them.
    // But does that risk too much with respect to side channels?
    //
    // TODO: Perhaps we should refactor this so that it is clear that these
    // are immutable? On the other hand, the documentation for this type
    // declares that it is immutable, so we are likely okay.
    //
    // TODO: this `Vec` is only ever 0,1,2 elements.
    // Maybe it should be an ArrayVec or something.
    #[cfg(feature = "hs-service")]
    secondary: Vec<D>,
}
509
#[cfg(feature = "hs-common")]
impl<D> HsDirs<D> {
    /// Convert an `HsDirs<D>` to `HsDirs<D2>` by mapping each contained `D`
    pub(crate) fn map<D2>(self, mut f: impl FnMut(D) -> D2) -> HsDirs<D2> {
        HsDirs {
            current: f(self.current),
            #[cfg(feature = "hs-service")]
            secondary: self.secondary.into_iter().map(f).collect(),
        }
    }

    /// Iterate over some of the contained hsdirs, according to `secondary`
    ///
    /// The current ring is always included.
    /// Secondary rings are included iff `secondary` and the `hs-service` feature is enabled.
    fn iter_filter_secondary(&self, secondary: bool) -> impl Iterator<Item = &D> {
        let i = iter::once(&self.current);

        // With "hs-service" disabled, there are no secondary rings,
        // so we don't care.
        // (This also silences the unused-variable warning in that configuration.)
        let _ = secondary;

        #[cfg(feature = "hs-service")]
        let i = chain!(i, self.secondary.iter().filter(move |_| secondary));

        i
    }

    /// Iterate over all the contained hsdirs
    pub(crate) fn iter(&self) -> impl Iterator<Item = &D> {
        self.iter_filter_secondary(true)
    }

    /// Iterate over the hsdirs relevant for `op`
    pub(crate) fn iter_for_op(&self, op: HsDirOp) -> impl Iterator<Item = &D> {
        // Only uploads consult the secondary rings; downloads use just the
        // current ring.
        self.iter_filter_secondary(match op {
            #[cfg(feature = "hs-service")]
            HsDirOp::Upload => true,
            HsDirOp::Download => false,
        })
    }
}
552
/// An event that a [`NetDirProvider`] can broadcast to indicate a change in
/// the status of its directory.
#[derive(
    Debug, Clone, Copy, PartialEq, Eq, EnumIter, EnumCount, IntoPrimitive, TryFromPrimitive,
)]
#[non_exhaustive]
#[repr(u16)]
pub enum DirEvent {
    /// A new consensus has been received, and has enough information to be
    /// used.
    ///
    /// This event is also broadcast when a new set of consensus parameters is
    /// available, even if that set of parameters comes from a configuration
    /// change rather than from the latest consensus.
    NewConsensus,

    /// New descriptors have been received for the current consensus.
    ///
    /// (This event is _not_ broadcast when receiving new descriptors for a
    /// consensus which is not yet ready to replace the current consensus.)
    NewDescriptors,

    /// We have received updated recommendations and requirements
    /// for which subprotocols we should have to use the network.
    NewProtocolRecommendation,
}
579
/// The network directory provider is shutting down without giving us the
/// netdir we asked for.
///
/// (Returned by the waiting methods on [`NetDirProvider`] when the provider's
/// event stream closes.)
#[derive(Clone, Copy, Debug, thiserror::Error)]
#[error("Network directory provider is shutting down")]
#[non_exhaustive]
pub struct NetdirProviderShutdown;
586
impl tor_error::HasKind for NetdirProviderShutdown {
    fn kind(&self) -> tor_error::ErrorKind {
        // The provider going away is classified as part of Arti shutting down.
        tor_error::ErrorKind::ArtiShuttingDown
    }
}
592
/// How "timely" must a network directory be?
///
/// This enum is used as an argument when requesting a [`NetDir`] object from
/// [`NetDirProvider`] and other APIs, to specify how recent the information
/// must be in order to be useful.
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
#[allow(clippy::exhaustive_enums)]
pub enum Timeliness {
    /// The network directory must be strictly timely.
    ///
    /// That is, it must be based on a consensus that is valid right now, with no
    /// tolerance for skew or consensus problems.
    ///
    /// Avoid using this option if you could use [`Timeliness::Timely`] instead.
    Strict,
    /// The network directory must be roughly timely.
    ///
    /// That is, it must be based on a consensus that is not _too_ far in the
    /// future, and not _too_ far in the past.
    ///
    /// (The tolerances for "too far" will depend on configuration.)
    ///
    /// This is almost always the option that you want to use.
    Timely,
    /// Any network directory is permissible, regardless of how untimely.
    ///
    /// Avoid using this option if you could use [`Timeliness::Timely`] instead.
    Unchecked,
}
622
/// An object that can provide [`NetDir`]s, as well as inform consumers when
/// they might have changed.
///
/// It is the responsibility of the implementor of `NetDirProvider`
/// to try to obtain an up-to-date `NetDir`,
/// and continuously to maintain and update it.
///
/// In usual configurations, Arti uses `tor_dirmgr::DirMgr`
/// as its `NetDirProvider`.
#[async_trait]
pub trait NetDirProvider: UpcastArcNetDirProvider + Send + Sync {
    /// Return a network directory that's live according to the provided
    /// `timeliness`.
    fn netdir(&self, timeliness: Timeliness) -> Result<Arc<NetDir>>;

    /// Return a reasonable netdir for general usage.
    ///
    /// This is an alias for
    /// [`NetDirProvider::netdir`]`(`[`Timeliness::Timely`]`)`.
    fn timely_netdir(&self) -> Result<Arc<NetDir>> {
        self.netdir(Timeliness::Timely)
    }

    /// Return a new asynchronous stream that will receive notification
    /// whenever the consensus has changed.
    ///
    /// Multiple events may be batched up into a single item: each time
    /// this stream yields an event, all you can assume is that the event has
    /// occurred at least once.
    fn events(&self) -> BoxStream<'static, DirEvent>;

    /// Return the latest network parameters.
    ///
    /// If we have no directory, return a reasonable set of defaults.
    fn params(&self) -> Arc<dyn AsRef<NetParameters>>;

    /// Get a NetDir from this provider, waiting until one exists.
    async fn wait_for_netdir(
        &self,
        timeliness: Timeliness,
    ) -> std::result::Result<Arc<NetDir>, NetdirProviderShutdown> {
        // Happy path: a suitable netdir may already be available.
        if let Ok(nd) = self.netdir(timeliness) {
            return Ok(nd);
        }

        let mut stream = self.events();
        loop {
            // We need to retry `self.netdir()` before waiting for any stream events, to
            // avoid deadlock.
            //
            // We ignore all errors here: they can all potentially be fixed by
            // getting a fresh consensus, and they will all get warned about
            // by the NetDirProvider itself.
            if let Ok(nd) = self.netdir(timeliness) {
                return Ok(nd);
            }
            match stream.next().await {
                Some(_) => {}
                None => {
                    // The event stream closed: the provider has shut down.
                    return Err(NetdirProviderShutdown);
                }
            }
        }
    }

    /// Wait until this provider lists `target`.
    ///
    /// NOTE: This might potentially wait indefinitely, if `target` never actually
    /// becomes listed in the directory. It will exit if the `NetDirProvider` shuts down.
    async fn wait_for_netdir_to_list(
        &self,
        target: &tor_linkspec::RelayIds,
        timeliness: Timeliness,
    ) -> std::result::Result<(), NetdirProviderShutdown> {
        let mut events = self.events();
        loop {
            // See if the desired relay is in the netdir.
            //
            // We do this before waiting for any events, to avoid race conditions.
            {
                let netdir = self.wait_for_netdir(timeliness).await?;
                if netdir.ids_listed(target) == Some(true) {
                    return Ok(());
                }
                // If we reach this point, then ids_listed returned `Some(false)`,
                // meaning "This relay is definitely not in the current directory";
                // or it returned `None`, meaning "waiting for more information
                // about this network directory."
                // In both cases, it's reasonable to just wait for another netdir
                // event and try again.
            }
            // We didn't find the relay; wait for the provider to have a new netdir
            // or more netdir information.
            if events.next().await.is_none() {
                // The event stream is closed; the provider has shut down.
                return Err(NetdirProviderShutdown);
            }
        }
    }

    /// Return the latest set of recommended and required protocols, if there is one.
    ///
    /// This may be more recent (or more available) than this provider's associated NetDir.
    fn protocol_statuses(&self) -> Option<(SystemTime, Arc<netstatus::ProtoStatuses>)>;
}
728
// Blanket implementation so that an `Arc<T>` can be used wherever a
// `NetDirProvider` is expected; every method delegates to the inner `T`.
//
// (Note that even `timely_netdir`, which has a default body, is forwarded —
// presumably so that any specialized implementation on `T` is preserved.)
impl<T> NetDirProvider for Arc<T>
where
    T: NetDirProvider,
{
    fn netdir(&self, timeliness: Timeliness) -> Result<Arc<NetDir>> {
        self.deref().netdir(timeliness)
    }

    fn timely_netdir(&self) -> Result<Arc<NetDir>> {
        self.deref().timely_netdir()
    }

    fn events(&self) -> BoxStream<'static, DirEvent> {
        self.deref().events()
    }

    fn params(&self) -> Arc<dyn AsRef<NetParameters>> {
        self.deref().params()
    }

    fn protocol_statuses(&self) -> Option<(SystemTime, Arc<netstatus::ProtoStatuses>)> {
        self.deref().protocol_statuses()
    }
}
753
/// Helper trait: allows any `Arc<X>` to be upcast to a `Arc<dyn
/// NetDirProvider>` if X is an implementation or supertrait of NetDirProvider.
///
/// This trait exists to work around a limitation in rust: when trait upcasting
/// coercion is stable, this will be unnecessary.
///
/// The Rust tracking issue is <https://github.com/rust-lang/rust/issues/65991>.
//
// NOTE(review): trait upcasting coercion has since been stabilized upstream;
// this workaround may be removable once the crate's MSRV allows — confirm.
pub trait UpcastArcNetDirProvider {
    /// Return a view of this object as an `Arc<dyn NetDirProvider>`
    fn upcast_arc<'a>(self: Arc<Self>) -> Arc<dyn NetDirProvider + 'a>
    where
        Self: 'a;
}
767
impl<T> UpcastArcNetDirProvider for T
where
    T: NetDirProvider + Sized,
{
    fn upcast_arc<'a>(self: Arc<Self>) -> Arc<dyn NetDirProvider + 'a>
    where
        Self: 'a,
    {
        // `Arc<T>` coerces directly to `Arc<dyn NetDirProvider>`, since the
        // concrete type `T` implements `NetDirProvider`.
        self
    }
}
779
impl AsRef<NetParameters> for NetDir {
    fn as_ref(&self) -> &NetParameters {
        // Delegate to `NetDir::params`.
        self.params()
    }
}
785
/// A partially built NetDir -- it can't be unwrapped until it has
/// enough information to build safe paths.
#[derive(Debug, Clone)]
pub struct PartialNetDir {
    /// The netdir that's under construction.
    netdir: NetDir,

    /// The previous netdir, if we had one
    ///
    /// Used as a cache, so we can reuse information
    #[cfg(feature = "hs-common")]
    prev_netdir: Option<Arc<NetDir>>,
}
799
/// A view of a relay on the Tor network, suitable for building circuits.
// TODO: This should probably be a more specific struct, with a trait
// that implements it.
#[derive(Clone)]
pub struct Relay<'a> {
    /// The consensus router status entry for this relay.
    rs: &'a netstatus::MdRouterStatus,
    /// A microdescriptor for this relay.
    md: &'a Microdesc,
    /// The country code this relay is in, if we know one.
    #[cfg(feature = "geoip")]
    cc: Option<CountryCode>,
}
813
/// A relay that we haven't checked for validity or usability in
/// routing.
#[derive(Debug)]
pub struct UncheckedRelay<'a> {
    /// The consensus router status entry for this relay.
    rs: &'a netstatus::MdRouterStatus,
    /// A microdescriptor for this relay, if there is one.
    md: Option<&'a Microdesc>,
    /// The country code this relay is in, if we know one.
    #[cfg(feature = "geoip")]
    cc: Option<CountryCode>,
}
826
/// A partial or full network directory that we can download
/// microdescriptors for.
pub trait MdReceiver {
    /// Return an iterator over the digests for all of the microdescriptors
    /// that this netdir is missing (i.e. the ones we still want to download).
    fn missing_microdescs(&self) -> Box<dyn Iterator<Item = &MdDigest> + '_>;
    /// Add a microdescriptor to this netdir, if it was wanted.
    ///
    /// Return true if it was indeed wanted.
    fn add_microdesc(&mut self, md: Microdesc) -> bool;
    /// Return the number of missing microdescriptors.
    fn n_missing(&self) -> usize;
}
840
841impl PartialNetDir {
    /// Create a new PartialNetDir with a given consensus, and no
    /// microdescriptors loaded.
    ///
    /// If `replacement_params` is provided, override network parameters from
    /// the consensus with those from `replacement_params`.
    pub fn new(
        consensus: MdConsensus,
        replacement_params: Option<&netstatus::NetParams<i32>>,
    ) -> Self {
        Self::new_inner(
            consensus,
            replacement_params,
            // Without the "geoip" feature there is no database to consult.
            #[cfg(feature = "geoip")]
            None,
        )
    }
858
    /// Create a new PartialNetDir with GeoIP support.
    ///
    /// This does the same thing as `new()`, except the provided GeoIP database is used to add
    /// country codes to relays.
    ///
    /// (Only available when the `geoip` feature is enabled.)
    #[cfg(feature = "geoip")]
    pub fn new_with_geoip(
        consensus: MdConsensus,
        replacement_params: Option<&netstatus::NetParams<i32>>,
        geoip_db: &GeoipDb,
    ) -> Self {
        Self::new_inner(consensus, replacement_params, Some(geoip_db))
    }
871
    /// Implementation of the `new()` functions.
    fn new_inner(
        consensus: MdConsensus,
        replacement_params: Option<&netstatus::NetParams<i32>>,
        #[cfg(feature = "geoip")] geoip_db: Option<&GeoipDb>,
    ) -> Self {
        let mut params = NetParameters::default();

        // (We ignore unrecognized options here, since they come from
        // the consensus, and we don't expect to recognize everything
        // there.)
        let _ = params.saturating_update(consensus.params().iter());

        // Now see if the user has any parameters to override.
        // (We have to do this now, or else changes won't be reflected in our
        // weights.)
        if let Some(replacement) = replacement_params {
            for u in params.saturating_update(replacement.iter()) {
                warn!("Unrecognized option: override_net_params.{}", u);
            }
        }

        // Compute the weights we'll want to use for these relays.
        let weights = weight::WeightSet::from_consensus(&consensus, &params);

        let n_relays = consensus.c_relays().len();

        // Index the relays by the digests of their (not-yet-received)
        // microdescriptors, so that arriving microdescriptors can be matched
        // up quickly.
        let rsidx_by_missing = consensus
            .c_relays()
            .iter_enumerated()
            .map(|(rsidx, rs)| (*rs.md_digest(), rsidx))
            .collect();

        // Index the relays by their RSA identities.
        let rsidx_by_rsa = consensus
            .c_relays()
            .iter_enumerated()
            .map(|(rsidx, rs)| (*rs.rsa_identity(), rsidx))
            .collect();

        #[cfg(feature = "geoip")]
        let country_codes = if let Some(db) = geoip_db {
            consensus
                .c_relays()
                .iter()
                .map(|rs| {
                    db.lookup_country_code_multi(rs.addrs().map(|x| x.ip()))
                        .cloned()
                })
                .collect()
        } else {
            // No database: leave the country-code list empty.
            Default::default()
        };

        #[cfg(feature = "hs-common")]
        let hsdir_rings = Arc::new({
            let params = HsDirParams::compute(&consensus, &params).expect("Invalid consensus!");
            // TODO: It's a bit ugly to use expect above, but this function does
            // not return a Result. On the other hand, the error conditions under which
            // HsDirParams::compute can return Err are _very_ narrow and hard to
            // hit; see documentation in that function. As such, we probably
            // don't need to have this return a Result.

            params.map(HsDirRing::empty_from_params)
        });

        let netdir = NetDir {
            consensus: Arc::new(consensus),
            params,
            mds: vec![None; n_relays].into(),
            rsidx_by_missing,
            rsidx_by_rsa: Arc::new(rsidx_by_rsa),
            rsidx_by_ed: HashMap::with_capacity(n_relays),
            #[cfg(feature = "hs-common")]
            hsdir_rings,
            weights,
            #[cfg(feature = "geoip")]
            country_codes,
        };

        PartialNetDir {
            netdir,
            #[cfg(feature = "hs-common")]
            prev_netdir: None,
        }
    }
957
958 /// Return the declared lifetime of this PartialNetDir.
959 pub fn lifetime(&self) -> &netstatus::Lifetime {
960 self.netdir.lifetime()
961 }
962
963 /// Record a previous netdir, which can be used for reusing cached information
964 //
965 // Fills in as many missing microdescriptors as possible in this
966 // netdir, using the microdescriptors from the previous netdir.
967 //
968 // With HS enabled, stores the netdir for reuse of relay hash ring index values.
969 #[allow(clippy::needless_pass_by_value)] // prev might, or might not, be stored
970 pub fn fill_from_previous_netdir(&mut self, prev: Arc<NetDir>) {
971 for md in prev.mds.iter().flatten() {
972 self.netdir.add_arc_microdesc(md.clone());
973 }
974
975 #[cfg(feature = "hs-common")]
976 {
977 self.prev_netdir = Some(prev);
978 }
979 }
980
981 /// Compute the hash ring(s) for this NetDir
982 #[cfg(feature = "hs-common")]
983 fn compute_rings(&mut self) {
984 let params = HsDirParams::compute(&self.netdir.consensus, &self.netdir.params)
985 .expect("Invalid consensus");
986 // TODO: see TODO by similar expect in new()
987
988 self.netdir.hsdir_rings =
989 Arc::new(params.map(|params| {
990 HsDirRing::compute(params, &self.netdir, self.prev_netdir.as_deref())
991 }));
992 }
993
994 /// Return true if this are enough information in this directory
995 /// to build multihop paths.
996 pub fn have_enough_paths(&self) -> bool {
997 self.netdir.have_enough_paths()
998 }
999 /// If this directory has enough information to build multihop
1000 /// circuits, return it.
1001 pub fn unwrap_if_sufficient(
1002 #[allow(unused_mut)] mut self,
1003 ) -> std::result::Result<NetDir, PartialNetDir> {
1004 if self.netdir.have_enough_paths() {
1005 #[cfg(feature = "hs-common")]
1006 self.compute_rings();
1007 Ok(self.netdir)
1008 } else {
1009 Err(self)
1010 }
1011 }
1012}
1013
impl MdReceiver for PartialNetDir {
    // All three methods delegate to the inner, under-construction NetDir.
    fn missing_microdescs(&self) -> Box<dyn Iterator<Item = &MdDigest> + '_> {
        self.netdir.missing_microdescs()
    }
    fn add_microdesc(&mut self, md: Microdesc) -> bool {
        self.netdir.add_microdesc(md)
    }
    fn n_missing(&self) -> usize {
        self.netdir.n_missing()
    }
}
1025
1026impl NetDir {
    /// Return the declared lifetime of this NetDir.
    ///
    /// (This is the lifetime declared by the underlying consensus.)
    pub fn lifetime(&self) -> &netstatus::Lifetime {
        self.consensus.lifetime()
    }
1031
1032 /// Add `md` to this NetDir.
1033 ///
1034 /// Return true if we wanted it, and false otherwise.
1035 fn add_arc_microdesc(&mut self, md: Arc<Microdesc>) -> bool {
1036 if let Some(rsidx) = self.rsidx_by_missing.remove(md.digest()) {
1037 assert_eq!(self.c_relays()[rsidx].md_digest(), md.digest());
1038
1039 // There should never be two approved MDs in the same
1040 // consensus listing the same ID... but if there is,
1041 // we'll let the most recent one win.
1042 self.rsidx_by_ed.insert(*md.ed25519_id(), rsidx);
1043
1044 // Happy path: we did indeed want this one.
1045 self.mds[rsidx] = Some(md);
1046
1047 // Save some space in the missing-descriptor list.
1048 if self.rsidx_by_missing.len() < self.rsidx_by_missing.capacity() / 4 {
1049 self.rsidx_by_missing.shrink_to_fit();
1050 }
1051
1052 return true;
1053 }
1054
1055 // Either we already had it, or we never wanted it at all.
1056 false
1057 }
1058
    /// Construct a (possibly invalid) Relay object from a routerstatus and its
    /// index within the consensus.
    fn relay_from_rs_and_rsidx<'a>(
        &'a self,
        rs: &'a netstatus::MdRouterStatus,
        rsidx: RouterStatusIdx,
    ) -> UncheckedRelay<'a> {
        // `rs` must be the routerstatus stored at `rsidx`; these checks are
        // debug-only because every caller obtains both from the same index.
        debug_assert_eq!(self.c_relays()[rsidx].rsa_identity(), rs.rsa_identity());
        let md = self.mds[rsidx].as_deref();
        if let Some(md) = md {
            debug_assert_eq!(rs.md_digest(), md.digest());
        }

        UncheckedRelay {
            rs,
            md,
            // Country code (if any) computed when this directory was built.
            #[cfg(feature = "geoip")]
            cc: self.country_codes.get(rsidx.0).copied().flatten(),
        }
    }
1079
1080 /// Return the value of the hsdir_n_replicas param.
1081 #[cfg(feature = "hs-common")]
1082 fn n_replicas(&self) -> u8 {
1083 self.params
1084 .hsdir_n_replicas
1085 .get()
1086 .try_into()
1087 .expect("BoundedInt did not enforce bounds")
1088 }
1089
1090 /// Return the spread parameter for the specified `op`.
1091 #[cfg(feature = "hs-common")]
1092 fn spread(&self, op: HsDirOp) -> usize {
1093 let spread = match op {
1094 HsDirOp::Download => self.params.hsdir_spread_fetch,
1095 #[cfg(feature = "hs-service")]
1096 HsDirOp::Upload => self.params.hsdir_spread_store,
1097 };
1098
1099 spread
1100 .get()
1101 .try_into()
1102 .expect("BoundedInt did not enforce bounds!")
1103 }
1104
    /// Select `spread` hsdir relays for the specified `hsid` from a given `ring`.
    ///
    /// Algorithm:
    ///
    /// for idx in 1..=n_replicas:
    ///       - let H = hsdir_ring::onion_service_index(id, replica, rand,
    ///         period).
    ///       - Find the position of H within hsdir_ring.
    ///       - Take elements from hsdir_ring starting at that position,
    ///         adding them to Dirs until we have added `spread` new elements
    ///         that were not there before.
    #[cfg(feature = "hs-common")]
    fn select_hsdirs<'h, 'r: 'h>(
        &'r self,
        hsid: HsBlindId,
        ring: &'h HsDirRing,
        spread: usize,
    ) -> impl Iterator<Item = Relay<'r>> + 'h {
        let n_replicas = self.n_replicas();

        (1..=n_replicas) // 1-indexed !
            .flat_map({
                // Ring positions already chosen for a lower-numbered replica;
                // moved into the closure so it persists across replicas.
                let mut selected_nodes = HashSet::new();

                move |replica: u8| {
                    let hsdir_idx = hsdir_ring::service_hsdir_index(&hsid, replica, ring.params());

                    ring.ring_items_at(hsdir_idx, spread, |(hsdir_idx, _)| {
                        // According to rend-spec 2.2.3:
                        //       ... If any of those
                        //       nodes have already been selected for a lower-numbered replica of the
                        //       service, any nodes already chosen are disregarded (i.e. skipped over)
                        //       when choosing a replica's hsdir_spread_store nodes.
                        selected_nodes.insert(*hsdir_idx)
                    })
                    .collect::<Vec<_>>()
                }
            })
            .filter_map(move |(_hsdir_idx, rs_idx)| {
                // This ought not to be None but let's not panic or bail if it is
                self.relay_by_rs_idx(*rs_idx)
            })
    }
1148
1149 /// Replace the overridden parameters in this netdir with `new_replacement`.
1150 ///
1151 /// After this function is done, the netdir's parameters will be those in
1152 /// the consensus, overridden by settings from `new_replacement`. Any
1153 /// settings in the old replacement parameters will be discarded.
1154 pub fn replace_overridden_parameters(&mut self, new_replacement: &netstatus::NetParams<i32>) {
1155 // TODO(nickm): This is largely duplicate code from PartialNetDir::new().
1156 let mut new_params = NetParameters::default();
1157 let _ = new_params.saturating_update(self.consensus.params().iter());
1158 for u in new_params.saturating_update(new_replacement.iter()) {
1159 warn!("Unrecognized option: override_net_params.{}", u);
1160 }
1161
1162 self.params = new_params;
1163 }
1164
1165 /// Return an iterator over all Relay objects, including invalid ones
1166 /// that we can't use.
1167 pub fn all_relays(&self) -> impl Iterator<Item = UncheckedRelay<'_>> {
1168 // TODO: I'd like if we could memoize this so we don't have to
1169 // do so many hashtable lookups.
1170 self.c_relays()
1171 .iter_enumerated()
1172 .map(move |(rsidx, rs)| self.relay_from_rs_and_rsidx(rs, rsidx))
1173 }
1174 /// Return an iterator over all [usable](NetDir#usable) Relays.
1175 pub fn relays(&self) -> impl Iterator<Item = Relay<'_>> {
1176 self.all_relays().filter_map(UncheckedRelay::into_relay)
1177 }
1178
1179 /// Look up a relay's [`Microdesc`] by its [`RouterStatusIdx`]
1180 #[cfg_attr(not(feature = "hs-common"), allow(dead_code))]
1181 pub(crate) fn md_by_rsidx(&self, rsidx: RouterStatusIdx) -> Option<&Microdesc> {
1182 self.mds.get(rsidx)?.as_deref()
1183 }
1184
    /// Return a relay matching a given identity, if we have a
    /// _usable_ relay with that key.
    ///
    /// (Does not return [unusable](NetDir#usable) relays.)
    ///
    /// Note that a `None` answer is not always permanent: if a microdescriptor
    /// is subsequently added for a relay with this ID, the ID may become usable
    /// even if it was not usable before.
    pub fn by_id<'a, T>(&self, id: T) -> Option<Relay<'_>>
    where
        T: Into<RelayIdRef<'a>>,
    {
        let id = id.into();
        let answer = match id {
            // Ed25519 identities are looked up via the index built as
            // microdescriptors arrive; a stored rsidx is always valid.
            RelayIdRef::Ed25519(ed25519) => {
                let rsidx = *self.rsidx_by_ed.get(ed25519)?;
                let rs = self.c_relays().get(rsidx).expect("Corrupt index");

                self.relay_from_rs_and_rsidx(rs, rsidx).into_relay()?
            }
            // RSA identities have their own index built from the consensus.
            RelayIdRef::Rsa(rsa) => self
                .by_rsa_id_unchecked(rsa)
                .and_then(UncheckedRelay::into_relay)?,
            // Any other identity type requires a linear scan of the relays.
            other_type => self.relays().find(|r| r.has_identity(other_type))?,
        };
        assert!(answer.has_identity(id));
        Some(answer)
    }
1214
    /// Obtain a `Relay` given a `RouterStatusIdx`
    ///
    /// Differs from `relay_from_rs_and_rsidx` as follows:
    /// * That function expects the caller to already have an `MdRouterStatus`;
    ///   it checks with `debug_assert` that the relay in the netdir matches.
    /// * That function panics if the `RouterStatusIdx` is invalid; this one returns `None`.
    /// * That function returns an `UncheckedRelay`; this one a `Relay`.
    ///
    /// `None` could be returned here, even with a valid `rsi`,
    /// if `rsi` refers to an [unusable](NetDir#usable) relay.
    #[cfg_attr(not(feature = "hs-common"), allow(dead_code))]
    pub(crate) fn relay_by_rs_idx(&self, rs_idx: RouterStatusIdx) -> Option<Relay<'_>> {
        let rs = self.c_relays().get(rs_idx)?;
        let md = self.mds.get(rs_idx)?.as_deref();
        UncheckedRelay {
            rs,
            md,
            #[cfg(feature = "geoip")]
            cc: self.country_codes.get(rs_idx.0).copied().flatten(),
        }
        .into_relay()
    }
1237
1238 /// Return a relay with the same identities as those in `target`, if one
1239 /// exists.
1240 ///
1241 /// Does not return [unusable](NetDir#usable) relays.
1242 ///
1243 /// Note that a negative result from this method is not necessarily permanent:
1244 /// it may be the case that a relay exists,
1245 /// but we don't yet have enough information about it to know all of its IDs.
1246 /// To test whether a relay is *definitely* absent,
1247 /// use [`by_ids_detailed`](Self::by_ids_detailed)
1248 /// or [`ids_listed`](Self::ids_listed).
1249 ///
1250 /// # Limitations
1251 ///
1252 /// This will be very slow if `target` does not have an Ed25519 or RSA
1253 /// identity.
1254 pub fn by_ids<T>(&self, target: &T) -> Option<Relay<'_>>
1255 where
1256 T: HasRelayIds + ?Sized,
1257 {
1258 let mut identities = target.identities();
1259 // Don't try if there are no identities.
1260 let first_id = identities.next()?;
1261
1262 // Since there is at most one relay with each given ID type,
1263 // we only need to check the first relay we find.
1264 let candidate = self.by_id(first_id)?;
1265 if identities.all(|wanted_id| candidate.has_identity(wanted_id)) {
1266 Some(candidate)
1267 } else {
1268 None
1269 }
1270 }
1271
    /// Check whether there is a relay that has at least one identity from
    /// `target`, and which _could_ have every identity from `target`.
    /// If so, return such a relay.
    ///
    /// Return `Ok(None)` if we did not find a relay with any identity from `target`.
    ///
    /// Return `RelayLookupError::Impossible` if we found a relay with at least
    /// one identity from `target`, but that relay's other identities contradict
    /// what we learned from `target`.
    ///
    /// Does not return [unusable](NetDir#usable) relays.
    ///
    /// (This function is only useful if you need to distinguish the
    /// "impossible" case from the "no such relay known" case.)
    ///
    /// # Limitations
    ///
    /// This will be very slow if `target` does not have an Ed25519 or RSA
    /// identity.
    //
    // TODO HS: This function could use a better name.
    //
    // TODO: We could remove the feature restriction here once we think this API is
    // stable.
    #[cfg(feature = "hs-common")]
    pub fn by_ids_detailed<T>(
        &self,
        target: &T,
    ) -> std::result::Result<Option<Relay<'_>>, RelayLookupError>
    where
        T: HasRelayIds + ?Sized,
    {
        let candidate = target
            .identities()
            // Find all the relays that share any identity with this set of identities.
            .filter_map(|id| self.by_id(id))
            // We might find the same relay more than once under a different
            // identity, so we remove the duplicates.
            //
            // Since there is at most one relay per rsa identity per consensus,
            // this is a true uniqueness check under current construction rules.
            .unique_by(|r| r.rs.rsa_identity())
            // If we find two or more distinct relays, then we have a contradiction.
            .at_most_one()
            .map_err(|_| RelayLookupError::Impossible)?;

        // If we have no candidate, return None early.
        let candidate = match candidate {
            Some(relay) => relay,
            None => return Ok(None),
        };

        // Now we know we have a single candidate. Make sure that it does not have any
        // identity that does not match the target.
        if target
            .identities()
            .all(|wanted_id| match candidate.identity(wanted_id.id_type()) {
                None => true,
                Some(id) => id == wanted_id,
            })
        {
            Ok(Some(candidate))
        } else {
            Err(RelayLookupError::Impossible)
        }
    }
1338
1339 /// Return a boolean if this consensus definitely has (or does not have) a
1340 /// relay matching the listed identities.
1341 ///
1342 /// `Some(true)` indicates that the relay exists.
1343 /// `Some(false)` indicates that the relay definitely does not exist.
1344 /// `None` indicates that we can't yet tell whether such a relay exists,
1345 /// due to missing information.
1346 fn id_pair_listed(&self, ed_id: &Ed25519Identity, rsa_id: &RsaIdentity) -> Option<bool> {
1347 let r = self.by_rsa_id_unchecked(rsa_id);
1348 match r {
1349 Some(unchecked) => {
1350 if !unchecked.rs.ed25519_id_is_usable() {
1351 return Some(false);
1352 }
1353 // If md is present, then it's listed iff we have the right
1354 // ed id. Otherwise we don't know if it's listed.
1355 unchecked.md.map(|md| md.ed25519_id() == ed_id)
1356 }
1357 None => {
1358 // Definitely not listed.
1359 Some(false)
1360 }
1361 }
1362 }
1363
1364 /// Check whether a relay exists (or may exist)
1365 /// with the same identities as those in `target`.
1366 ///
1367 /// `Some(true)` indicates that the relay exists.
1368 /// `Some(false)` indicates that the relay definitely does not exist.
1369 /// `None` indicates that we can't yet tell whether such a relay exists,
1370 /// due to missing information.
1371 pub fn ids_listed<T>(&self, target: &T) -> Option<bool>
1372 where
1373 T: HasRelayIds + ?Sized,
1374 {
1375 let rsa_id = target.rsa_identity();
1376 let ed25519_id = target.ed_identity();
1377
1378 // TODO: If we later support more identity key types, this will
1379 // become incorrect. This assertion might help us recognize that case.
1380 const _: () = assert!(RelayIdType::COUNT == 2);
1381
1382 match (rsa_id, ed25519_id) {
1383 (Some(r), Some(e)) => self.id_pair_listed(e, r),
1384 (Some(r), None) => Some(self.rsa_id_is_listed(r)),
1385 (None, Some(e)) => {
1386 if self.rsidx_by_ed.contains_key(e) {
1387 Some(true)
1388 } else {
1389 None
1390 }
1391 }
1392 (None, None) => None,
1393 }
1394 }
1395
    /// Return a (possibly [unusable](NetDir#usable)) relay with a given RSA identity.
    ///
    /// This API can be used to find information about a relay that is listed in
    /// the current consensus, even if we don't yet have enough information
    /// (like a microdescriptor) about the relay to use it.
    #[cfg_attr(feature = "experimental-api", visibility::make(pub))]
    #[cfg_attr(docsrs, doc(cfg(feature = "experimental-api")))]
    fn by_rsa_id_unchecked(&self, rsa_id: &RsaIdentity) -> Option<UncheckedRelay<'_>> {
        let rsidx = *self.rsidx_by_rsa.get(rsa_id)?;
        // The index was built from this consensus, so a stored rsidx must
        // always point at a routerstatus with the matching RSA identity.
        let rs = self.c_relays().get(rsidx).expect("Corrupt index");
        assert_eq!(rs.rsa_identity(), rsa_id);
        Some(self.relay_from_rs_and_rsidx(rs, rsidx))
    }
    /// Return the relay with a given RSA identity, if we have one
    /// and it is [usable](NetDir#usable).
    fn by_rsa_id(&self, rsa_id: &RsaIdentity) -> Option<Relay<'_>> {
        self.by_rsa_id_unchecked(rsa_id)?.into_relay()
    }
    /// Return true if `rsa_id` is listed in this directory, even if it isn't
    /// currently usable.
    ///
    /// (An "[unusable](NetDir#usable)" relay in this context is one for which we don't have full
    /// directory information.)
    #[cfg_attr(feature = "experimental-api", visibility::make(pub))]
    #[cfg_attr(docsrs, doc(cfg(feature = "experimental-api")))]
    fn rsa_id_is_listed(&self, rsa_id: &RsaIdentity) -> bool {
        self.by_rsa_id_unchecked(rsa_id).is_some()
    }
1424
1425 /// List the hsdirs in this NetDir, that should be in the HSDir rings
1426 ///
1427 /// The results are not returned in any particular order.
1428 #[cfg(feature = "hs-common")]
1429 fn all_hsdirs(&self) -> impl Iterator<Item = (RouterStatusIdx, Relay<'_>)> {
1430 self.c_relays().iter_enumerated().filter_map(|(rsidx, rs)| {
1431 let relay = self.relay_from_rs_and_rsidx(rs, rsidx);
1432 relay.is_hsdir_for_ring().then_some(())?;
1433 let relay = relay.into_relay()?;
1434 Some((rsidx, relay))
1435 })
1436 }
1437
    /// Return the parameters from the consensus, clamped to the
    /// correct ranges, with defaults filled in.
    ///
    /// Note that unsupported parameters aren't returned here; only those
    /// values configured in the `params` module are available.
    pub fn params(&self) -> &NetParameters {
        &self.params
    }
1446
    /// Return a [`ProtoStatus`](netstatus::ProtoStatus) that lists the
    /// network's current requirements and recommendations for the list of
    /// protocols that every relay must implement.
    //
    // TODO HS: I am not sure this is the right API; other alternatives would be:
    // * To expose the _required_ relay protocol list instead (since that's all that
    //   onion service implementations need).
    // * To expose the client protocol list as well (for symmetry).
    // * To expose the MdConsensus instead (since that's more general, although
    //   it restricts the future evolution of this API).
    //
    // I think that this is a reasonably good compromise for now, but I'm going
    // to put it behind the `hs-common` feature to give us time to consider more.
    #[cfg(feature = "hs-common")]
    pub fn relay_protocol_status(&self) -> &netstatus::ProtoStatus {
        self.consensus.relay_protocol_status()
    }

    /// Return a [`ProtoStatus`](netstatus::ProtoStatus) that lists the
    /// network's current requirements and recommendations for the list of
    /// protocols that every client must implement.
    //
    // TODO HS: See notes on relay_protocol_status above.
    #[cfg(feature = "hs-common")]
    pub fn client_protocol_status(&self) -> &netstatus::ProtoStatus {
        self.consensus.client_protocol_status()
    }
1474
    /// Construct a `CircTarget` from an externally provided list of link specifiers,
    /// and an externally provided onion key.
    ///
    /// This method is used in the onion service protocol,
    /// where introduction points and rendezvous points are specified using these inputs.
    ///
    /// This function is a member of `NetDir` so that it can provide a reasonable list of
    /// [`Protocols`](tor_protover::Protocols) capabilities for the generated `CircTarget`.
    /// It does not (and should not!) look up anything else from the directory.
    #[cfg(feature = "hs-common")]
    pub fn circ_target_from_verbatim_linkspecs(
        &self,
        linkspecs: &[tor_linkspec::EncodedLinkSpec],
        ntor_onion_key: &curve25519::PublicKey,
    ) -> StdResult<VerbatimLinkSpecCircTarget<OwnedCircTarget>, VerbatimCircTargetDecodeError> {
        use VerbatimCircTargetDecodeError as E;
        use tor_linkspec::CircTarget as _;
        use tor_linkspec::decode::Strictness;

        let mut bld = OwnedCircTarget::builder();
        use tor_error::into_internal;

        // Decode the provided link specifiers into a channel target.
        *bld.chan_target() =
            OwnedChanTargetBuilder::from_encoded_linkspecs(Strictness::Standard, linkspecs)?;
        let protocols = {
            let chan_target = bld.chan_target().build().map_err(into_internal!(
                "from_encoded_linkspecs gave an invalid output"
            ))?;
            // If the directory lists a relay with these identities, use its
            // advertised protocols; otherwise, fall back to the consensus's
            // required relay protocols.
            match self
                .by_ids_detailed(&chan_target)
                .map_err(E::ImpossibleIds)?
            {
                Some(relay) => relay.protovers().clone(),
                None => self.relay_protocol_status().required_protocols().clone(),
            }
        };
        bld.protocols(protocols);
        bld.ntor_onion_key(*ntor_onion_key);
        Ok(VerbatimLinkSpecCircTarget::new(
            bld.build()
                .map_err(into_internal!("Failed to construct a valid circtarget"))?,
            linkspecs.to_vec(),
        ))
    }
1519
    /// Return the weighted fraction of relays we can use. We only
    /// consider relays that match the predicate `usable`. We weight
    /// this bandwidth according to the provided `role`.
    ///
    /// If _no_ matching relays in the consensus have a nonzero
    /// weighted bandwidth value, we fall back to looking at the
    /// unweighted fraction of matching relays.
    ///
    /// If there are no matching relays in the consensus, we return 0.0.
    fn frac_for_role<'a, F>(&'a self, role: WeightRole, usable: F) -> f64
    where
        F: Fn(&UncheckedRelay<'a>) -> bool,
    {
        // Running totals over all matching relays (`total_*`) and over the
        // subset that are usable, i.e. have their descriptors (`have_*`).
        let mut total_weight = 0_u64;
        let mut have_weight = 0_u64;
        let mut have_count = 0_usize;
        let mut total_count = 0_usize;

        for r in self.all_relays() {
            if !usable(&r) {
                continue;
            }
            let w = self.weights.weight_rs_for_role(r.rs, role);
            total_weight += w;
            total_count += 1;
            if r.is_usable() {
                have_weight += w;
                have_count += 1;
            }
        }

        if total_weight > 0 {
            // The consensus lists some weighted bandwidth so return the
            // fraction of the weighted bandwidth for which we have
            // descriptors.
            (have_weight as f64) / (total_weight as f64)
        } else if total_count > 0 {
            // The consensus lists no weighted bandwidth for these relays,
            // but at least it does list relays. Return the fraction of
            // relays for which we have descriptors.
            (have_count as f64) / (total_count as f64)
        } else {
            // There are no relays of this kind in the consensus. Return
            // 0.0, to avoid dividing by zero and giving NaN.
            0.0
        }
    }
    /// Return the estimated fraction of possible paths that we have
    /// enough microdescriptors to build.
    ///
    /// (Computed as the product of the usable fractions for the guard,
    /// middle, and exit positions.)
    fn frac_usable_paths(&self) -> f64 {
        // TODO #504, TODO SPEC: We may want to add a set of is_flagged_fast() and/or
        // is_flagged_stable() checks here. This will require spec clarification.
        let f_g = self.frac_for_role(WeightRole::Guard, |u| {
            u.low_level_details().is_suitable_as_guard()
        });
        let f_m = self.frac_for_role(WeightRole::Middle, |_| true);
        let f_e = if self.all_relays().any(|u| u.rs.is_flagged_exit()) {
            self.frac_for_role(WeightRole::Exit, |u| u.rs.is_flagged_exit())
        } else {
            // If there are no exits at all, we use f_m here.
            f_m
        };
        f_g * f_m * f_e
    }
1584 /// Return true if there is enough information in this NetDir to build
1585 /// multihop circuits.
1586 fn have_enough_paths(&self) -> bool {
1587 // TODO-A001: This should check for our guards as well, and
1588 // make sure that if they're listed in the consensus, we have
1589 // the descriptors for them.
1590
1591 // If we can build a randomly chosen path with at least this
1592 // probability, we know enough information to participate
1593 // on the network.
1594
1595 let min_frac_paths: f64 = self.params().min_circuit_path_threshold.as_fraction();
1596
1597 // What fraction of paths can we build?
1598 let available = self.frac_usable_paths();
1599
1600 available >= min_frac_paths
1601 }
    /// Choose a relay at random.
    ///
    /// Each relay is chosen with probability proportional to its weight
    /// in the role `role`, and is only selected if the predicate `usable`
    /// returns true for it.
    ///
    /// This function returns None if (and only if) there are no relays
    /// with nonzero weight where `usable` returned true.
    //
    // TODO this API, with the `usable` closure, invites mistakes where we fail to
    // check conditions that are implied by the role we have selected for the relay:
    // call sites must include a call to `Relay::is_polarity_inverter()` or whatever.
    // IMO the `WeightRole` ought to imply a condition (and it should therefore probably
    // be renamed.) -Diziet
    pub fn pick_relay<'a, R, P>(
        &'a self,
        rng: &mut R,
        role: WeightRole,
        usable: P,
    ) -> Option<Relay<'a>>
    where
        R: rand::Rng,
        P: FnMut(&Relay<'a>) -> bool,
    {
        let relays: Vec<_> = self.relays().filter(usable).collect();
        // This algorithm uses rand::distr::WeightedIndex, which
        // gives O(n) time and space to build the index, plus O(log n)
        // sampling time.
        //
        // We might be better off building a WeightedIndex in advance
        // for each `role`, and then sampling it repeatedly until we
        // get a relay that satisfies `usable`. Or we might not --
        // that depends heavily on the actual particulars of our
        // inputs. We probably shouldn't make any changes there
        // unless profiling tells us that this function is in a hot
        // path.
        //
        // The C Tor sampling implementation goes through some trouble
        // here to try to make its path selection constant-time. I
        // believe that there is no actual remotely exploitable
        // side-channel here however. It could be worth analyzing in
        // the future.
        //
        // This code will give the wrong result if the total of all weights
        // can exceed u64::MAX. We make sure that can't happen when we
        // set up `self.weights`.
        match relays[..].choose_weighted(rng, |r| self.weights.weight_rs_for_role(r.rs, role)) {
            Ok(relay) => Some(relay.clone()),
            Err(WeightError::InsufficientNonZero) => {
                // Every candidate had zero weight: fall back to a uniform
                // choice rather than returning nothing.
                if relays.is_empty() {
                    None
                } else {
                    warn!(?self.weights, ?role,
                          "After filtering, all {} relays had zero weight. Choosing one at random. See bug #1907.",
                          relays.len());
                    relays.choose(rng).cloned()
                }
            }
            Err(e) => {
                warn_report!(e, "Unexpected error while sampling a relay");
                None
            }
        }
    }
1666
    /// Choose `n` relays at random.
    ///
    /// Each relay is chosen with probability proportional to its weight
    /// in the role `role`, and is only selected if the predicate `usable`
    /// returns true for it.
    ///
    /// Relays are chosen without replacement: no relay will be
    /// returned twice. Therefore, the resulting vector may be smaller
    /// than `n` if we happen to have fewer than `n` appropriate relays.
    ///
    /// This function returns an empty vector if (and only if) there
    /// are no relays with nonzero weight where `usable` returned
    /// true.
    #[allow(clippy::cognitive_complexity)] // all due to tracing crate.
    pub fn pick_n_relays<'a, R, P>(
        &'a self,
        rng: &mut R,
        n: usize,
        role: WeightRole,
        usable: P,
    ) -> Vec<Relay<'a>>
    where
        R: rand::Rng,
        P: FnMut(&Relay<'a>) -> bool,
    {
        let relays: Vec<_> = self.relays().filter(usable).collect();
        // NOTE: See discussion in pick_relay().
        let mut relays = match relays[..].choose_multiple_weighted(rng, n, |r| {
            self.weights.weight_rs_for_role(r.rs, role) as f64
        }) {
            Err(WeightError::InsufficientNonZero) => {
                // Too few relays had nonzero weights: return all of those that are okay.
                // (This behavior used to come up with rand 0.9; it no longer does.
                // We still detect it.)
                let remaining: Vec<_> = relays
                    .iter()
                    .filter(|r| self.weights.weight_rs_for_role(r.rs, role) > 0)
                    .cloned()
                    .collect();
                if remaining.is_empty() {
                    warn!(?self.weights, ?role,
                          "After filtering, all {} relays had zero weight! Picking some at random. See bug #1907.",
                          relays.len());
                    if relays.len() >= n {
                        relays.choose_multiple(rng, n).cloned().collect()
                    } else {
                        relays
                    }
                } else {
                    warn!(?self.weights, ?role,
                          "After filtering, only had {}/{} relays with nonzero weight. Returning them all. See bug #1907.",
                          remaining.len(), relays.len());
                    remaining
                }
            }
            Err(e) => {
                warn_report!(e, "Unexpected error while sampling a set of relays");
                Vec::new()
            }
            Ok(iter) => {
                let selection: Vec<_> = iter.map(Relay::clone).collect();
                if selection.len() < n && selection.len() < relays.len() {
                    warn!(?self.weights, ?role,
                          "choose_multiple_weighted returned only {returned}, despite requesting {n}, \
                           and having {filtered_len} available after filtering. See bug #1907.",
                          returned=selection.len(), filtered_len=relays.len());
                }
                selection
            }
        };
        // The weighted sampler does not promise a random order, so shuffle.
        relays.shuffle(rng);
        relays
    }
1740
1741 /// Compute the weight with which `relay` will be selected for a given
1742 /// `role`.
1743 pub fn relay_weight<'a>(&'a self, relay: &Relay<'a>, role: WeightRole) -> RelayWeight {
1744 RelayWeight(self.weights.weight_rs_for_role(relay.rs, role))
1745 }
1746
    /// Compute the total weight with which any relay matching `usable`
    /// will be selected for a given `role`.
    ///
    /// Note: because this function is used to assess the total
    /// properties of the consensus, the `usable` predicate takes an
    /// [`UncheckedRelay`] rather than a [`Relay`].
    pub fn total_weight<P>(&self, role: WeightRole, usable: P) -> RelayWeight
    where
        P: Fn(&UncheckedRelay<'_>) -> bool,
    {
        self.all_relays()
            .filter_map(|unchecked| {
                if usable(&unchecked) {
                    Some(RelayWeight(
                        self.weights.weight_rs_for_role(unchecked.rs, role),
                    ))
                } else {
                    None
                }
            })
            .sum()
    }
1769
1770 /// Compute the weight with which a relay with ID `rsa_id` would be
1771 /// selected for a given `role`.
1772 ///
1773 /// Note that weight returned by this function assumes that the
1774 /// relay with that ID is actually [usable](NetDir#usable); if it isn't usable,
1775 /// then other weight-related functions will call its weight zero.
1776 pub fn weight_by_rsa_id(&self, rsa_id: &RsaIdentity, role: WeightRole) -> Option<RelayWeight> {
1777 self.by_rsa_id_unchecked(rsa_id)
1778 .map(|unchecked| RelayWeight(self.weights.weight_rs_for_role(unchecked.rs, role)))
1779 }
1780
1781 /// Return all relays in this NetDir known to be in the same family as
1782 /// `relay`.
1783 ///
1784 /// This list of members will **not** necessarily include `relay` itself.
1785 ///
1786 /// # Limitations
1787 ///
1788 /// Two relays only belong to the same family if _each_ relay
1789 /// claims to share a family with the other. But if we are
1790 /// missing a microdescriptor for one of the relays listed by this
1791 /// relay, we cannot know whether it acknowledges family
1792 /// membership with this relay or not. Therefore, this function
1793 /// can omit family members for which there is not (as yet) any
1794 /// Relay object.
1795 pub fn known_family_members<'a>(
1796 &'a self,
1797 relay: &'a Relay<'a>,
1798 ) -> impl Iterator<Item = Relay<'a>> {
1799 let relay_rsa_id = relay.rsa_id();
1800 relay.md.family().members().filter_map(move |other_rsa_id| {
1801 self.by_rsa_id(other_rsa_id)
1802 .filter(|other_relay| other_relay.md.family().contains(relay_rsa_id))
1803 })
1804 }
1805
1806 /// Return the current hidden service directory "time period".
1807 ///
1808 /// Specifically, this returns the time period that contains the beginning
1809 /// of the validity period of this `NetDir`'s consensus. That time period
1810 /// is the one we use when acting as an hidden service client.
1811 #[cfg(feature = "hs-common")]
1812 pub fn hs_time_period(&self) -> TimePeriod {
1813 self.hsdir_rings.current.time_period()
1814 }
1815
1816 /// Return the [`HsDirParams`] of all the relevant hidden service directory "time periods"
1817 ///
1818 /// This includes the current time period (as from
1819 /// [`.hs_time_period`](NetDir::hs_time_period))
1820 /// plus additional time periods that we publish descriptors for when we are
1821 /// acting as a hidden service.
1822 #[cfg(feature = "hs-service")]
1823 pub fn hs_all_time_periods(&self) -> Vec<HsDirParams> {
1824 self.hsdir_rings
1825 .iter()
1826 .map(|r| r.params().clone())
1827 .collect()
1828 }
1829
1830 /// Return the relays in this network directory that will be used as hidden service directories
1831 ///
1832 /// These are suitable to retrieve a given onion service's descriptor at a given time period.
1833 #[cfg(feature = "hs-common")]
1834 pub fn hs_dirs_download<'r, R>(
1835 &'r self,
1836 hsid: HsBlindId,
1837 period: TimePeriod,
1838 rng: &mut R,
1839 ) -> std::result::Result<Vec<Relay<'r>>, Bug>
1840 where
1841 R: rand::Rng,
1842 {
1843 // Algorithm:
1844 //
1845 // 1. Determine which HsDirRing to use, based on the time period.
1846 // 2. Find the shared random value that's associated with that HsDirRing.
1847 // 3. Choose spread = the parameter `hsdir_spread_fetch`
1848 // 4. Let n_replicas = the parameter `hsdir_n_replicas`.
1849 // 5. Initialize Dirs = []
1850 // 6. for idx in 1..=n_replicas:
1851 // - let H = hsdir_ring::onion_service_index(id, replica, rand,
1852 // period).
1853 // - Find the position of H within hsdir_ring.
1854 // - Take elements from hsdir_ring starting at that position,
1855 // adding them to Dirs until we have added `spread` new elements
1856 // that were not there before.
1857 // 7. Shuffle Dirs
1858 // 8. return Dirs.
1859
1860 let spread = self.spread(HsDirOp::Download);
1861
1862 // When downloading, only look at relays on current ring.
1863 let ring = &self.hsdir_rings.current;
1864
1865 if ring.params().time_period != period {
1866 return Err(internal!(
1867 "our current ring is not associated with the requested time period!"
1868 ));
1869 }
1870
1871 let mut hs_dirs = self.select_hsdirs(hsid, ring, spread).collect_vec();
1872
1873 // When downloading, the order of the returned relays is random.
1874 hs_dirs.shuffle(rng);
1875
1876 Ok(hs_dirs)
1877 }
1878
1879 /// Return the relays in this network directory that will be used as hidden service directories
1880 ///
1881 /// Returns the relays that are suitable for storing a given onion service's descriptors at the
1882 /// given time period.
1883 #[cfg(feature = "hs-service")]
1884 pub fn hs_dirs_upload(
1885 &self,
1886 hsid: HsBlindId,
1887 period: TimePeriod,
1888 ) -> std::result::Result<impl Iterator<Item = Relay<'_>>, Bug> {
1889 // Algorithm:
1890 //
1891 // 1. Choose spread = the parameter `hsdir_spread_store`
1892 // 2. Determine which HsDirRing to use, based on the time period.
1893 // 3. Find the shared random value that's associated with that HsDirRing.
1894 // 4. Let n_replicas = the parameter `hsdir_n_replicas`.
1895 // 5. Initialize Dirs = []
1896 // 6. for idx in 1..=n_replicas:
1897 // - let H = hsdir_ring::onion_service_index(id, replica, rand,
1898 // period).
1899 // - Find the position of H within hsdir_ring.
1900 // - Take elements from hsdir_ring starting at that position,
1901 // adding them to Dirs until we have added `spread` new elements
1902 // that were not there before.
1903 // 3. return Dirs.
1904 let spread = self.spread(HsDirOp::Upload);
1905
1906 // For each HsBlindId, determine which HsDirRing to use.
1907 let rings = self
1908 .hsdir_rings
1909 .iter()
1910 .filter_map(move |ring| {
1911 // Make sure the ring matches the TP of the hsid it's matched with.
1912 (ring.params().time_period == period).then_some((ring, hsid, period))
1913 })
1914 .collect::<Vec<_>>();
1915
1916 // The specified period should have an associated ring.
1917 if !rings.iter().any(|(_, _, tp)| *tp == period) {
1918 return Err(internal!(
1919 "the specified time period does not have an associated ring"
1920 ));
1921 };
1922
1923 // Now that we've matched each `hsid` with the ring associated with its TP, we can start
1924 // selecting replicas from each ring.
1925 Ok(rings.into_iter().flat_map(move |(ring, hsid, period)| {
1926 assert_eq!(period, ring.params().time_period());
1927 self.select_hsdirs(hsid, ring, spread)
1928 }))
1929 }
1930
    /// Return the relays in this network directory that will be used as hidden service directories
    ///
    /// Depending on `op`,
    /// these are suitable to either store, or retrieve, a
    /// given onion service's descriptor at a given time period.
    ///
    /// When `op` is `Download`, the order is random.
    /// When `op` is `Upload`, the order is not specified.
    ///
    /// Return an error if the time period is not one returned by
    /// `onion_service_time_period` or `onion_service_secondary_time_periods`.
    //
    // TODO: make HsDirOp pub(crate) once this is removed
    #[cfg(feature = "hs-common")]
    #[deprecated(note = "Use hs_dirs_upload or hs_dirs_download instead")]
    pub fn hs_dirs<'r, R>(&'r self, hsid: &HsBlindId, op: HsDirOp, rng: &mut R) -> Vec<Relay<'r>>
    where
        R: rand::Rng,
    {
        // Algorithm:
        //
        // 1. Determine which HsDirRing to use, based on the time period.
        // 2. Find the shared random value that's associated with that HsDirRing.
        // 3. Choose spread = the parameter `hsdir_spread_store` or
        //    `hsdir_spread_fetch` based on `op`.
        // 4. Let n_replicas = the parameter `hsdir_n_replicas`.
        // 5. Initialize Dirs = []
        // 6. for idx in 1..=n_replicas:
        //    - let H = hsdir_ring::onion_service_index(id, replica, rand,
        //      period).
        //    - Find the position of H within hsdir_ring.
        //    - Take elements from hsdir_ring starting at that position,
        //      adding them to Dirs until we have added `spread` new elements
        //      that were not there before.
        // 7. return Dirs.
        //
        // The `expect` below is safe because the consensus parameter is a
        // bounded integer whose range fits the target type.
        let n_replicas = self
            .params
            .hsdir_n_replicas
            .get()
            .try_into()
            .expect("BoundedInt did not enforce bounds");

        let spread = match op {
            HsDirOp::Download => self.params.hsdir_spread_fetch,
            #[cfg(feature = "hs-service")]
            HsDirOp::Upload => self.params.hsdir_spread_store,
        };

        let spread = spread
            .get()
            .try_into()
            .expect("BoundedInt did not enforce bounds!");

        // TODO: I may be wrong here but I suspect that this function may
        // need refactoring so that it does not look at _all_ of the HsDirRings,
        // but only at the ones that corresponds to time periods for which
        // HsBlindId is valid. Or I could be mistaken, in which case we should
        // have a comment to explain why I am, since the logic is subtle.
        // (For clients, there is only one ring.) -nickm
        //
        // (Actually, there is no need to follow through with the above TODO,
        // since this function is deprecated, and not used anywhere but the
        // tests.)

        // Walk every (ring, replica) pair, collecting `spread` previously
        // unseen ring entries per replica.
        let mut hs_dirs = self
            .hsdir_rings
            .iter_for_op(op)
            .cartesian_product(1..=n_replicas) // 1-indexed !
            .flat_map({
                // Shared across all (ring, replica) iterations: the set of
                // ring positions already chosen for an earlier replica.
                let mut selected_nodes = HashSet::new();

                move |(ring, replica): (&HsDirRing, u8)| {
                    let hsdir_idx = hsdir_ring::service_hsdir_index(hsid, replica, ring.params());

                    ring.ring_items_at(hsdir_idx, spread, |(hsdir_idx, _)| {
                        // According to rend-spec 2.2.3:
                        //                          ... If any of those
                        // nodes have already been selected for a lower-numbered replica of the
                        // service, any nodes already chosen are disregarded (i.e. skipped over)
                        // when choosing a replica's hsdir_spread_store nodes.
                        //
                        // (`HashSet::insert` returns false for duplicates,
                        // which makes it serve as the "not yet seen" filter.)
                        selected_nodes.insert(*hsdir_idx)
                    })
                    .collect::<Vec<_>>()
                }
            })
            .filter_map(|(_hsdir_idx, rs_idx)| {
                // This ought not to be None but let's not panic or bail if it is
                self.relay_by_rs_idx(*rs_idx)
            })
            .collect_vec();

        match op {
            HsDirOp::Download => {
                // When `op` is `Download`, the order is random.
                hs_dirs.shuffle(rng);
            }
            #[cfg(feature = "hs-service")]
            HsDirOp::Upload => {
                // When `op` is `Upload`, the order is not specified.
            }
        }

        hs_dirs
    }
2035}
2036
impl MdReceiver for NetDir {
    fn missing_microdescs(&self) -> Box<dyn Iterator<Item = &MdDigest> + '_> {
        // The keys of `rsidx_by_missing` are the digests of microdescriptors
        // that routerstatuses reference but that we have not yet received.
        Box::new(self.rsidx_by_missing.keys())
    }
    fn add_microdesc(&mut self, md: Microdesc) -> bool {
        // Delegate to the Arc-based insertion path.
        self.add_arc_microdesc(Arc::new(md))
    }
    fn n_missing(&self) -> usize {
        self.rsidx_by_missing.len()
    }
}
2048
2049impl<'a> UncheckedRelay<'a> {
2050 /// Return an [`UncheckedRelayDetails`](details::UncheckedRelayDetails) for this relay.
2051 ///
2052 /// Callers should generally avoid using this information directly if they can;
2053 /// it's better to use a higher-level function that exposes semantic information
2054 /// rather than these properties.
2055 pub fn low_level_details(&self) -> details::UncheckedRelayDetails<'_> {
2056 details::UncheckedRelayDetails(self)
2057 }
2058
2059 /// Return true if this relay is valid and [usable](NetDir#usable).
2060 ///
2061 /// This function should return `true` for every Relay we expose
2062 /// to the user.
2063 pub fn is_usable(&self) -> bool {
2064 // No need to check for 'valid' or 'running': they are implicit.
2065 self.md.is_some() && self.rs.ed25519_id_is_usable()
2066 }
2067 /// If this is [usable](NetDir#usable), return a corresponding Relay object.
2068 pub fn into_relay(self) -> Option<Relay<'a>> {
2069 if self.is_usable() {
2070 Some(Relay {
2071 rs: self.rs,
2072 md: self.md?,
2073 #[cfg(feature = "geoip")]
2074 cc: self.cc,
2075 })
2076 } else {
2077 None
2078 }
2079 }
2080
2081 /// Return true if this relay is a hidden service directory
2082 ///
2083 /// Ie, if it is to be included in the hsdir ring.
2084 #[cfg(feature = "hs-common")]
2085 pub(crate) fn is_hsdir_for_ring(&self) -> bool {
2086 // TODO are there any other flags should we check?
2087 // rend-spec-v3 2.2.3 says just
2088 // "each node listed in the current consensus with the HSDir flag"
2089 // Do we need to check ed25519_id_is_usable ?
2090 // See also https://gitlab.torproject.org/tpo/core/arti/-/issues/504
2091 self.rs.is_flagged_hsdir()
2092 }
2093}
2094
impl<'a> Relay<'a> {
    /// Return a [`RelayDetails`](details::RelayDetails) for this relay.
    ///
    /// Callers should generally avoid using this information directly if they can;
    /// it's better to use a higher-level function that exposes semantic information
    /// rather than these properties.
    pub fn low_level_details(&self) -> details::RelayDetails<'_> {
        details::RelayDetails(self)
    }

    /// Return the Ed25519 ID for this relay.
    ///
    /// (This identity comes from the relay's microdescriptor.)
    pub fn id(&self) -> &Ed25519Identity {
        self.md.ed25519_id()
    }
    /// Return the RsaIdentity for this relay.
    ///
    /// (This identity comes from the relay's router status entry.)
    pub fn rsa_id(&self) -> &RsaIdentity {
        self.rs.rsa_identity()
    }

    /// Return a reference to this relay's "router status" entry in
    /// the consensus.
    ///
    /// The router status entry contains information about the relay
    /// that the authorities voted on directly. For most use cases,
    /// you shouldn't need them.
    ///
    /// This function is only available if the crate was built with
    /// its `experimental-api` feature.
    #[cfg(feature = "experimental-api")]
    pub fn rs(&self) -> &netstatus::MdRouterStatus {
        self.rs
    }
    /// Return a reference to this relay's "microdescriptor" document.
    ///
    /// A "microdescriptor" is a synopsis of the information about a relay,
    /// used to determine its capabilities and route traffic through it.
    /// (It is fetched separately from the consensus, which references it
    /// by digest.)  For most use cases, you shouldn't need it.
    ///
    /// This function is only available if the crate was built with
    /// its `experimental-api` feature.
    #[cfg(feature = "experimental-api")]
    pub fn md(&self) -> &Microdesc {
        self.md
    }
}
2141
/// An error value returned from [`NetDir::by_ids_detailed`].
#[cfg(feature = "hs-common")]
#[derive(Clone, Debug, thiserror::Error)]
#[non_exhaustive]
pub enum RelayLookupError {
    /// We found a relay whose presence indicates that the provided set of
    /// identities is impossible to resolve.
    ///
    /// (Presumably, this means that different members of the identity set
    /// match different relays — confirm against `by_ids_detailed`.)
    #[error("Provided set of identities is impossible according to consensus.")]
    Impossible,
}
2152
impl<'a> HasAddrs for Relay<'a> {
    fn addrs(&self) -> impl Iterator<Item = std::net::SocketAddr> {
        // The addresses come from the relay's router status entry.
        self.rs.addrs()
    }
}
#[cfg(feature = "geoip")]
impl<'a> HasCountryCode for Relay<'a> {
    fn country_code(&self) -> Option<CountryCode> {
        // Return the country code recorded for this relay, if any.
        self.cc
    }
}
// A usable `Relay` always has both legacy identity types: the ed25519 id from
// its microdescriptor and the RSA id from its router status entry.
impl<'a> tor_linkspec::HasRelayIdsLegacy for Relay<'a> {
    fn ed_identity(&self) -> &Ed25519Identity {
        self.id()
    }
    fn rsa_identity(&self) -> &RsaIdentity {
        self.rsa_id()
    }
}
2172
2173impl<'a> HasRelayIds for UncheckedRelay<'a> {
2174 fn identity(&self, key_type: RelayIdType) -> Option<RelayIdRef<'_>> {
2175 match key_type {
2176 RelayIdType::Ed25519 if self.rs.ed25519_id_is_usable() => {
2177 self.md.map(|m| m.ed25519_id().into())
2178 }
2179 RelayIdType::Rsa => Some(self.rs.rsa_identity().into()),
2180 _ => None,
2181 }
2182 }
2183}
#[cfg(feature = "geoip")]
impl<'a> HasCountryCode for UncheckedRelay<'a> {
    fn country_code(&self) -> Option<CountryCode> {
        // Return the country code recorded for this relay, if any.
        self.cc
    }
}
2190
// Marker impls: `Relay` already provides addresses and identities via the
// traits above, so it can serve as a channel target with no extra methods.
impl<'a> DirectChanMethodsHelper for Relay<'a> {}
impl<'a> ChanTarget for Relay<'a> {}
2193
impl<'a> tor_linkspec::CircTarget for Relay<'a> {
    fn ntor_onion_key(&self) -> &ll::pk::curve25519::PublicKey {
        // The ntor onion key is carried in the microdescriptor.
        self.md.ntor_key()
    }
    fn protovers(&self) -> &tor_protover::Protocols {
        // The supported protocol versions come from the router status entry.
        self.rs.protovers()
    }
}
2202
2203#[cfg(test)]
2204mod test {
2205 // @@ begin test lint list maintained by maint/add_warning @@
2206 #![allow(clippy::bool_assert_comparison)]
2207 #![allow(clippy::clone_on_copy)]
2208 #![allow(clippy::dbg_macro)]
2209 #![allow(clippy::mixed_attributes_style)]
2210 #![allow(clippy::print_stderr)]
2211 #![allow(clippy::print_stdout)]
2212 #![allow(clippy::single_char_pattern)]
2213 #![allow(clippy::unwrap_used)]
2214 #![allow(clippy::unchecked_time_subtraction)]
2215 #![allow(clippy::useless_vec)]
2216 #![allow(clippy::needless_pass_by_value)]
2217 //! <!-- @@ end test lint list maintained by maint/add_warning @@ -->
2218 #![allow(clippy::cognitive_complexity)]
2219 use super::*;
2220 use crate::testnet::*;
2221 use float_eq::assert_float_eq;
2222 use std::collections::HashSet;
2223 use std::time::Duration;
2224 use tor_basic_utils::test_rng::{self, testing_rng};
2225 use tor_linkspec::{RelayIdType, RelayIds};
2226
2227 #[cfg(feature = "hs-common")]
2228 fn dummy_hs_blind_id() -> HsBlindId {
2229 let hsid = [2, 1, 1, 1].iter().cycle().take(32).cloned().collect_vec();
2230 let hsid = Ed25519Identity::new(hsid[..].try_into().unwrap());
2231 HsBlindId::from(hsid)
2232 }
2233
2234 // Basic functionality for a partial netdir: Add microdescriptors,
2235 // then you have a netdir.
2236 #[test]
2237 fn partial_netdir() {
2238 let (consensus, microdescs) = construct_network().unwrap();
2239 let dir = PartialNetDir::new(consensus, None);
2240
2241 // Check the lifetime
2242 let lifetime = dir.lifetime();
2243 assert_eq!(
2244 lifetime
2245 .valid_until()
2246 .duration_since(lifetime.valid_after())
2247 .unwrap(),
2248 Duration::new(86400, 0)
2249 );
2250
2251 // No microdescriptors, so we don't have enough paths, and can't
2252 // advance.
2253 assert!(!dir.have_enough_paths());
2254 let mut dir = match dir.unwrap_if_sufficient() {
2255 Ok(_) => panic!(),
2256 Err(d) => d,
2257 };
2258
2259 let missing: HashSet<_> = dir.missing_microdescs().collect();
2260 assert_eq!(missing.len(), 40);
2261 assert_eq!(missing.len(), dir.netdir.c_relays().len());
2262 for md in µdescs {
2263 assert!(missing.contains(md.digest()));
2264 }
2265
2266 // Now add all the mds and try again.
2267 for md in microdescs {
2268 let wanted = dir.add_microdesc(md);
2269 assert!(wanted);
2270 }
2271
2272 let missing: HashSet<_> = dir.missing_microdescs().collect();
2273 assert!(missing.is_empty());
2274 assert!(dir.have_enough_paths());
2275 let _complete = match dir.unwrap_if_sufficient() {
2276 Ok(d) => d,
2277 Err(_) => panic!(),
2278 };
2279 }
2280
2281 #[test]
2282 fn override_params() {
2283 let (consensus, _microdescs) = construct_network().unwrap();
2284 let override_p = "bwweightscale=2 doesnotexist=77 circwindow=500"
2285 .parse()
2286 .unwrap();
2287 let dir = PartialNetDir::new(consensus.clone(), Some(&override_p));
2288 let params = &dir.netdir.params;
2289 assert_eq!(params.bw_weight_scale.get(), 2);
2290 assert_eq!(params.circuit_window.get(), 500_i32);
2291
2292 // try again without the override.
2293 let dir = PartialNetDir::new(consensus, None);
2294 let params = &dir.netdir.params;
2295 assert_eq!(params.bw_weight_scale.get(), 1_i32);
2296 assert_eq!(params.circuit_window.get(), 1000_i32);
2297 }
2298
2299 #[test]
2300 fn fill_from_previous() {
2301 let (consensus, microdescs) = construct_network().unwrap();
2302
2303 let mut dir = PartialNetDir::new(consensus.clone(), None);
2304 for md in microdescs.iter().skip(2) {
2305 let wanted = dir.add_microdesc(md.clone());
2306 assert!(wanted);
2307 }
2308 let dir1 = dir.unwrap_if_sufficient().unwrap();
2309 assert_eq!(dir1.missing_microdescs().count(), 2);
2310
2311 let mut dir = PartialNetDir::new(consensus, None);
2312 assert_eq!(dir.missing_microdescs().count(), 40);
2313 dir.fill_from_previous_netdir(Arc::new(dir1));
2314 assert_eq!(dir.missing_microdescs().count(), 2);
2315 }
2316
    #[test]
    fn path_count() {
        let low_threshold = "min_paths_for_circs_pct=64".parse().unwrap();
        let high_threshold = "min_paths_for_circs_pct=65".parse().unwrap();

        let (consensus, microdescs) = construct_network().unwrap();

        // Add microdescriptors for all but the relays at positions 2 (mod 7):
        // relays 2, 9, 16, 23, 30, and 37 are left unusable.
        let mut dir = PartialNetDir::new(consensus.clone(), Some(&low_threshold));
        for (pos, md) in microdescs.iter().enumerate() {
            if pos % 7 == 2 {
                continue; // skip a few relays.
            }
            dir.add_microdesc(md.clone());
        }
        let dir = dir.unwrap_if_sufficient().unwrap();

        // We have 40 relays that we know about from the consensus.
        assert_eq!(dir.all_relays().count(), 40);

        // But only 34 are usable.
        assert_eq!(dir.relays().count(), 34);

        // For guards: mds 20..=39 correspond to Guard relays.
        // Their bandwidth is 2*(1000+2000+...10000) = 110_000.
        // We skipped 23, 30, and 37.  They have bandwidth
        // 4000 + 1000 + 8000 = 13_000.  So our fractional bandwidth
        // should be (110-13)/110.
        let f = dir.frac_for_role(WeightRole::Guard, |u| u.rs.is_flagged_guard());
        assert!(((97.0 / 110.0) - f).abs() < 0.000001);

        // For exits: mds 10..=19 and 30..=39 correspond to Exit relays.
        // We skipped 16, 30, and 37. Per above our fractional bandwidth is
        // (110-16)/110.
        let f = dir.frac_for_role(WeightRole::Exit, |u| u.rs.is_flagged_exit());
        assert!(((94.0 / 110.0) - f).abs() < 0.000001);

        // For middles: all relays are middles. We skipped 2, 9, 16,
        // 23, 30, and 37. Per above our fractional bandwidth is
        // (220-33)/220
        let f = dir.frac_for_role(WeightRole::Middle, |_| true);
        assert!(((187.0 / 220.0) - f).abs() < 0.000001);

        // Multiplying those together, we get the fraction of paths we can
        // build at ~0.64052066, which is above the threshold we set above for
        // MinPathsForCircsPct.
        let f = dir.frac_usable_paths();
        assert!((f - 0.64052066).abs() < 0.000001);

        // But if we try again with a slightly higher threshold...
        // (the same 34-relay directory must now be judged insufficient).
        let mut dir = PartialNetDir::new(consensus, Some(&high_threshold));
        for (pos, md) in microdescs.into_iter().enumerate() {
            if pos % 7 == 2 {
                continue; // skip a few relays.
            }
            dir.add_microdesc(md);
        }
        assert!(dir.unwrap_if_sufficient().is_err());
    }
2375
2376 /// Return a 3-tuple for use by `test_pick_*()` of an Rng, a number of
2377 /// iterations, and a tolerance.
2378 ///
2379 /// If the Rng is deterministic (the default), we can use a faster setup,
2380 /// with a higher tolerance and fewer iterations. But if you've explicitly
2381 /// opted into randomization (or are replaying a seed from an earlier
2382 /// randomized test), we give you more iterations and a tighter tolerance.
2383 fn testing_rng_with_tolerances() -> (impl rand::Rng, usize, f64) {
2384 // Use a deterministic RNG if none is specified, since this is slow otherwise.
2385 let config = test_rng::Config::from_env().unwrap_or(test_rng::Config::Deterministic);
2386 let (iters, tolerance) = match config {
2387 test_rng::Config::Deterministic => (5000, 0.02),
2388 _ => (50000, 0.01),
2389 };
2390 (config.into_rng(), iters, tolerance)
2391 }
2392
    #[test]
    fn test_pick() {
        let (consensus, microdescs) = construct_network().unwrap();
        let mut dir = PartialNetDir::new(consensus, None);
        for md in microdescs.into_iter() {
            let wanted = dir.add_microdesc(md.clone());
            assert!(wanted);
        }
        let dir = dir.unwrap_if_sufficient().unwrap();

        let (mut rng, total, tolerance) = testing_rng_with_tolerances();

        // Tally how often each relay gets picked; in the test network the
        // first byte of a relay's RSA identity is its index.
        let mut picked = [0_isize; 40];
        for _ in 0..total {
            let r = dir.pick_relay(&mut rng, WeightRole::Middle, |r| {
                r.low_level_details().supports_exit_port_ipv4(80)
            });
            let r = r.unwrap();
            let id_byte = r.identity(RelayIdType::Rsa).unwrap().as_bytes()[0];
            picked[id_byte as usize] += 1;
        }
        // non-exits should never get picked.
        picked[0..10].iter().for_each(|x| assert_eq!(*x, 0));
        picked[20..30].iter().for_each(|x| assert_eq!(*x, 0));

        let picked_f: Vec<_> = picked.iter().map(|x| *x as f64 / total as f64).collect();

        // We didn't set any non-default weights, so the other relays get
        // weighted proportional to their bandwidth.
        assert_float_eq!(picked_f[19], (10.0 / 110.0), abs <= tolerance);
        assert_float_eq!(picked_f[38], (9.0 / 110.0), abs <= tolerance);
        assert_float_eq!(picked_f[39], (10.0 / 110.0), abs <= tolerance);
    }
2426
    #[test]
    fn test_pick_multiple() {
        // This is mostly a copy of test_pick, except that it uses
        // pick_n_relays to pick several relays at once.

        let dir = construct_netdir().unwrap_if_sufficient().unwrap();

        let (mut rng, total, tolerance) = testing_rng_with_tolerances();

        // Tally how often each relay gets picked; in the test network the
        // first byte of a relay's RSA identity is its index.
        let mut picked = [0_isize; 40];
        for _ in 0..total / 4 {
            let relays = dir.pick_n_relays(&mut rng, 4, WeightRole::Middle, |r| {
                r.low_level_details().supports_exit_port_ipv4(80)
            });
            assert_eq!(relays.len(), 4);
            for r in relays {
                let id_byte = r.identity(RelayIdType::Rsa).unwrap().as_bytes()[0];
                picked[id_byte as usize] += 1;
            }
        }
        // non-exits should never get picked.
        picked[0..10].iter().for_each(|x| assert_eq!(*x, 0));
        picked[20..30].iter().for_each(|x| assert_eq!(*x, 0));

        let picked_f: Vec<_> = picked.iter().map(|x| *x as f64 / total as f64).collect();

        // We didn't set any non-default weights, so the other relays get
        // weighted proportional to their bandwidth.
        assert_float_eq!(picked_f[19], (10.0 / 110.0), abs <= tolerance);
        assert_float_eq!(picked_f[36], (7.0 / 110.0), abs <= tolerance);
        assert_float_eq!(picked_f[39], (10.0 / 110.0), abs <= tolerance);
    }
2459
2460 #[test]
2461 fn subnets() {
2462 let cfg = SubnetConfig::default();
2463
2464 fn same_net(cfg: &SubnetConfig, a: &str, b: &str) -> bool {
2465 cfg.addrs_in_same_subnet(&a.parse().unwrap(), &b.parse().unwrap())
2466 }
2467
2468 assert!(same_net(&cfg, "127.15.3.3", "127.15.9.9"));
2469 assert!(!same_net(&cfg, "127.15.3.3", "127.16.9.9"));
2470
2471 assert!(!same_net(&cfg, "127.15.3.3", "127::"));
2472
2473 assert!(same_net(&cfg, "ffff:ffff:90:33::", "ffff:ffff:91:34::"));
2474 assert!(!same_net(&cfg, "ffff:ffff:90:33::", "ffff:fffe:91:34::"));
2475
2476 let cfg = SubnetConfig {
2477 subnets_family_v4: 32,
2478 subnets_family_v6: 128,
2479 };
2480 assert!(!same_net(&cfg, "127.15.3.3", "127.15.9.9"));
2481 assert!(!same_net(&cfg, "ffff:ffff:90:33::", "ffff:ffff:91:34::"));
2482
2483 assert!(same_net(&cfg, "127.0.0.1", "127.0.0.1"));
2484 assert!(!same_net(&cfg, "127.0.0.1", "127.0.0.2"));
2485 assert!(same_net(&cfg, "ffff:ffff:90:33::", "ffff:ffff:90:33::"));
2486
2487 let cfg = SubnetConfig {
2488 subnets_family_v4: 33,
2489 subnets_family_v6: 129,
2490 };
2491 assert!(!same_net(&cfg, "127.0.0.1", "127.0.0.1"));
2492 assert!(!same_net(&cfg, "::", "::"));
2493 }
2494
2495 #[test]
2496 fn subnet_union() {
2497 let cfg1 = SubnetConfig {
2498 subnets_family_v4: 16,
2499 subnets_family_v6: 64,
2500 };
2501 let cfg2 = SubnetConfig {
2502 subnets_family_v4: 24,
2503 subnets_family_v6: 32,
2504 };
2505 let a1 = "1.2.3.4".parse().unwrap();
2506 let a2 = "1.2.10.10".parse().unwrap();
2507
2508 let a3 = "ffff:ffff::7".parse().unwrap();
2509 let a4 = "ffff:ffff:1234::8".parse().unwrap();
2510
2511 assert_eq!(cfg1.addrs_in_same_subnet(&a1, &a2), true);
2512 assert_eq!(cfg2.addrs_in_same_subnet(&a1, &a2), false);
2513
2514 assert_eq!(cfg1.addrs_in_same_subnet(&a3, &a4), false);
2515 assert_eq!(cfg2.addrs_in_same_subnet(&a3, &a4), true);
2516
2517 let cfg_u = cfg1.union(&cfg2);
2518 assert_eq!(
2519 cfg_u,
2520 SubnetConfig {
2521 subnets_family_v4: 16,
2522 subnets_family_v6: 32,
2523 }
2524 );
2525 assert_eq!(cfg_u.addrs_in_same_subnet(&a1, &a2), true);
2526 assert_eq!(cfg_u.addrs_in_same_subnet(&a3, &a4), true);
2527
2528 assert_eq!(cfg1.union(&cfg1), cfg1);
2529
2530 assert_eq!(cfg1.union(&SubnetConfig::no_addresses_match()), cfg1);
2531 }
2532
    #[test]
    fn relay_funcs() {
        // Build a test network in which relays 15 and 20 additionally listen
        // on IPv6 ORPorts, so we can exercise IPv6 subnet matching below.
        let (consensus, microdescs) = construct_custom_network(
            |pos, nb, _| {
                if pos == 15 {
                    nb.rs.add_or_port("[f0f0::30]:9001".parse().unwrap());
                } else if pos == 20 {
                    nb.rs.add_or_port("[f0f0::3131]:9001".parse().unwrap());
                }
            },
            None,
        )
        .unwrap();
        let subnet_config = SubnetConfig::default();
        let all_family_info = FamilyRules::all_family_info();
        let mut dir = PartialNetDir::new(consensus, None);
        for md in microdescs.into_iter() {
            let wanted = dir.add_microdesc(md.clone());
            assert!(wanted);
        }
        let dir = dir.unwrap_if_sufficient().unwrap();

        // Pick out a few relays by ID.  (In the test network, relay N has
        // ed25519 identity [N; 32] and RSA identity [N; 20].)
        let k0 = Ed25519Identity::from([0; 32]);
        let k1 = Ed25519Identity::from([1; 32]);
        let k2 = Ed25519Identity::from([2; 32]);
        let k3 = Ed25519Identity::from([3; 32]);
        let k10 = Ed25519Identity::from([10; 32]);
        let k15 = Ed25519Identity::from([15; 32]);
        let k20 = Ed25519Identity::from([20; 32]);

        let r0 = dir.by_id(&k0).unwrap();
        let r1 = dir.by_id(&k1).unwrap();
        let r2 = dir.by_id(&k2).unwrap();
        let r3 = dir.by_id(&k3).unwrap();
        let r10 = dir.by_id(&k10).unwrap();
        let r15 = dir.by_id(&k15).unwrap();
        let r20 = dir.by_id(&k20).unwrap();

        assert_eq!(r0.id(), &[0; 32].into());
        assert_eq!(r0.rsa_id(), &[0; 20].into());
        assert_eq!(r1.id(), &[1; 32].into());
        assert_eq!(r1.rsa_id(), &[1; 20].into());

        assert!(r0.same_relay_ids(&r0));
        assert!(r1.same_relay_ids(&r1));
        assert!(!r1.same_relay_ids(&r0));

        assert!(r0.low_level_details().is_dir_cache());
        assert!(!r1.low_level_details().is_dir_cache());
        assert!(r2.low_level_details().is_dir_cache());
        assert!(!r3.low_level_details().is_dir_cache());

        assert!(!r0.low_level_details().supports_exit_port_ipv4(80));
        assert!(!r1.low_level_details().supports_exit_port_ipv4(80));
        assert!(!r2.low_level_details().supports_exit_port_ipv4(80));
        assert!(!r3.low_level_details().supports_exit_port_ipv4(80));

        assert!(!r0.low_level_details().policies_allow_some_port());
        assert!(!r1.low_level_details().policies_allow_some_port());
        assert!(!r2.low_level_details().policies_allow_some_port());
        assert!(!r3.low_level_details().policies_allow_some_port());
        assert!(r10.low_level_details().policies_allow_some_port());

        // Family membership must be mutual: 0 and 1 are in one family, 2 and
        // 3 in another, and membership across those pairs is rejected.
        assert!(r0.low_level_details().in_same_family(&r0, all_family_info));
        assert!(r0.low_level_details().in_same_family(&r1, all_family_info));
        assert!(r1.low_level_details().in_same_family(&r0, all_family_info));
        assert!(r1.low_level_details().in_same_family(&r1, all_family_info));
        assert!(!r0.low_level_details().in_same_family(&r2, all_family_info));
        assert!(!r2.low_level_details().in_same_family(&r0, all_family_info));
        assert!(r2.low_level_details().in_same_family(&r2, all_family_info));
        assert!(r2.low_level_details().in_same_family(&r3, all_family_info));

        assert!(r0.low_level_details().in_same_subnet(&r10, &subnet_config));
        assert!(r10.low_level_details().in_same_subnet(&r10, &subnet_config));
        assert!(r0.low_level_details().in_same_subnet(&r0, &subnet_config));
        assert!(r1.low_level_details().in_same_subnet(&r1, &subnet_config));
        assert!(!r1.low_level_details().in_same_subnet(&r2, &subnet_config));
        assert!(!r2.low_level_details().in_same_subnet(&r3, &subnet_config));

        // Make sure IPv6 families work.
        let subnet_config = SubnetConfig {
            subnets_family_v4: 128,
            subnets_family_v6: 96,
        };
        assert!(r15.low_level_details().in_same_subnet(&r20, &subnet_config));
        assert!(!r15.low_level_details().in_same_subnet(&r1, &subnet_config));

        // Make sure that subnet configs can be disabled.
        let subnet_config = SubnetConfig {
            subnets_family_v4: 255,
            subnets_family_v6: 255,
        };
        assert!(!r15.low_level_details().in_same_subnet(&r20, &subnet_config));
    }
2628
    #[test]
    fn test_badexit() {
        // make a netdir where relays 10-19 are badexit, and everybody
        // exits to 443 on IPv6.
        use tor_netdoc::types::relay_flags::RelayFlag;
        let netdir = construct_custom_netdir(|pos, nb, _| {
            if (10..20).contains(&pos) {
                nb.rs.add_flags(RelayFlag::BadExit);
            }
            nb.md.parse_ipv6_policy("accept 443").unwrap();
        })
        .unwrap()
        .unwrap_if_sufficient()
        .unwrap();

        // Relay 12 is in the BadExit range above; relay 32 is not.
        let e12 = netdir.by_id(&Ed25519Identity::from([12; 32])).unwrap();
        let e32 = netdir.by_id(&Ed25519Identity::from([32; 32])).unwrap();

        // A BadExit relay reports no supported exit ports, regardless of policy.
        assert!(!e12.low_level_details().supports_exit_port_ipv4(80));
        assert!(e32.low_level_details().supports_exit_port_ipv4(80));

        assert!(!e12.low_level_details().supports_exit_port_ipv6(443));
        assert!(e32.low_level_details().supports_exit_port_ipv6(443));
        // 555 isn't in anybody's IPv6 policy ("accept 443").
        assert!(!e32.low_level_details().supports_exit_port_ipv6(555));

        // The *effective* policies of a BadExit relay allow nothing...
        assert!(!e12.low_level_details().policies_allow_some_port());
        assert!(e32.low_level_details().policies_allow_some_port());

        assert!(!e12.low_level_details().ipv4_policy().allows_some_port());
        assert!(!e12.low_level_details().ipv6_policy().allows_some_port());
        assert!(e32.low_level_details().ipv4_policy().allows_some_port());
        assert!(e32.low_level_details().ipv6_policy().allows_some_port());

        // ...but the *declared* policies still reflect what the relay itself
        // advertised, with the BadExit flag ignored.
        assert!(
            e12.low_level_details()
                .ipv4_declared_policy()
                .allows_some_port()
        );
        assert!(
            e12.low_level_details()
                .ipv6_declared_policy()
                .allows_some_port()
        );
    }
2673
2674 #[cfg(feature = "experimental-api")]
2675 #[test]
2676 fn test_accessors() {
2677 let netdir = construct_netdir().unwrap_if_sufficient().unwrap();
2678
2679 let r4 = netdir.by_id(&Ed25519Identity::from([4; 32])).unwrap();
2680 let r16 = netdir.by_id(&Ed25519Identity::from([16; 32])).unwrap();
2681
2682 assert!(!r4.md().ipv4_policy().allows_some_port());
2683 assert!(r16.md().ipv4_policy().allows_some_port());
2684
2685 assert!(!r4.rs().is_flagged_exit());
2686 assert!(r16.rs().is_flagged_exit());
2687 }
2688
2689 #[test]
2690 fn test_by_id() {
2691 // Make a netdir that omits the microdescriptor for 0xDDDDDD...
2692 let netdir = construct_custom_netdir(|pos, nb, _| {
2693 nb.omit_md = pos == 13;
2694 })
2695 .unwrap();
2696
2697 let netdir = netdir.unwrap_if_sufficient().unwrap();
2698
2699 let r = netdir.by_id(&Ed25519Identity::from([0; 32])).unwrap();
2700 assert_eq!(r.id().as_bytes(), &[0; 32]);
2701
2702 assert!(netdir.by_id(&Ed25519Identity::from([13; 32])).is_none());
2703
2704 let r = netdir.by_rsa_id(&[12; 20].into()).unwrap();
2705 assert_eq!(r.rsa_id().as_bytes(), &[12; 20]);
2706 assert!(netdir.rsa_id_is_listed(&[12; 20].into()));
2707
2708 assert!(netdir.by_rsa_id(&[13; 20].into()).is_none());
2709
2710 assert!(netdir.by_rsa_id_unchecked(&[99; 20].into()).is_none());
2711 assert!(!netdir.rsa_id_is_listed(&[99; 20].into()));
2712
2713 let r = netdir.by_rsa_id_unchecked(&[13; 20].into()).unwrap();
2714 assert_eq!(r.rs.rsa_identity().as_bytes(), &[13; 20]);
2715 assert!(netdir.rsa_id_is_listed(&[13; 20].into()));
2716
2717 let pair_13_13 = RelayIds::builder()
2718 .ed_identity([13; 32].into())
2719 .rsa_identity([13; 20].into())
2720 .build()
2721 .unwrap();
2722 let pair_14_14 = RelayIds::builder()
2723 .ed_identity([14; 32].into())
2724 .rsa_identity([14; 20].into())
2725 .build()
2726 .unwrap();
2727 let pair_14_99 = RelayIds::builder()
2728 .ed_identity([14; 32].into())
2729 .rsa_identity([99; 20].into())
2730 .build()
2731 .unwrap();
2732
2733 let r = netdir.by_ids(&pair_13_13);
2734 assert!(r.is_none());
2735 let r = netdir.by_ids(&pair_14_14).unwrap();
2736 assert_eq!(r.identity(RelayIdType::Rsa).unwrap().as_bytes(), &[14; 20]);
2737 assert_eq!(
2738 r.identity(RelayIdType::Ed25519).unwrap().as_bytes(),
2739 &[14; 32]
2740 );
2741 let r = netdir.by_ids(&pair_14_99);
2742 assert!(r.is_none());
2743
2744 assert_eq!(
2745 netdir.id_pair_listed(&[13; 32].into(), &[13; 20].into()),
2746 None
2747 );
2748 assert_eq!(
2749 netdir.id_pair_listed(&[15; 32].into(), &[15; 20].into()),
2750 Some(true)
2751 );
2752 assert_eq!(
2753 netdir.id_pair_listed(&[15; 32].into(), &[99; 20].into()),
2754 Some(false)
2755 );
2756 }
2757
2758 #[test]
2759 #[cfg(feature = "hs-common")]
2760 fn test_by_ids_detailed() {
2761 // Make a netdir that omits the microdescriptor for 0xDDDDDD...
2762 let netdir = construct_custom_netdir(|pos, nb, _| {
2763 nb.omit_md = pos == 13;
2764 })
2765 .unwrap();
2766
2767 let netdir = netdir.unwrap_if_sufficient().unwrap();
2768
2769 let id13_13 = RelayIds::builder()
2770 .ed_identity([13; 32].into())
2771 .rsa_identity([13; 20].into())
2772 .build()
2773 .unwrap();
2774 let id15_15 = RelayIds::builder()
2775 .ed_identity([15; 32].into())
2776 .rsa_identity([15; 20].into())
2777 .build()
2778 .unwrap();
2779 let id15_99 = RelayIds::builder()
2780 .ed_identity([15; 32].into())
2781 .rsa_identity([99; 20].into())
2782 .build()
2783 .unwrap();
2784 let id99_15 = RelayIds::builder()
2785 .ed_identity([99; 32].into())
2786 .rsa_identity([15; 20].into())
2787 .build()
2788 .unwrap();
2789 let id99_99 = RelayIds::builder()
2790 .ed_identity([99; 32].into())
2791 .rsa_identity([99; 20].into())
2792 .build()
2793 .unwrap();
2794 let id15_xx = RelayIds::builder()
2795 .ed_identity([15; 32].into())
2796 .build()
2797 .unwrap();
2798 let idxx_15 = RelayIds::builder()
2799 .rsa_identity([15; 20].into())
2800 .build()
2801 .unwrap();
2802
2803 assert!(matches!(netdir.by_ids_detailed(&id13_13), Ok(None)));
2804 assert!(matches!(netdir.by_ids_detailed(&id15_15), Ok(Some(_))));
2805 assert!(matches!(
2806 netdir.by_ids_detailed(&id15_99),
2807 Err(RelayLookupError::Impossible)
2808 ));
2809 assert!(matches!(
2810 netdir.by_ids_detailed(&id99_15),
2811 Err(RelayLookupError::Impossible)
2812 ));
2813 assert!(matches!(netdir.by_ids_detailed(&id99_99), Ok(None)));
2814 assert!(matches!(netdir.by_ids_detailed(&id15_xx), Ok(Some(_))));
2815 assert!(matches!(netdir.by_ids_detailed(&idxx_15), Ok(Some(_))));
2816 }
2817
2818 #[test]
2819 fn weight_type() {
2820 let r0 = RelayWeight(0);
2821 let r100 = RelayWeight(100);
2822 let r200 = RelayWeight(200);
2823 let r300 = RelayWeight(300);
2824 assert_eq!(r100 + r200, r300);
2825 assert_eq!(r100.checked_div(r200), Some(0.5));
2826 assert!(r100.checked_div(r0).is_none());
2827 assert_eq!(r200.ratio(0.5), Some(r100));
2828 assert!(r200.ratio(-1.0).is_none());
2829 }
2830
    #[test]
    fn weight_accessors() {
        // Build the default testing netdir and check the weight accessors.
        // (The old comment about omitting a microdescriptor was a copy-paste
        // leftover from test_by_id; nothing is omitted here.)
        let netdir = construct_netdir().unwrap_if_sufficient().unwrap();

        // Total guard-role weight over all Guard-flagged relays.
        let g_total = netdir.total_weight(WeightRole::Guard, |r| r.rs.is_flagged_guard());
        // This is just the total guard weight, since all our Wxy = 1.
        assert_eq!(g_total, RelayWeight(110_000));

        // An always-false predicate selects nothing.
        let g_total = netdir.total_weight(WeightRole::Guard, |_| false);
        assert_eq!(g_total, RelayWeight(0));

        // Weight of a single relay, looked up by ed25519 identity.
        let relay = netdir.by_id(&Ed25519Identity::from([35; 32])).unwrap();
        assert!(relay.rs.is_flagged_guard());
        let w = netdir.relay_weight(&relay, WeightRole::Guard);
        assert_eq!(w, RelayWeight(6_000));

        // Weight lookup by RSA identity...
        let w = netdir
            .weight_by_rsa_id(&[33; 20].into(), WeightRole::Guard)
            .unwrap();
        assert_eq!(w, RelayWeight(4_000));

        // ...returns None for an identity that isn't in the consensus.
        assert!(
            netdir
                .weight_by_rsa_id(&[99; 20].into(), WeightRole::Guard)
                .is_none()
        );
    }
2859
    #[test]
    fn family_list() {
        // Customize the testnet: relay 0x0A declares 0x0B, 0x0C, and 0x0D as
        // family, but of those only 0x0C declares 0x0A back (0x0B lists 0x0A
        // via the testnet's default adjacent-pair families).
        let netdir = construct_custom_netdir(|pos, n, _| {
            if pos == 0x0a {
                n.md.family(
                    "$0B0B0B0B0B0B0B0B0B0B0B0B0B0B0B0B0B0B0B0B \
                     $0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C \
                     $0D0D0D0D0D0D0D0D0D0D0D0D0D0D0D0D0D0D0D0D"
                        .parse()
                        .unwrap(),
                );
            } else if pos == 0x0c {
                n.md.family("$0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A".parse().unwrap());
            }
        })
        .unwrap()
        .unwrap_if_sufficient()
        .unwrap();

        // In the testing netdir, adjacent members are in the same family by default...
        let r0 = netdir.by_id(&Ed25519Identity::from([0; 32])).unwrap();
        let family: Vec<_> = netdir.known_family_members(&r0).collect();
        assert_eq!(family.len(), 1);
        assert_eq!(family[0].id(), &Ed25519Identity::from([1; 32]));

        // But we've made this relay claim membership with several others.
        // Only *mutually* claimed members count: 11 (default adjacency) and
        // 12 (explicit declaration above).
        let r10 = netdir.by_id(&Ed25519Identity::from([10; 32])).unwrap();
        let family: HashSet<_> = netdir.known_family_members(&r10).map(|r| *r.id()).collect();
        assert_eq!(family.len(), 2);
        assert!(family.contains(&Ed25519Identity::from([11; 32])));
        assert!(family.contains(&Ed25519Identity::from([12; 32])));
        // Note that 13 doesn't get put in, even though it's listed, since it doesn't claim
        // membership with 10.
    }
    #[test]
    #[cfg(feature = "geoip")]
    fn relay_has_country_code() {
        // A tiny IPv6-only GeoIP database:
        //   fe80:dead:beef:: .. fe80:dead:ffff::  -> US
        //   fe80:feed:eeee::1 .. fe80:feed:eeee::2 -> AT
        //   fe80:feed:eeee::2 .. fe80:feed:ffff::  -> DE
        let src_v6 = r#"
        fe80:dead:beef::,fe80:dead:ffff::,US
        fe80:feed:eeee::1,fe80:feed:eeee::2,AT
        fe80:feed:eeee::2,fe80:feed:ffff::,DE
        "#;
        let db = GeoipDb::new_from_legacy_format("", src_v6).unwrap();

        // Give relays 1-3 extra OR addresses to exercise the lookup logic.
        let netdir = construct_custom_netdir_with_geoip(
            |pos, n, _| {
                if pos == 0x01 {
                    // One address, in the US range.
                    n.rs.add_or_port("[fe80:dead:beef::1]:42".parse().unwrap());
                }
                if pos == 0x02 {
                    // Two addresses that resolve to different countries (AT/DE).
                    n.rs.add_or_port("[fe80:feed:eeee::1]:42".parse().unwrap());
                    n.rs.add_or_port("[fe80:feed:eeee::2]:42".parse().unwrap());
                }
                if pos == 0x03 {
                    // Two addresses, both in the US range.
                    n.rs.add_or_port("[fe80:dead:beef::1]:42".parse().unwrap());
                    n.rs.add_or_port("[fe80:dead:beef::2]:42".parse().unwrap());
                }
            },
            &db,
        )
        .unwrap()
        .unwrap_if_sufficient()
        .unwrap();

        // No GeoIP data available -> None
        let r0 = netdir.by_id(&Ed25519Identity::from([0; 32])).unwrap();
        assert_eq!(r0.cc, None);

        // Exactly one match -> Some
        let r1 = netdir.by_id(&Ed25519Identity::from([1; 32])).unwrap();
        assert_eq!(r1.cc.as_ref().map(|x| x.as_ref()), Some("US"));

        // Conflicting matches -> None
        let r2 = netdir.by_id(&Ed25519Identity::from([2; 32])).unwrap();
        assert_eq!(r2.cc, None);

        // Multiple agreeing matches -> Some
        let r3 = netdir.by_id(&Ed25519Identity::from([3; 32])).unwrap();
        assert_eq!(r3.cc.as_ref().map(|x| x.as_ref()), Some("US"));
    }
2940
    #[test]
    #[cfg(feature = "hs-common")]
    #[allow(deprecated)]
    fn hs_dirs_selection() {
        use tor_basic_utils::test_rng::testing_rng;

        // Override the consensus parameters that control how many HsDirs we
        // pick per replica for stores and fetches.
        const HSDIR_SPREAD_STORE: i32 = 6;
        const HSDIR_SPREAD_FETCH: i32 = 2;
        const PARAMS: [(&str, i32); 2] = [
            ("hsdir_spread_store", HSDIR_SPREAD_STORE),
            ("hsdir_spread_fetch", HSDIR_SPREAD_FETCH),
        ];

        let netdir: Arc<NetDir> =
            crate::testnet::construct_custom_netdir_with_params(|_, _, _| {}, PARAMS, None)
                .unwrap()
                .unwrap_if_sufficient()
                .unwrap()
                .into();
        let hsid = dummy_hs_blind_id();

        // Expected relay counts per operation.
        const OP_RELAY_COUNT: &[(HsDirOp, usize)] = &[
            // We can't upload to (hsdir_n_replicas * hsdir_spread_store) = 12, relays because there
            // are only 10 relays with the HsDir flag in the consensus.
            #[cfg(feature = "hs-service")]
            (HsDirOp::Upload, 10),
            // Download: hsdir_n_replicas * hsdir_spread_fetch = 4.
            (HsDirOp::Download, 4),
        ];

        for (op, relay_count) in OP_RELAY_COUNT {
            let relays = netdir.hs_dirs(&hsid, *op, &mut testing_rng());

            assert_eq!(relays.len(), *relay_count);

            // There should be no duplicates (the filtering function passed to
            // HsDirRing::ring_items_at() ensures the relays that are already in use for
            // lower-numbered replicas aren't considered a second time for a higher-numbered
            // replica).
            let unique = relays
                .iter()
                .map(|relay| relay.ed_identity())
                .collect::<HashSet<_>>();
            assert_eq!(unique.len(), relays.len());
        }

        // TODO: come up with a test that checks that HsDirRing::ring_items_at() skips over the
        // expected relays.
        //
        // For example, let's say we have the following hsdir ring:
        //
        //         A  -  B
        //       /         \
        //      F           C
        //       \         /
        //         E  -  D
        //
        // Let's also assume that:
        //
        //    * hsdir_spread_store = 3
        //    * the ordering of the relays on the ring is [A, B, C, D, E, F]
        //
        // If we use relays [A, B, C] for replica 1, and hs_index(2) = E, then replica 2 _must_ get
        // relays [E, F, D]. We should have a test that checks this.
    }
3005
    #[test]
    fn zero_weights() {
        // Here we check the behavior of IndexedRandom::{choose_weighted, choose_multiple_weighted}
        // in the presence of items whose weight is 0.
        //
        // We think that the behavior is:
        //  - An item with weight 0 is never returned.
        //  - If all items have weight 0, choose_weighted returns an error.
        //  - If all items have weight 0, choose_multiple_weighted returns an empty list.
        //  - If we request n items from choose_multiple_weighted,
        //    but only m<n items have nonzero weight, we return all m of those items.
        //  - if the request for n items can't be completely satisfied with n items of weight >= 0,
        //    we get InsufficientNonZero.
        //    (NOTE(review): this last bullet looks stale — the bullets above and the
        //    `insufficient_but_nonzero` test below suggest a short list is returned
        //    instead; confirm against the rand documentation.)
        let items = vec![1, 2, 3];
        let mut rng = testing_rng();

        // All weights zero: choose_weighted errors out...
        let a = items.choose_weighted(&mut rng, |_| 0);
        assert!(matches!(a, Err(WeightError::InsufficientNonZero)));

        // ...while choose_multiple_weighted yields an empty selection.
        let x = items.choose_multiple_weighted(&mut rng, 2, |_| 0);
        let xs: Vec<_> = x.unwrap().collect();
        assert!(xs.is_empty());

        // Only the item `1` carries nonzero weight.
        let only_one = |n: &i32| if *n == 1 { 1 } else { 0 };
        // Request 2 items but only 1 has weight: we get just that one.
        let x = items.choose_multiple_weighted(&mut rng, 2, only_one);
        let xs: Vec<_> = x.unwrap().collect();
        assert_eq!(&xs[..], &[&1]);

        // Zero-weight items are never chosen, no matter how often we sample.
        for _ in 0..100 {
            let a = items.choose_weighted(&mut rng, only_one);
            assert_eq!(a.unwrap(), &1);

            let x = items
                .choose_multiple_weighted(&mut rng, 1, only_one)
                .unwrap()
                .collect::<Vec<_>>();
            assert_eq!(x, vec![&1]);
        }
    }
3045
3046 #[test]
3047 fn insufficient_but_nonzero() {
3048 // Here we check IndexedRandom::choose_multiple_weighted when there no zero values,
3049 // but there are insufficient values.
3050 // (If this behavior changes, we need to change our usage.)
3051
3052 let items = vec![1, 2, 3];
3053 let mut rng = testing_rng();
3054 let mut a = items
3055 .choose_multiple_weighted(&mut rng, 10, |_| 1)
3056 .unwrap()
3057 .copied()
3058 .collect::<Vec<_>>();
3059 a.sort();
3060 assert_eq!(a, items);
3061 }
3062}