tor_netdir/lib.rs
1#![cfg_attr(docsrs, feature(doc_cfg))]
2#![doc = include_str!("../README.md")]
3// @@ begin lint list maintained by maint/add_warning @@
4#![allow(renamed_and_removed_lints)] // @@REMOVE_WHEN(ci_arti_stable)
5#![allow(unknown_lints)] // @@REMOVE_WHEN(ci_arti_nightly)
6#![warn(missing_docs)]
7#![warn(noop_method_call)]
8#![warn(unreachable_pub)]
9#![warn(clippy::all)]
10#![deny(clippy::await_holding_lock)]
11#![deny(clippy::cargo_common_metadata)]
12#![deny(clippy::cast_lossless)]
13#![deny(clippy::checked_conversions)]
14#![warn(clippy::cognitive_complexity)]
15#![deny(clippy::debug_assert_with_mut_call)]
16#![deny(clippy::exhaustive_enums)]
17#![deny(clippy::exhaustive_structs)]
18#![deny(clippy::expl_impl_clone_on_copy)]
19#![deny(clippy::fallible_impl_from)]
20#![deny(clippy::implicit_clone)]
21#![deny(clippy::large_stack_arrays)]
22#![warn(clippy::manual_ok_or)]
23#![deny(clippy::missing_docs_in_private_items)]
24#![warn(clippy::needless_borrow)]
25#![warn(clippy::needless_pass_by_value)]
26#![warn(clippy::option_option)]
27#![deny(clippy::print_stderr)]
28#![deny(clippy::print_stdout)]
29#![warn(clippy::rc_buffer)]
30#![deny(clippy::ref_option_ref)]
31#![warn(clippy::semicolon_if_nothing_returned)]
32#![warn(clippy::trait_duplication_in_bounds)]
33#![deny(clippy::unchecked_time_subtraction)]
34#![deny(clippy::unnecessary_wraps)]
35#![warn(clippy::unseparated_literal_suffix)]
36#![deny(clippy::unwrap_used)]
37#![deny(clippy::mod_module_files)]
38#![allow(clippy::let_unit_value)] // This can reasonably be done for explicitness
39#![allow(clippy::uninlined_format_args)]
40#![allow(clippy::significant_drop_in_scrutinee)] // arti/-/merge_requests/588/#note_2812945
41#![allow(clippy::result_large_err)] // temporary workaround for arti#587
42#![allow(clippy::needless_raw_string_hashes)] // complained-about code is fine, often best
43#![allow(clippy::needless_lifetimes)] // See arti#1765
44#![allow(mismatched_lifetime_syntaxes)] // temporary workaround for arti#2060
45#![deny(clippy::unused_async)]
46//! <!-- @@ end lint list maintained by maint/add_warning @@ -->
47
48pub mod details;
49mod err;
50#[cfg(feature = "hs-common")]
51mod hsdir_params;
52#[cfg(feature = "hs-common")]
53mod hsdir_ring;
54pub mod params;
55mod weight;
56
57#[cfg(any(test, feature = "testing"))]
58pub mod testnet;
59#[cfg(feature = "testing")]
60pub mod testprovider;
61
62use async_trait::async_trait;
63#[cfg(feature = "hs-service")]
64use itertools::chain;
65use tor_error::warn_report;
66#[cfg(feature = "hs-common")]
67use tor_linkspec::OwnedCircTarget;
68use tor_linkspec::{
69 ChanTarget, DirectChanMethodsHelper, HasAddrs, HasRelayIds, RelayIdRef, RelayIdType,
70};
71use tor_llcrypto as ll;
72use tor_llcrypto::pk::{ed25519::Ed25519Identity, rsa::RsaIdentity};
73use tor_netdoc::doc::microdesc::{MdDigest, Microdesc};
74use tor_netdoc::doc::netstatus::{self, MdConsensus, MdRouterStatus};
75#[cfg(feature = "hs-common")]
76use {hsdir_ring::HsDirRing, std::iter};
77
78use derive_more::{From, Into};
79use futures::{StreamExt, stream::BoxStream};
80use num_enum::{IntoPrimitive, TryFromPrimitive};
81use rand::seq::{IndexedRandom as _, SliceRandom as _, WeightError};
82use serde::Deserialize;
83use std::collections::HashMap;
84use std::net::IpAddr;
85use std::ops::Deref;
86use std::sync::Arc;
87use std::time::SystemTime;
88use strum::{EnumCount, EnumIter};
89use tracing::warn;
90use typed_index_collections::{TiSlice, TiVec};
91
92#[cfg(feature = "hs-common")]
93use {
94 itertools::Itertools,
95 std::collections::HashSet,
96 std::result::Result as StdResult,
97 tor_error::{Bug, internal},
98 tor_hscrypto::{pk::HsBlindId, time::TimePeriod},
99 tor_linkspec::{OwnedChanTargetBuilder, verbatim::VerbatimLinkSpecCircTarget},
100 tor_llcrypto::pk::curve25519,
101};
102
103pub use err::Error;
104pub use weight::WeightRole;
105/// A Result using the Error type from the tor-netdir crate
106pub type Result<T> = std::result::Result<T, Error>;
107
108#[cfg(feature = "hs-common")]
109pub use err::{OnionDirLookupError, VerbatimCircTargetDecodeError};
110
111use params::NetParameters;
112#[cfg(feature = "geoip")]
113use tor_geoip::{CountryCode, GeoipDb, HasCountryCode};
114
115#[cfg(feature = "hs-common")]
116pub use hsdir_params::HsDirParams;
117
118/// Index into the consensus relays
119///
120/// This is an index into the list of relays returned by
121/// [`.c_relays()`](ConsensusRelays::c_relays)
122/// (on the corresponding consensus or netdir).
123///
124/// This is just a `usize` inside, but using a newtype prevents getting a relay index
125/// confused with other kinds of slice indices or counts.
126///
127/// If you are in a part of the code which needs to work with multiple consensuses,
128/// the typechecking cannot tell if you try to index into the wrong consensus.
129#[derive(Debug, From, Into, Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash)]
130pub(crate) struct RouterStatusIdx(usize);
131
132/// Extension trait to provide index-type-safe `.c_relays()` method
133//
134// TODO: Really it would be better to have MdConsensns::relays() return TiSlice,
135// but that would be an API break there.
136pub(crate) trait ConsensusRelays {
137 /// Obtain the list of relays in the consensus
138 //
139 fn c_relays(&self) -> &TiSlice<RouterStatusIdx, MdRouterStatus>;
140}
141impl ConsensusRelays for MdConsensus {
142 fn c_relays(&self) -> &TiSlice<RouterStatusIdx, MdRouterStatus> {
143 TiSlice::from_ref(MdConsensus::relays(self))
144 }
145}
146impl ConsensusRelays for NetDir {
147 fn c_relays(&self) -> &TiSlice<RouterStatusIdx, MdRouterStatus> {
148 self.consensus.c_relays()
149 }
150}
151
152/// Configuration for determining when two relays have addresses "too close" in
153/// the network.
154///
155/// Used by `Relay::low_level_details().in_same_subnet()`.
156#[derive(Deserialize, Debug, Clone, Copy, Eq, PartialEq)]
157#[serde(deny_unknown_fields)]
158pub struct SubnetConfig {
159 /// Consider IPv4 nodes in the same /x to be the same family.
160 ///
161 /// If this value is 0, all nodes with IPv4 addresses will be in the
162 /// same family. If this value is above 32, then no nodes will be
163 /// placed im the same family based on their IPv4 addresses.
164 subnets_family_v4: u8,
165 /// Consider IPv6 nodes in the same /x to be the same family.
166 ///
167 /// If this value is 0, all nodes with IPv6 addresses will be in the
168 /// same family. If this value is above 128, then no nodes will be
169 /// placed im the same family based on their IPv6 addresses.
170 subnets_family_v6: u8,
171}
172
173impl Default for SubnetConfig {
174 fn default() -> Self {
175 Self::new(16, 32)
176 }
177}
178
179impl SubnetConfig {
180 /// Construct a new SubnetConfig from a pair of bit prefix lengths.
181 ///
182 /// The values are clamped to the appropriate ranges if they are
183 /// out-of-bounds.
184 pub fn new(subnets_family_v4: u8, subnets_family_v6: u8) -> Self {
185 Self {
186 subnets_family_v4,
187 subnets_family_v6,
188 }
189 }
190
191 /// Construct a new SubnetConfig such that addresses are not in the same
192 /// family with anything--not even with themselves.
193 pub fn no_addresses_match() -> SubnetConfig {
194 SubnetConfig {
195 subnets_family_v4: 33,
196 subnets_family_v6: 129,
197 }
198 }
199
200 /// Return true if the two addresses in the same subnet, according to this
201 /// configuration.
202 pub fn addrs_in_same_subnet(&self, a: &IpAddr, b: &IpAddr) -> bool {
203 match (a, b) {
204 (IpAddr::V4(a), IpAddr::V4(b)) => {
205 let bits = self.subnets_family_v4;
206 if bits > 32 {
207 return false;
208 }
209 let a = u32::from_be_bytes(a.octets());
210 let b = u32::from_be_bytes(b.octets());
211 (a >> (32 - bits)) == (b >> (32 - bits))
212 }
213 (IpAddr::V6(a), IpAddr::V6(b)) => {
214 let bits = self.subnets_family_v6;
215 if bits > 128 {
216 return false;
217 }
218 let a = u128::from_be_bytes(a.octets());
219 let b = u128::from_be_bytes(b.octets());
220 (a >> (128 - bits)) == (b >> (128 - bits))
221 }
222 _ => false,
223 }
224 }
225
226 /// Return true if any of the addresses in `a` shares a subnet with any of
227 /// the addresses in `b`, according to this configuration.
228 pub fn any_addrs_in_same_subnet<T, U>(&self, a: &T, b: &U) -> bool
229 where
230 T: tor_linkspec::HasAddrs,
231 U: tor_linkspec::HasAddrs,
232 {
233 a.addrs().any(|aa| {
234 b.addrs()
235 .any(|bb| self.addrs_in_same_subnet(&aa.ip(), &bb.ip()))
236 })
237 }
238
239 /// Return a new subnet configuration that is the union of `self` and
240 /// `other`.
241 ///
242 /// That is, return a subnet configuration that puts all addresses in the
243 /// same subnet if and only if at least one of `self` and `other` would put
244 /// them in the same subnet.
245 pub fn union(&self, other: &Self) -> Self {
246 use std::cmp::min;
247 Self {
248 subnets_family_v4: min(self.subnets_family_v4, other.subnets_family_v4),
249 subnets_family_v6: min(self.subnets_family_v6, other.subnets_family_v6),
250 }
251 }
252}
253
254/// Configuration for which listed family information to use when deciding
255/// whether relays belong to the same family.
256///
257/// Derived from network parameters.
258#[derive(Clone, Copy, Debug)]
259pub struct FamilyRules {
260 /// If true, we use family information from lists of family members.
261 use_family_lists: bool,
262 /// If true, we use family information from lists of family IDs and from family certs.
263 use_family_ids: bool,
264}
265
266impl<'a> From<&'a NetParameters> for FamilyRules {
267 fn from(params: &'a NetParameters) -> Self {
268 FamilyRules {
269 use_family_lists: bool::from(params.use_family_lists),
270 use_family_ids: bool::from(params.use_family_ids),
271 }
272 }
273}
274
275impl FamilyRules {
276 /// Return a `FamilyRules` that will use all recognized kinds of family information.
277 pub fn all_family_info() -> Self {
278 Self {
279 use_family_lists: true,
280 use_family_ids: true,
281 }
282 }
283
284 /// Return a `FamilyRules` that will ignore all family information declared by relays.
285 pub fn ignore_declared_families() -> Self {
286 Self {
287 use_family_lists: false,
288 use_family_ids: false,
289 }
290 }
291
292 /// Configure this `FamilyRules` to use (or not use) family information from
293 /// lists of family members.
294 pub fn use_family_lists(&mut self, val: bool) -> &mut Self {
295 self.use_family_lists = val;
296 self
297 }
298
299 /// Configure this `FamilyRules` to use (or not use) family information from
300 /// family IDs and family certs.
301 pub fn use_family_ids(&mut self, val: bool) -> &mut Self {
302 self.use_family_ids = val;
303 self
304 }
305
306 /// Return a `FamilyRules` that will look at every source of information
307 /// requested by `self` or by `other`.
308 pub fn union(&self, other: &Self) -> Self {
309 Self {
310 use_family_lists: self.use_family_lists || other.use_family_lists,
311 use_family_ids: self.use_family_ids || other.use_family_ids,
312 }
313 }
314}
315
316/// An opaque type representing the weight with which a relay or set of
317/// relays will be selected for a given role.
318///
319/// Most users should ignore this type, and just use pick_relay instead.
320#[derive(
321 Copy,
322 Clone,
323 Debug,
324 derive_more::Add,
325 derive_more::Sum,
326 derive_more::AddAssign,
327 Eq,
328 PartialEq,
329 Ord,
330 PartialOrd,
331)]
332pub struct RelayWeight(u64);
333
334impl RelayWeight {
335 /// Try to divide this weight by `rhs`.
336 ///
337 /// Return a ratio on success, or None on division-by-zero.
338 pub fn checked_div(&self, rhs: RelayWeight) -> Option<f64> {
339 if rhs.0 == 0 {
340 None
341 } else {
342 Some((self.0 as f64) / (rhs.0 as f64))
343 }
344 }
345
346 /// Compute a ratio `frac` of this weight.
347 ///
348 /// Return None if frac is less than zero, since negative weights
349 /// are impossible.
350 pub fn ratio(&self, frac: f64) -> Option<RelayWeight> {
351 let product = (self.0 as f64) * frac;
352 if product >= 0.0 && product.is_finite() {
353 Some(RelayWeight(product as u64))
354 } else {
355 None
356 }
357 }
358}
359
360impl From<u64> for RelayWeight {
361 fn from(val: u64) -> Self {
362 RelayWeight(val)
363 }
364}
365
366/// An operation for which we might be requesting a hidden service directory.
367#[derive(Copy, Clone, Debug, PartialEq)]
368// TODO: make this pub(crate) once NetDir::hs_dirs is removed
369#[non_exhaustive]
370pub enum HsDirOp {
371 /// Uploading an onion service descriptor.
372 #[cfg(feature = "hs-service")]
373 Upload,
374 /// Downloading an onion service descriptor.
375 Download,
376}
377
378/// A view of the Tor directory, suitable for use in building circuits.
379///
380/// Abstractly, a [`NetDir`] is a set of usable public [`Relay`]s, each of which
381/// has its own properties, identity, and correct weighted probability for use
382/// under different circumstances.
383///
384/// A [`NetDir`] is constructed by making a [`PartialNetDir`] from a consensus
385/// document, and then adding enough microdescriptors to that `PartialNetDir` so
386/// that it can be used to build paths. (Thus, if you have a NetDir, it is
387/// definitely adequate to build paths.)
388///
389/// # "Usable" relays
390///
391/// Many methods on NetDir are defined in terms of <a name="usable">"Usable"</a> relays. Unless
392/// otherwise stated, a relay is "usable" if it is listed in the consensus,
393/// if we have full directory information for that relay (including a
394/// microdescriptor), and if that relay does not have any flags indicating that
395/// we should never use it. (Currently, `NoEdConsensus` is the only such flag.)
396///
397/// # Limitations
398///
399/// The current NetDir implementation assumes fairly strongly that every relay
400/// has an Ed25519 identity and an RSA identity, that the consensus is indexed
401/// by RSA identities, and that the Ed25519 identities are stored in
402/// microdescriptors.
403///
404/// If these assumptions someday change, then we'll have to revise the
405/// implementation.
406#[derive(Debug, Clone)]
407pub struct NetDir {
408 /// A microdescriptor consensus that lists the members of the network,
409 /// and maps each one to a 'microdescriptor' that has more information
410 /// about it
411 consensus: Arc<MdConsensus>,
412 /// A map from keys to integer values, distributed in the consensus,
413 /// and clamped to certain defaults.
414 params: NetParameters,
415 /// Map from routerstatus index, to that routerstatus's microdescriptor (if we have one.)
416 mds: TiVec<RouterStatusIdx, Option<Arc<Microdesc>>>,
417 /// Map from SHA256 of _missing_ microdescriptors to the index of their
418 /// corresponding routerstatus.
419 rsidx_by_missing: HashMap<MdDigest, RouterStatusIdx>,
420 /// Map from ed25519 identity to index of the routerstatus.
421 ///
422 /// Note that we don't know the ed25519 identity of a relay until
423 /// we get the microdescriptor for it, so this won't be filled in
424 /// until we get the microdescriptors.
425 ///
426 /// # Implementation note
427 ///
428 /// For this field, and for `rsidx_by_rsa`,
429 /// it might be cool to have references instead.
430 /// But that would make this into a self-referential structure,
431 /// which isn't possible in safe rust.
432 rsidx_by_ed: HashMap<Ed25519Identity, RouterStatusIdx>,
433 /// Map from RSA identity to index of the routerstatus.
434 ///
435 /// This is constructed at the same time as the NetDir object, so it
436 /// can be immutable.
437 rsidx_by_rsa: Arc<HashMap<RsaIdentity, RouterStatusIdx>>,
438
439 /// Hash ring(s) describing the onion service directory.
440 ///
441 /// This is empty in a PartialNetDir, and is filled in before the NetDir is
442 /// built.
443 //
444 // TODO hs: It is ugly to have this exist in a partially constructed state
445 // in a PartialNetDir.
446 // Ideally, a PartialNetDir would contain only an HsDirs<HsDirParams>,
447 // or perhaps nothing at all, here.
448 #[cfg(feature = "hs-common")]
449 hsdir_rings: Arc<HsDirs<HsDirRing>>,
450
451 /// Weight values to apply to a given relay when deciding how frequently
452 /// to choose it for a given role.
453 weights: weight::WeightSet,
454
455 #[cfg(feature = "geoip")]
456 /// Country codes for each router in our consensus.
457 ///
458 /// This is indexed by the `RouterStatusIdx` (i.e. a router idx of zero has
459 /// the country code at position zero in this array).
460 country_codes: Vec<Option<CountryCode>>,
461}
462
463/// Collection of hidden service directories (or parameters for them)
464///
465/// In [`NetDir`] this is used to store the actual hash rings.
466/// (But, in a NetDir in a [`PartialNetDir`], it contains [`HsDirRing`]s
467/// where only the `params` are populated, and the `ring` is empty.)
468///
469/// This same generic type is used as the return type from
470/// [`HsDirParams::compute`](HsDirParams::compute),
471/// where it contains the *parameters* for the primary and secondary rings.
472#[derive(Debug, Clone)]
473#[cfg(feature = "hs-common")]
474pub(crate) struct HsDirs<D> {
475 /// The current ring
476 ///
477 /// It corresponds to the time period containing the `valid-after` time in
478 /// the consensus. Its SRV is whatever SRV was most current at the time when
479 /// that time period began.
480 ///
481 /// This is the hash ring that we should use whenever we are fetching an
482 /// onion service descriptor.
483 current: D,
484
485 /// Secondary rings (based on the parameters for the previous and next time periods)
486 ///
487 /// Onion services upload to positions on these ring as well, based on how
488 /// far into the current time period this directory is, so that
489 /// not-synchronized clients can still find their descriptor.
490 ///
491 /// Note that with the current (2023) network parameters, with
492 /// `hsdir_interval = SRV lifetime = 24 hours` at most one of these
493 /// secondary rings will be active at a time. We have two here in order
494 /// to conform with a more flexible regime in proposal 342.
495 //
496 // TODO: hs clients never need this; so I've made it not-present for them.
497 // But does that risk too much with respect to side channels?
498 //
499 // TODO: Perhaps we should refactor this so that it is clear that these
500 // are immutable? On the other hand, the documentation for this type
501 // declares that it is immutable, so we are likely okay.
502 //
503 // TODO: this `Vec` is only ever 0,1,2 elements.
504 // Maybe it should be an ArrayVec or something.
505 #[cfg(feature = "hs-service")]
506 secondary: Vec<D>,
507}
508
509#[cfg(feature = "hs-common")]
510impl<D> HsDirs<D> {
511 /// Convert an `HsDirs<D>` to `HsDirs<D2>` by mapping each contained `D`
512 pub(crate) fn map<D2>(self, mut f: impl FnMut(D) -> D2) -> HsDirs<D2> {
513 HsDirs {
514 current: f(self.current),
515 #[cfg(feature = "hs-service")]
516 secondary: self.secondary.into_iter().map(f).collect(),
517 }
518 }
519
520 /// Iterate over some of the contained hsdirs, according to `secondary`
521 ///
522 /// The current ring is always included.
523 /// Secondary rings are included iff `secondary` and the `hs-service` feature is enabled.
524 fn iter_filter_secondary(&self, secondary: bool) -> impl Iterator<Item = &D> {
525 let i = iter::once(&self.current);
526
527 // With "hs-service" disabled, there are no secondary rings,
528 // so we don't care.
529 let _ = secondary;
530
531 #[cfg(feature = "hs-service")]
532 let i = chain!(i, self.secondary.iter().filter(move |_| secondary));
533
534 i
535 }
536
537 /// Iterate over all the contained hsdirs
538 pub(crate) fn iter(&self) -> impl Iterator<Item = &D> {
539 self.iter_filter_secondary(true)
540 }
541
542 /// Iterate over the hsdirs relevant for `op`
543 pub(crate) fn iter_for_op(&self, op: HsDirOp) -> impl Iterator<Item = &D> {
544 self.iter_filter_secondary(match op {
545 #[cfg(feature = "hs-service")]
546 HsDirOp::Upload => true,
547 HsDirOp::Download => false,
548 })
549 }
550}
551
552/// An event that a [`NetDirProvider`] can broadcast to indicate that a change in
553/// the status of its directory.
554#[derive(
555 Debug, Clone, Copy, PartialEq, Eq, EnumIter, EnumCount, IntoPrimitive, TryFromPrimitive,
556)]
557#[non_exhaustive]
558#[repr(u16)]
559pub enum DirEvent {
560 /// A new consensus has been received, and has enough information to be
561 /// used.
562 ///
563 /// This event is also broadcast when a new set of consensus parameters is
564 /// available, even if that set of parameters comes from a configuration
565 /// change rather than from the latest consensus.
566 NewConsensus,
567
568 /// New descriptors have been received for the current consensus.
569 ///
570 /// (This event is _not_ broadcast when receiving new descriptors for a
571 /// consensus which is not yet ready to replace the current consensus.)
572 NewDescriptors,
573
574 /// We have received updated recommendations and requirements
575 /// for which subprotocols we should have to use the network.
576 NewProtocolRecommendation,
577}
578
579/// The network directory provider is shutting down without giving us the
580/// netdir we asked for.
581#[derive(Clone, Copy, Debug, thiserror::Error)]
582#[error("Network directory provider is shutting down")]
583#[non_exhaustive]
584pub struct NetdirProviderShutdown;
585
586impl tor_error::HasKind for NetdirProviderShutdown {
587 fn kind(&self) -> tor_error::ErrorKind {
588 tor_error::ErrorKind::ArtiShuttingDown
589 }
590}
591
592/// How "timely" must a network directory be?
593///
594/// This enum is used as an argument when requesting a [`NetDir`] object from
595/// [`NetDirProvider`] and other APIs, to specify how recent the information
596/// must be in order to be useful.
597#[derive(Copy, Clone, Eq, PartialEq, Debug)]
598#[allow(clippy::exhaustive_enums)]
599pub enum Timeliness {
600 /// The network directory must be strictly timely.
601 ///
602 /// That is, it must be based on a consensus that valid right now, with no
603 /// tolerance for skew or consensus problems.
604 ///
605 /// Avoid using this option if you could use [`Timeliness::Timely`] instead.
606 Strict,
607 /// The network directory must be roughly timely.
608 ///
609 /// This is, it must be be based on a consensus that is not _too_ far in the
610 /// future, and not _too_ far in the past.
611 ///
612 /// (The tolerances for "too far" will depend on configuration.)
613 ///
614 /// This is almost always the option that you want to use.
615 Timely,
616 /// Any network directory is permissible, regardless of how untimely.
617 ///
618 /// Avoid using this option if you could use [`Timeliness::Timely`] instead.
619 Unchecked,
620}
621
622/// An object that can provide [`NetDir`]s, as well as inform consumers when
623/// they might have changed.
624///
625/// It is the responsibility of the implementor of `NetDirProvider`
626/// to try to obtain an up-to-date `NetDir`,
627/// and continuously to maintain and update it.
628///
629/// In usual configurations, Arti uses `tor_dirmgr::DirMgr`
630/// as its `NetDirProvider`.
631#[async_trait]
632pub trait NetDirProvider: UpcastArcNetDirProvider + Send + Sync {
633 /// Return a network directory that's live according to the provided
634 /// `timeliness`.
635 fn netdir(&self, timeliness: Timeliness) -> Result<Arc<NetDir>>;
636
637 /// Return a reasonable netdir for general usage.
638 ///
639 /// This is an alias for
640 /// [`NetDirProvider::netdir`]`(`[`Timeliness::Timely`]`)`.
641 fn timely_netdir(&self) -> Result<Arc<NetDir>> {
642 self.netdir(Timeliness::Timely)
643 }
644
645 /// Return a new asynchronous stream that will receive notification
646 /// whenever the consensus has changed.
647 ///
648 /// Multiple events may be batched up into a single item: each time
649 /// this stream yields an event, all you can assume is that the event has
650 /// occurred at least once.
651 fn events(&self) -> BoxStream<'static, DirEvent>;
652
653 /// Return the latest network parameters.
654 ///
655 /// If we have no directory, return a reasonable set of defaults.
656 fn params(&self) -> Arc<dyn AsRef<NetParameters>>;
657
658 /// Get a NetDir from `provider`, waiting until one exists.
659 async fn wait_for_netdir(
660 &self,
661 timeliness: Timeliness,
662 ) -> std::result::Result<Arc<NetDir>, NetdirProviderShutdown> {
663 if let Ok(nd) = self.netdir(timeliness) {
664 return Ok(nd);
665 }
666
667 let mut stream = self.events();
668 loop {
669 // We need to retry `self.netdir()` before waiting for any stream events, to
670 // avoid deadlock.
671 //
672 // We ignore all errors here: they can all potentially be fixed by
673 // getting a fresh consensus, and they will all get warned about
674 // by the NetDirProvider itself.
675 if let Ok(nd) = self.netdir(timeliness) {
676 return Ok(nd);
677 }
678 match stream.next().await {
679 Some(_) => {}
680 None => {
681 return Err(NetdirProviderShutdown);
682 }
683 }
684 }
685 }
686
687 /// Wait until `provider` lists `target`.
688 ///
689 /// NOTE: This might potentially wait indefinitely, if `target` is never actually
690 /// becomes listed in the directory. It will exit if the `NetDirProvider` shuts down.
691 async fn wait_for_netdir_to_list(
692 &self,
693 target: &tor_linkspec::RelayIds,
694 timeliness: Timeliness,
695 ) -> std::result::Result<(), NetdirProviderShutdown> {
696 let mut events = self.events();
697 loop {
698 // See if the desired relay is in the netdir.
699 //
700 // We do this before waiting for any events, to avoid race conditions.
701 {
702 let netdir = self.wait_for_netdir(timeliness).await?;
703 if netdir.ids_listed(target) == Some(true) {
704 return Ok(());
705 }
706 // If we reach this point, then ids_listed returned `Some(false)`,
707 // meaning "This relay is definitely not in the current directory";
708 // or it returned `None`, meaning "waiting for more information
709 // about this network directory.
710 // In both cases, it's reasonable to just wait for another netdir
711 // event and try again.
712 }
713 // We didn't find the relay; wait for the provider to have a new netdir
714 // or more netdir information.
715 if events.next().await.is_none() {
716 // The event stream is closed; the provider has shut down.
717 return Err(NetdirProviderShutdown);
718 }
719 }
720 }
721
722 /// Return the latest set of recommended and required protocols, if there is one.
723 ///
724 /// This may be more recent (or more available) than this provider's associated NetDir.
725 fn protocol_statuses(&self) -> Option<(SystemTime, Arc<netstatus::ProtoStatuses>)>;
726}
727
728impl<T> NetDirProvider for Arc<T>
729where
730 T: NetDirProvider,
731{
732 fn netdir(&self, timeliness: Timeliness) -> Result<Arc<NetDir>> {
733 self.deref().netdir(timeliness)
734 }
735
736 fn timely_netdir(&self) -> Result<Arc<NetDir>> {
737 self.deref().timely_netdir()
738 }
739
740 fn events(&self) -> BoxStream<'static, DirEvent> {
741 self.deref().events()
742 }
743
744 fn params(&self) -> Arc<dyn AsRef<NetParameters>> {
745 self.deref().params()
746 }
747
748 fn protocol_statuses(&self) -> Option<(SystemTime, Arc<netstatus::ProtoStatuses>)> {
749 self.deref().protocol_statuses()
750 }
751}
752
753/// Helper trait: allows any `Arc<X>` to be upcast to a `Arc<dyn
754/// NetDirProvider>` if X is an implementation or supertrait of NetDirProvider.
755///
756/// This trait exists to work around a limitation in rust: when trait upcasting
757/// coercion is stable, this will be unnecessary.
758///
759/// The Rust tracking issue is <https://github.com/rust-lang/rust/issues/65991>.
760pub trait UpcastArcNetDirProvider {
761 /// Return a view of this object as an `Arc<dyn NetDirProvider>`
762 fn upcast_arc<'a>(self: Arc<Self>) -> Arc<dyn NetDirProvider + 'a>
763 where
764 Self: 'a;
765}
766
767impl<T> UpcastArcNetDirProvider for T
768where
769 T: NetDirProvider + Sized,
770{
771 fn upcast_arc<'a>(self: Arc<Self>) -> Arc<dyn NetDirProvider + 'a>
772 where
773 Self: 'a,
774 {
775 self
776 }
777}
778
779impl AsRef<NetParameters> for NetDir {
780 fn as_ref(&self) -> &NetParameters {
781 self.params()
782 }
783}
784
785/// A partially build NetDir -- it can't be unwrapped until it has
786/// enough information to build safe paths.
787#[derive(Debug, Clone)]
788pub struct PartialNetDir {
789 /// The netdir that's under construction.
790 netdir: NetDir,
791
792 /// The previous netdir, if we had one
793 ///
794 /// Used as a cache, so we can reuse information
795 #[cfg(feature = "hs-common")]
796 prev_netdir: Option<Arc<NetDir>>,
797}
798
799/// A view of a relay on the Tor network, suitable for building circuits.
800// TODO: This should probably be a more specific struct, with a trait
801// that implements it.
802#[derive(Clone)]
803pub struct Relay<'a> {
804 /// A router descriptor for this relay.
805 rs: &'a netstatus::MdRouterStatus,
806 /// A microdescriptor for this relay.
807 md: &'a Microdesc,
808 /// The country code this relay is in, if we know one.
809 #[cfg(feature = "geoip")]
810 cc: Option<CountryCode>,
811}
812
813/// A relay that we haven't checked for validity or usability in
814/// routing.
815#[derive(Debug)]
816pub struct UncheckedRelay<'a> {
817 /// A router descriptor for this relay.
818 rs: &'a netstatus::MdRouterStatus,
819 /// A microdescriptor for this relay, if there is one.
820 md: Option<&'a Microdesc>,
821 /// The country code this relay is in, if we know one.
822 #[cfg(feature = "geoip")]
823 cc: Option<CountryCode>,
824}
825
826/// A partial or full network directory that we can download
827/// microdescriptors for.
828pub trait MdReceiver {
829 /// Return an iterator over the digests for all of the microdescriptors
830 /// that this netdir is missing.
831 fn missing_microdescs(&self) -> Box<dyn Iterator<Item = &MdDigest> + '_>;
832 /// Add a microdescriptor to this netdir, if it was wanted.
833 ///
834 /// Return true if it was indeed wanted.
835 fn add_microdesc(&mut self, md: Microdesc) -> bool;
836 /// Return the number of missing microdescriptors.
837 fn n_missing(&self) -> usize;
838}
839
840impl PartialNetDir {
841 /// Create a new PartialNetDir with a given consensus, and no
842 /// microdescriptors loaded.
843 ///
844 /// If `replacement_params` is provided, override network parameters from
845 /// the consensus with those from `replacement_params`.
846 pub fn new(
847 consensus: MdConsensus,
848 replacement_params: Option<&netstatus::NetParams<i32>>,
849 ) -> Self {
850 Self::new_inner(
851 consensus,
852 replacement_params,
853 #[cfg(feature = "geoip")]
854 None,
855 )
856 }
857
858 /// Create a new PartialNetDir with GeoIP support.
859 ///
860 /// This does the same thing as `new()`, except the provided GeoIP database is used to add
861 /// country codes to relays.
862 #[cfg(feature = "geoip")]
863 pub fn new_with_geoip(
864 consensus: MdConsensus,
865 replacement_params: Option<&netstatus::NetParams<i32>>,
866 geoip_db: &GeoipDb,
867 ) -> Self {
868 Self::new_inner(consensus, replacement_params, Some(geoip_db))
869 }
870
871 /// Implementation of the `new()` functions.
872 fn new_inner(
873 consensus: MdConsensus,
874 replacement_params: Option<&netstatus::NetParams<i32>>,
875 #[cfg(feature = "geoip")] geoip_db: Option<&GeoipDb>,
876 ) -> Self {
877 let mut params = NetParameters::default();
878
879 // (We ignore unrecognized options here, since they come from
880 // the consensus, and we don't expect to recognize everything
881 // there.)
882 let _ = params.saturating_update(consensus.params().iter());
883
884 // Now see if the user has any parameters to override.
885 // (We have to do this now, or else changes won't be reflected in our
886 // weights.)
887 if let Some(replacement) = replacement_params {
888 for u in params.saturating_update(replacement.iter()) {
889 warn!("Unrecognized option: override_net_params.{}", u);
890 }
891 }
892
893 // Compute the weights we'll want to use for these relays.
894 let weights = weight::WeightSet::from_consensus(&consensus, ¶ms);
895
896 let n_relays = consensus.c_relays().len();
897
898 let rsidx_by_missing = consensus
899 .c_relays()
900 .iter_enumerated()
901 .map(|(rsidx, rs)| (*rs.md_digest(), rsidx))
902 .collect();
903
904 let rsidx_by_rsa = consensus
905 .c_relays()
906 .iter_enumerated()
907 .map(|(rsidx, rs)| (*rs.rsa_identity(), rsidx))
908 .collect();
909
910 #[cfg(feature = "geoip")]
911 let country_codes = if let Some(db) = geoip_db {
912 consensus
913 .c_relays()
914 .iter()
915 .map(|rs| {
916 db.lookup_country_code_multi(rs.addrs().map(|x| x.ip()))
917 .cloned()
918 })
919 .collect()
920 } else {
921 Default::default()
922 };
923
924 #[cfg(feature = "hs-common")]
925 let hsdir_rings = Arc::new({
926 let params = HsDirParams::compute(&consensus, ¶ms).expect("Invalid consensus!");
927 // TODO: It's a bit ugly to use expect above, but this function does
928 // not return a Result. On the other hand, the error conditions under which
929 // HsDirParams::compute can return Err are _very_ narrow and hard to
930 // hit; see documentation in that function. As such, we probably
931 // don't need to have this return a Result.
932
933 params.map(HsDirRing::empty_from_params)
934 });
935
936 let netdir = NetDir {
937 consensus: Arc::new(consensus),
938 params,
939 mds: vec![None; n_relays].into(),
940 rsidx_by_missing,
941 rsidx_by_rsa: Arc::new(rsidx_by_rsa),
942 rsidx_by_ed: HashMap::with_capacity(n_relays),
943 #[cfg(feature = "hs-common")]
944 hsdir_rings,
945 weights,
946 #[cfg(feature = "geoip")]
947 country_codes,
948 };
949
950 PartialNetDir {
951 netdir,
952 #[cfg(feature = "hs-common")]
953 prev_netdir: None,
954 }
955 }
956
957 /// Return the declared lifetime of this PartialNetDir.
958 pub fn lifetime(&self) -> &netstatus::Lifetime {
959 self.netdir.lifetime()
960 }
961
962 /// Record a previous netdir, which can be used for reusing cached information
963 //
964 // Fills in as many missing microdescriptors as possible in this
965 // netdir, using the microdescriptors from the previous netdir.
966 //
967 // With HS enabled, stores the netdir for reuse of relay hash ring index values.
968 #[allow(clippy::needless_pass_by_value)] // prev might, or might not, be stored
969 pub fn fill_from_previous_netdir(&mut self, prev: Arc<NetDir>) {
970 for md in prev.mds.iter().flatten() {
971 self.netdir.add_arc_microdesc(md.clone());
972 }
973
974 #[cfg(feature = "hs-common")]
975 {
976 self.prev_netdir = Some(prev);
977 }
978 }
979
980 /// Compute the hash ring(s) for this NetDir
981 #[cfg(feature = "hs-common")]
982 fn compute_rings(&mut self) {
983 let params = HsDirParams::compute(&self.netdir.consensus, &self.netdir.params)
984 .expect("Invalid consensus");
985 // TODO: see TODO by similar expect in new()
986
987 self.netdir.hsdir_rings =
988 Arc::new(params.map(|params| {
989 HsDirRing::compute(params, &self.netdir, self.prev_netdir.as_deref())
990 }));
991 }
992
993 /// Return true if this are enough information in this directory
994 /// to build multihop paths.
995 pub fn have_enough_paths(&self) -> bool {
996 self.netdir.have_enough_paths()
997 }
998 /// If this directory has enough information to build multihop
999 /// circuits, return it.
1000 pub fn unwrap_if_sufficient(
1001 #[allow(unused_mut)] mut self,
1002 ) -> std::result::Result<NetDir, PartialNetDir> {
1003 if self.netdir.have_enough_paths() {
1004 #[cfg(feature = "hs-common")]
1005 self.compute_rings();
1006 Ok(self.netdir)
1007 } else {
1008 Err(self)
1009 }
1010 }
1011}
1012
1013impl MdReceiver for PartialNetDir {
1014 fn missing_microdescs(&self) -> Box<dyn Iterator<Item = &MdDigest> + '_> {
1015 self.netdir.missing_microdescs()
1016 }
1017 fn add_microdesc(&mut self, md: Microdesc) -> bool {
1018 self.netdir.add_microdesc(md)
1019 }
1020 fn n_missing(&self) -> usize {
1021 self.netdir.n_missing()
1022 }
1023}
1024
1025impl NetDir {
1026 /// Return the declared lifetime of this NetDir.
1027 pub fn lifetime(&self) -> &netstatus::Lifetime {
1028 self.consensus.lifetime()
1029 }
1030
1031 /// Add `md` to this NetDir.
1032 ///
1033 /// Return true if we wanted it, and false otherwise.
1034 fn add_arc_microdesc(&mut self, md: Arc<Microdesc>) -> bool {
1035 if let Some(rsidx) = self.rsidx_by_missing.remove(md.digest()) {
1036 assert_eq!(self.c_relays()[rsidx].md_digest(), md.digest());
1037
1038 // There should never be two approved MDs in the same
1039 // consensus listing the same ID... but if there is,
1040 // we'll let the most recent one win.
1041 self.rsidx_by_ed.insert(*md.ed25519_id(), rsidx);
1042
1043 // Happy path: we did indeed want this one.
1044 self.mds[rsidx] = Some(md);
1045
1046 // Save some space in the missing-descriptor list.
1047 if self.rsidx_by_missing.len() < self.rsidx_by_missing.capacity() / 4 {
1048 self.rsidx_by_missing.shrink_to_fit();
1049 }
1050
1051 return true;
1052 }
1053
1054 // Either we already had it, or we never wanted it at all.
1055 false
1056 }
1057
1058 /// Construct a (possibly invalid) Relay object from a routerstatus and its
1059 /// index within the consensus.
1060 fn relay_from_rs_and_rsidx<'a>(
1061 &'a self,
1062 rs: &'a netstatus::MdRouterStatus,
1063 rsidx: RouterStatusIdx,
1064 ) -> UncheckedRelay<'a> {
1065 debug_assert_eq!(self.c_relays()[rsidx].rsa_identity(), rs.rsa_identity());
1066 let md = self.mds[rsidx].as_deref();
1067 if let Some(md) = md {
1068 debug_assert_eq!(rs.md_digest(), md.digest());
1069 }
1070
1071 UncheckedRelay {
1072 rs,
1073 md,
1074 #[cfg(feature = "geoip")]
1075 cc: self.country_codes.get(rsidx.0).copied().flatten(),
1076 }
1077 }
1078
1079 /// Return the value of the hsdir_n_replicas param.
1080 #[cfg(feature = "hs-common")]
1081 fn n_replicas(&self) -> u8 {
1082 self.params
1083 .hsdir_n_replicas
1084 .get()
1085 .try_into()
1086 .expect("BoundedInt did not enforce bounds")
1087 }
1088
1089 /// Return the spread parameter for the specified `op`.
1090 #[cfg(feature = "hs-common")]
1091 fn spread(&self, op: HsDirOp) -> usize {
1092 let spread = match op {
1093 HsDirOp::Download => self.params.hsdir_spread_fetch,
1094 #[cfg(feature = "hs-service")]
1095 HsDirOp::Upload => self.params.hsdir_spread_store,
1096 };
1097
1098 spread
1099 .get()
1100 .try_into()
1101 .expect("BoundedInt did not enforce bounds!")
1102 }
1103
1104 /// Select `spread` hsdir relays for the specified `hsid` from a given `ring`.
1105 ///
1106 /// Algorithm:
1107 ///
1108 /// for idx in 1..=n_replicas:
1109 /// - let H = hsdir_ring::onion_service_index(id, replica, rand,
1110 /// period).
1111 /// - Find the position of H within hsdir_ring.
1112 /// - Take elements from hsdir_ring starting at that position,
1113 /// adding them to Dirs until we have added `spread` new elements
1114 /// that were not there before.
1115 #[cfg(feature = "hs-common")]
1116 fn select_hsdirs<'h, 'r: 'h>(
1117 &'r self,
1118 hsid: HsBlindId,
1119 ring: &'h HsDirRing,
1120 spread: usize,
1121 ) -> impl Iterator<Item = Relay<'r>> + 'h {
1122 let n_replicas = self.n_replicas();
1123
1124 (1..=n_replicas) // 1-indexed !
1125 .flat_map({
1126 let mut selected_nodes = HashSet::new();
1127
1128 move |replica: u8| {
1129 let hsdir_idx = hsdir_ring::service_hsdir_index(&hsid, replica, ring.params());
1130
1131 ring.ring_items_at(hsdir_idx, spread, |(hsdir_idx, _)| {
1132 // According to rend-spec 2.2.3:
1133 // ... If any of those
1134 // nodes have already been selected for a lower-numbered replica of the
1135 // service, any nodes already chosen are disregarded (i.e. skipped over)
1136 // when choosing a replica's hsdir_spread_store nodes.
1137 selected_nodes.insert(*hsdir_idx)
1138 })
1139 .collect::<Vec<_>>()
1140 }
1141 })
1142 .filter_map(move |(_hsdir_idx, rs_idx)| {
1143 // This ought not to be None but let's not panic or bail if it is
1144 self.relay_by_rs_idx(*rs_idx)
1145 })
1146 }
1147
1148 /// Replace the overridden parameters in this netdir with `new_replacement`.
1149 ///
1150 /// After this function is done, the netdir's parameters will be those in
1151 /// the consensus, overridden by settings from `new_replacement`. Any
1152 /// settings in the old replacement parameters will be discarded.
1153 pub fn replace_overridden_parameters(&mut self, new_replacement: &netstatus::NetParams<i32>) {
1154 // TODO(nickm): This is largely duplicate code from PartialNetDir::new().
1155 let mut new_params = NetParameters::default();
1156 let _ = new_params.saturating_update(self.consensus.params().iter());
1157 for u in new_params.saturating_update(new_replacement.iter()) {
1158 warn!("Unrecognized option: override_net_params.{}", u);
1159 }
1160
1161 self.params = new_params;
1162 }
1163
1164 /// Return an iterator over all Relay objects, including invalid ones
1165 /// that we can't use.
1166 pub fn all_relays(&self) -> impl Iterator<Item = UncheckedRelay<'_>> {
1167 // TODO: I'd like if we could memoize this so we don't have to
1168 // do so many hashtable lookups.
1169 self.c_relays()
1170 .iter_enumerated()
1171 .map(move |(rsidx, rs)| self.relay_from_rs_and_rsidx(rs, rsidx))
1172 }
1173 /// Return an iterator over all [usable](NetDir#usable) Relays.
1174 pub fn relays(&self) -> impl Iterator<Item = Relay<'_>> {
1175 self.all_relays().filter_map(UncheckedRelay::into_relay)
1176 }
1177
1178 /// Look up a relay's [`Microdesc`] by its [`RouterStatusIdx`]
1179 #[cfg_attr(not(feature = "hs-common"), allow(dead_code))]
1180 pub(crate) fn md_by_rsidx(&self, rsidx: RouterStatusIdx) -> Option<&Microdesc> {
1181 self.mds.get(rsidx)?.as_deref()
1182 }
1183
1184 /// Return a relay matching a given identity, if we have a
1185 /// _usable_ relay with that key.
1186 ///
1187 /// (Does not return [unusable](NetDir#usable) relays.)
1188 ///
1189 ///
1190 /// Note that a `None` answer is not always permanent: if a microdescriptor
1191 /// is subsequently added for a relay with this ID, the ID may become usable
1192 /// even if it was not usable before.
1193 pub fn by_id<'a, T>(&self, id: T) -> Option<Relay<'_>>
1194 where
1195 T: Into<RelayIdRef<'a>>,
1196 {
1197 let id = id.into();
1198 let answer = match id {
1199 RelayIdRef::Ed25519(ed25519) => {
1200 let rsidx = *self.rsidx_by_ed.get(ed25519)?;
1201 let rs = self.c_relays().get(rsidx).expect("Corrupt index");
1202
1203 self.relay_from_rs_and_rsidx(rs, rsidx).into_relay()?
1204 }
1205 RelayIdRef::Rsa(rsa) => self
1206 .by_rsa_id_unchecked(rsa)
1207 .and_then(UncheckedRelay::into_relay)?,
1208 other_type => self.relays().find(|r| r.has_identity(other_type))?,
1209 };
1210 assert!(answer.has_identity(id));
1211 Some(answer)
1212 }
1213
1214 /// Obtain a `Relay` given a `RouterStatusIdx`
1215 ///
1216 /// Differs from `relay_from_rs_and_rsi` as follows:
1217 /// * That function expects the caller to already have an `MdRouterStatus`;
1218 /// it checks with `debug_assert` that the relay in the netdir matches.
1219 /// * That function panics if the `RouterStatusIdx` is invalid; this one returns `None`.
1220 /// * That function returns an `UncheckedRelay`; this one a `Relay`.
1221 ///
1222 /// `None` could be returned here, even with a valid `rsi`,
1223 /// if `rsi` refers to an [unusable](NetDir#usable) relay.
1224 #[cfg_attr(not(feature = "hs-common"), allow(dead_code))]
1225 pub(crate) fn relay_by_rs_idx(&self, rs_idx: RouterStatusIdx) -> Option<Relay<'_>> {
1226 let rs = self.c_relays().get(rs_idx)?;
1227 let md = self.mds.get(rs_idx)?.as_deref();
1228 UncheckedRelay {
1229 rs,
1230 md,
1231 #[cfg(feature = "geoip")]
1232 cc: self.country_codes.get(rs_idx.0).copied().flatten(),
1233 }
1234 .into_relay()
1235 }
1236
1237 /// Return a relay with the same identities as those in `target`, if one
1238 /// exists.
1239 ///
1240 /// Does not return [unusable](NetDir#usable) relays.
1241 ///
1242 /// Note that a negative result from this method is not necessarily permanent:
1243 /// it may be the case that a relay exists,
1244 /// but we don't yet have enough information about it to know all of its IDs.
1245 /// To test whether a relay is *definitely* absent,
1246 /// use [`by_ids_detailed`](Self::by_ids_detailed)
1247 /// or [`ids_listed`](Self::ids_listed).
1248 ///
1249 /// # Limitations
1250 ///
1251 /// This will be very slow if `target` does not have an Ed25519 or RSA
1252 /// identity.
1253 pub fn by_ids<T>(&self, target: &T) -> Option<Relay<'_>>
1254 where
1255 T: HasRelayIds + ?Sized,
1256 {
1257 let mut identities = target.identities();
1258 // Don't try if there are no identities.
1259 let first_id = identities.next()?;
1260
1261 // Since there is at most one relay with each given ID type,
1262 // we only need to check the first relay we find.
1263 let candidate = self.by_id(first_id)?;
1264 if identities.all(|wanted_id| candidate.has_identity(wanted_id)) {
1265 Some(candidate)
1266 } else {
1267 None
1268 }
1269 }
1270
1271 /// Check whether there is a relay that has at least one identity from
1272 /// `target`, and which _could_ have every identity from `target`.
1273 /// If so, return such a relay.
1274 ///
1275 /// Return `Ok(None)` if we did not find a relay with any identity from `target`.
1276 ///
1277 /// Return `RelayLookupError::Impossible` if we found a relay with at least
1278 /// one identity from `target`, but that relay's other identities contradict
1279 /// what we learned from `target`.
1280 ///
1281 /// Does not return [unusable](NetDir#usable) relays.
1282 ///
1283 /// (This function is only useful if you need to distinguish the
1284 /// "impossible" case from the "no such relay known" case.)
1285 ///
1286 /// # Limitations
1287 ///
1288 /// This will be very slow if `target` does not have an Ed25519 or RSA
1289 /// identity.
1290 //
1291 // TODO HS: This function could use a better name.
1292 //
1293 // TODO: We could remove the feature restriction here once we think this API is
1294 // stable.
1295 #[cfg(feature = "hs-common")]
1296 pub fn by_ids_detailed<T>(
1297 &self,
1298 target: &T,
1299 ) -> std::result::Result<Option<Relay<'_>>, RelayLookupError>
1300 where
1301 T: HasRelayIds + ?Sized,
1302 {
1303 let candidate = target
1304 .identities()
1305 // Find all the relays that share any identity with this set of identities.
1306 .filter_map(|id| self.by_id(id))
1307 // We might find the same relay more than once under a different
1308 // identity, so we remove the duplicates.
1309 //
1310 // Since there is at most one relay per rsa identity per consensus,
1311 // this is a true uniqueness check under current construction rules.
1312 .unique_by(|r| r.rs.rsa_identity())
1313 // If we find two or more distinct relays, then have a contradiction.
1314 .at_most_one()
1315 .map_err(|_| RelayLookupError::Impossible)?;
1316
1317 // If we have no candidate, return None early.
1318 let candidate = match candidate {
1319 Some(relay) => relay,
1320 None => return Ok(None),
1321 };
1322
1323 // Now we know we have a single candidate. Make sure that it does not have any
1324 // identity that does not match the target.
1325 if target
1326 .identities()
1327 .all(|wanted_id| match candidate.identity(wanted_id.id_type()) {
1328 None => true,
1329 Some(id) => id == wanted_id,
1330 })
1331 {
1332 Ok(Some(candidate))
1333 } else {
1334 Err(RelayLookupError::Impossible)
1335 }
1336 }
1337
1338 /// Return a boolean if this consensus definitely has (or does not have) a
1339 /// relay matching the listed identities.
1340 ///
1341 /// `Some(true)` indicates that the relay exists.
1342 /// `Some(false)` indicates that the relay definitely does not exist.
1343 /// `None` indicates that we can't yet tell whether such a relay exists,
1344 /// due to missing information.
1345 fn id_pair_listed(&self, ed_id: &Ed25519Identity, rsa_id: &RsaIdentity) -> Option<bool> {
1346 let r = self.by_rsa_id_unchecked(rsa_id);
1347 match r {
1348 Some(unchecked) => {
1349 if !unchecked.rs.ed25519_id_is_usable() {
1350 return Some(false);
1351 }
1352 // If md is present, then it's listed iff we have the right
1353 // ed id. Otherwise we don't know if it's listed.
1354 unchecked.md.map(|md| md.ed25519_id() == ed_id)
1355 }
1356 None => {
1357 // Definitely not listed.
1358 Some(false)
1359 }
1360 }
1361 }
1362
1363 /// Check whether a relay exists (or may exist)
1364 /// with the same identities as those in `target`.
1365 ///
1366 /// `Some(true)` indicates that the relay exists.
1367 /// `Some(false)` indicates that the relay definitely does not exist.
1368 /// `None` indicates that we can't yet tell whether such a relay exists,
1369 /// due to missing information.
1370 pub fn ids_listed<T>(&self, target: &T) -> Option<bool>
1371 where
1372 T: HasRelayIds + ?Sized,
1373 {
1374 let rsa_id = target.rsa_identity();
1375 let ed25519_id = target.ed_identity();
1376
1377 // TODO: If we later support more identity key types, this will
1378 // become incorrect. This assertion might help us recognize that case.
1379 const _: () = assert!(RelayIdType::COUNT == 2);
1380
1381 match (rsa_id, ed25519_id) {
1382 (Some(r), Some(e)) => self.id_pair_listed(e, r),
1383 (Some(r), None) => Some(self.rsa_id_is_listed(r)),
1384 (None, Some(e)) => {
1385 if self.rsidx_by_ed.contains_key(e) {
1386 Some(true)
1387 } else {
1388 None
1389 }
1390 }
1391 (None, None) => None,
1392 }
1393 }
1394
1395 /// Return a (possibly [unusable](NetDir#usable)) relay with a given RSA identity.
1396 ///
1397 /// This API can be used to find information about a relay that is listed in
1398 /// the current consensus, even if we don't yet have enough information
1399 /// (like a microdescriptor) about the relay to use it.
1400 #[cfg_attr(feature = "experimental-api", visibility::make(pub))]
1401 #[cfg_attr(docsrs, doc(cfg(feature = "experimental-api")))]
1402 fn by_rsa_id_unchecked(&self, rsa_id: &RsaIdentity) -> Option<UncheckedRelay<'_>> {
1403 let rsidx = *self.rsidx_by_rsa.get(rsa_id)?;
1404 let rs = self.c_relays().get(rsidx).expect("Corrupt index");
1405 assert_eq!(rs.rsa_identity(), rsa_id);
1406 Some(self.relay_from_rs_and_rsidx(rs, rsidx))
1407 }
1408 /// Return the relay with a given RSA identity, if we have one
1409 /// and it is [usable](NetDir#usable).
1410 fn by_rsa_id(&self, rsa_id: &RsaIdentity) -> Option<Relay<'_>> {
1411 self.by_rsa_id_unchecked(rsa_id)?.into_relay()
1412 }
1413 /// Return true if `rsa_id` is listed in this directory, even if it isn't
1414 /// currently usable.
1415 ///
1416 /// (An "[unusable](NetDir#usable)" relay in this context is one for which we don't have full
1417 /// directory information.)
1418 #[cfg_attr(feature = "experimental-api", visibility::make(pub))]
1419 #[cfg_attr(docsrs, doc(cfg(feature = "experimental-api")))]
1420 fn rsa_id_is_listed(&self, rsa_id: &RsaIdentity) -> bool {
1421 self.by_rsa_id_unchecked(rsa_id).is_some()
1422 }
1423
1424 /// List the hsdirs in this NetDir, that should be in the HSDir rings
1425 ///
1426 /// The results are not returned in any particular order.
1427 #[cfg(feature = "hs-common")]
1428 fn all_hsdirs(&self) -> impl Iterator<Item = (RouterStatusIdx, Relay<'_>)> {
1429 self.c_relays().iter_enumerated().filter_map(|(rsidx, rs)| {
1430 let relay = self.relay_from_rs_and_rsidx(rs, rsidx);
1431 relay.is_hsdir_for_ring().then_some(())?;
1432 let relay = relay.into_relay()?;
1433 Some((rsidx, relay))
1434 })
1435 }
1436
1437 /// Return the parameters from the consensus, clamped to the
1438 /// correct ranges, with defaults filled in.
1439 ///
1440 /// NOTE: that unsupported parameters aren't returned here; only those
1441 /// values configured in the `params` module are available.
1442 pub fn params(&self) -> &NetParameters {
1443 &self.params
1444 }
1445
1446 /// Return a [`ProtoStatus`](netstatus::ProtoStatus) that lists the
1447 /// network's current requirements and recommendations for the list of
1448 /// protocols that every relay must implement.
1449 //
1450 // TODO HS: I am not sure this is the right API; other alternatives would be:
1451 // * To expose the _required_ relay protocol list instead (since that's all that
1452 // onion service implementations need).
1453 // * To expose the client protocol list as well (for symmetry).
1454 // * To expose the MdConsensus instead (since that's more general, although
1455 // it restricts the future evolution of this API).
1456 //
1457 // I think that this is a reasonably good compromise for now, but I'm going
1458 // to put it behind the `hs-common` feature to give us time to consider more.
1459 #[cfg(feature = "hs-common")]
1460 pub fn relay_protocol_status(&self) -> &netstatus::ProtoStatus {
1461 self.consensus.relay_protocol_status()
1462 }
1463
1464 /// Return a [`ProtoStatus`](netstatus::ProtoStatus) that lists the
1465 /// network's current requirements and recommendations for the list of
1466 /// protocols that every relay must implement.
1467 //
1468 // TODO HS: See notes on relay_protocol_status above.
1469 #[cfg(feature = "hs-common")]
1470 pub fn client_protocol_status(&self) -> &netstatus::ProtoStatus {
1471 self.consensus.client_protocol_status()
1472 }
1473
1474 /// Construct a `CircTarget` from an externally provided list of link specifiers,
1475 /// and an externally provided onion key.
1476 ///
1477 /// This method is used in the onion service protocol,
1478 /// where introduction points and rendezvous points are specified using these inputs.
1479 ///
1480 /// This function is a member of `NetDir` so that it can provide a reasonable list of
1481 /// [`Protocols`](tor_protover::Protocols) capabilities for the generated `CircTarget`.
1482 /// It does not (and should not!) look up anything else from the directory.
1483 #[cfg(feature = "hs-common")]
1484 pub fn circ_target_from_verbatim_linkspecs(
1485 &self,
1486 linkspecs: &[tor_linkspec::EncodedLinkSpec],
1487 ntor_onion_key: &curve25519::PublicKey,
1488 ) -> StdResult<VerbatimLinkSpecCircTarget<OwnedCircTarget>, VerbatimCircTargetDecodeError> {
1489 use VerbatimCircTargetDecodeError as E;
1490 use tor_linkspec::CircTarget as _;
1491 use tor_linkspec::decode::Strictness;
1492
1493 let mut bld = OwnedCircTarget::builder();
1494 use tor_error::into_internal;
1495
1496 *bld.chan_target() =
1497 OwnedChanTargetBuilder::from_encoded_linkspecs(Strictness::Standard, linkspecs)?;
1498 let protocols = {
1499 let chan_target = bld.chan_target().build().map_err(into_internal!(
1500 "from_encoded_linkspecs gave an invalid output"
1501 ))?;
1502 match self
1503 .by_ids_detailed(&chan_target)
1504 .map_err(E::ImpossibleIds)?
1505 {
1506 Some(relay) => relay.protovers().clone(),
1507 None => self.relay_protocol_status().required_protocols().clone(),
1508 }
1509 };
1510 bld.protocols(protocols);
1511 bld.ntor_onion_key(*ntor_onion_key);
1512 Ok(VerbatimLinkSpecCircTarget::new(
1513 bld.build()
1514 .map_err(into_internal!("Failed to construct a valid circtarget"))?,
1515 linkspecs.to_vec(),
1516 ))
1517 }
1518
1519 /// Return weighted the fraction of relays we can use. We only
1520 /// consider relays that match the predicate `usable`. We weight
1521 /// this bandwidth according to the provided `role`.
1522 ///
1523 /// If _no_ matching relays in the consensus have a nonzero
1524 /// weighted bandwidth value, we fall back to looking at the
1525 /// unweighted fraction of matching relays.
1526 ///
1527 /// If there are no matching relays in the consensus, we return 0.0.
1528 fn frac_for_role<'a, F>(&'a self, role: WeightRole, usable: F) -> f64
1529 where
1530 F: Fn(&UncheckedRelay<'a>) -> bool,
1531 {
1532 let mut total_weight = 0_u64;
1533 let mut have_weight = 0_u64;
1534 let mut have_count = 0_usize;
1535 let mut total_count = 0_usize;
1536
1537 for r in self.all_relays() {
1538 if !usable(&r) {
1539 continue;
1540 }
1541 let w = self.weights.weight_rs_for_role(r.rs, role);
1542 total_weight += w;
1543 total_count += 1;
1544 if r.is_usable() {
1545 have_weight += w;
1546 have_count += 1;
1547 }
1548 }
1549
1550 if total_weight > 0 {
1551 // The consensus lists some weighted bandwidth so return the
1552 // fraction of the weighted bandwidth for which we have
1553 // descriptors.
1554 (have_weight as f64) / (total_weight as f64)
1555 } else if total_count > 0 {
1556 // The consensus lists no weighted bandwidth for these relays,
1557 // but at least it does list relays. Return the fraction of
1558 // relays for which it we have descriptors.
1559 (have_count as f64) / (total_count as f64)
1560 } else {
1561 // There are no relays of this kind in the consensus. Return
1562 // 0.0, to avoid dividing by zero and giving NaN.
1563 0.0
1564 }
1565 }
1566 /// Return the estimated fraction of possible paths that we have
1567 /// enough microdescriptors to build.
1568 fn frac_usable_paths(&self) -> f64 {
1569 // TODO #504, TODO SPEC: We may want to add a set of is_flagged_fast() and/or
1570 // is_flagged_stable() checks here. This will require spec clarification.
1571 let f_g = self.frac_for_role(WeightRole::Guard, |u| {
1572 u.low_level_details().is_suitable_as_guard()
1573 });
1574 let f_m = self.frac_for_role(WeightRole::Middle, |_| true);
1575 let f_e = if self.all_relays().any(|u| u.rs.is_flagged_exit()) {
1576 self.frac_for_role(WeightRole::Exit, |u| u.rs.is_flagged_exit())
1577 } else {
1578 // If there are no exits at all, we use f_m here.
1579 f_m
1580 };
1581 f_g * f_m * f_e
1582 }
1583 /// Return true if there is enough information in this NetDir to build
1584 /// multihop circuits.
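    ///
    /// For example, if the `min_paths_for_circs_pct` parameter is 60 and we
    /// have enough microdescriptors to build about 64% of all possible paths,
    /// this returns true.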
1585 fn have_enough_paths(&self) -> bool {
1586 // TODO-A001: This should check for our guards as well, and
1587 // make sure that if they're listed in the consensus, we have
1588 // the descriptors for them.
1589
1590 // If we can build a randomly chosen path with at least this
1591 // probability, we know enough information to participate
1592 // on the network.
1593
1594 let min_frac_paths: f64 = self.params().min_circuit_path_threshold.as_fraction();
1595
1596 // What fraction of paths can we build?
1597 let available = self.frac_usable_paths();
1598
1599 available >= min_frac_paths
1600 }
1601 /// Choose a relay at random.
1602 ///
1603 /// Each relay is chosen with probability proportional to its weight
1604 /// in the role `role`, and is only selected if the predicate `usable`
1605 /// returns true for it.
1606 ///
1607 /// This function returns None if (and only if) there are no relays
1608 /// with nonzero weight where `usable` returned true.
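    ///
    /// # Example
    ///
    /// A minimal sketch (not compiled here); `netdir` is assumed to be an
    /// existing `NetDir`, and `rng` a `rand::Rng`:
    ///
    /// ```ignore
    /// // Pick one relay, weighted for use as a middle hop, accepting any relay.
    /// if let Some(relay) = netdir.pick_relay(&mut rng, WeightRole::Middle, |_| true) {
    ///     println!("picked relay {}", relay.id());
    /// }
    /// ```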
1609 //
1610 // TODO this API, with the `usable` closure, invites mistakes where we fail to
1611 // check conditions that are implied by the role we have selected for the relay:
1612 // call sites must include a call to `Relay::is_polarity_inverter()` or whatever.
1613 // IMO the `WeightRole` ought to imply a condition (and it should therefore probably
1614 // be renamed.) -Diziet
1615 pub fn pick_relay<'a, R, P>(
1616 &'a self,
1617 rng: &mut R,
1618 role: WeightRole,
1619 usable: P,
1620 ) -> Option<Relay<'a>>
1621 where
1622 R: rand::Rng,
1623 P: FnMut(&Relay<'a>) -> bool,
1624 {
1625 let relays: Vec<_> = self.relays().filter(usable).collect();
1626         // This algorithm uses rand::distr::WeightedIndex, which
1627 // gives O(n) time and space to build the index, plus O(log n)
1628 // sampling time.
1629 //
1630 // We might be better off building a WeightedIndex in advance
1631 // for each `role`, and then sampling it repeatedly until we
1632 // get a relay that satisfies `usable`. Or we might not --
1633 // that depends heavily on the actual particulars of our
1634 // inputs. We probably shouldn't make any changes there
1635 // unless profiling tells us that this function is in a hot
1636 // path.
1637 //
1638 // The C Tor sampling implementation goes through some trouble
1639 // here to try to make its path selection constant-time. I
1640 // believe that there is no actual remotely exploitable
1641 // side-channel here however. It could be worth analyzing in
1642 // the future.
1643 //
1644 // This code will give the wrong result if the total of all weights
1645 // can exceed u64::MAX. We make sure that can't happen when we
1646 // set up `self.weights`.
1647 match relays[..].choose_weighted(rng, |r| self.weights.weight_rs_for_role(r.rs, role)) {
1648 Ok(relay) => Some(relay.clone()),
1649 Err(WeightError::InsufficientNonZero) => {
1650 if relays.is_empty() {
1651 None
1652 } else {
1653 warn!(?self.weights, ?role,
1654 "After filtering, all {} relays had zero weight. Choosing one at random. See bug #1907.",
1655 relays.len());
1656 relays.choose(rng).cloned()
1657 }
1658 }
1659 Err(e) => {
1660 warn_report!(e, "Unexpected error while sampling a relay");
1661 None
1662 }
1663 }
1664 }
1665
1666     /// Choose `n` relays at random.
1667 ///
1668 /// Each relay is chosen with probability proportional to its weight
1669 /// in the role `role`, and is only selected if the predicate `usable`
1670 /// returns true for it.
1671 ///
1672 /// Relays are chosen without replacement: no relay will be
1673 /// returned twice. Therefore, the resulting vector may be smaller
1674 /// than `n` if we happen to have fewer than `n` appropriate relays.
1675 ///
1676 /// This function returns an empty vector if (and only if) there
1677 /// are no relays with nonzero weight where `usable` returned
1678 /// true.
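    ///
    /// # Example
    ///
    /// A rough sketch (not compiled here); `netdir` and `rng` are assumed to
    /// already exist:
    ///
    /// ```ignore
    /// // Ask for up to three distinct relays, weighted for the middle position.
    /// let picked = netdir.pick_n_relays(&mut rng, 3, WeightRole::Middle, |_| true);
    /// assert!(picked.len() <= 3);
    /// ```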
1679 #[allow(clippy::cognitive_complexity)] // all due to tracing crate.
1680 pub fn pick_n_relays<'a, R, P>(
1681 &'a self,
1682 rng: &mut R,
1683 n: usize,
1684 role: WeightRole,
1685 usable: P,
1686 ) -> Vec<Relay<'a>>
1687 where
1688 R: rand::Rng,
1689 P: FnMut(&Relay<'a>) -> bool,
1690 {
1691 let relays: Vec<_> = self.relays().filter(usable).collect();
1692 // NOTE: See discussion in pick_relay().
1693 let mut relays = match relays[..].choose_multiple_weighted(rng, n, |r| {
1694 self.weights.weight_rs_for_role(r.rs, role) as f64
1695 }) {
1696 Err(WeightError::InsufficientNonZero) => {
1697 // Too few relays had nonzero weights: return all of those that are okay.
1698             // (This behavior used to come up with rand 0.9; it no longer does,
1699             // but we still detect it.)
1700 let remaining: Vec<_> = relays
1701 .iter()
1702 .filter(|r| self.weights.weight_rs_for_role(r.rs, role) > 0)
1703 .cloned()
1704 .collect();
1705 if remaining.is_empty() {
1706 warn!(?self.weights, ?role,
1707 "After filtering, all {} relays had zero weight! Picking some at random. See bug #1907.",
1708 relays.len());
1709 if relays.len() >= n {
1710 relays.choose_multiple(rng, n).cloned().collect()
1711 } else {
1712 relays
1713 }
1714 } else {
1715 warn!(?self.weights, ?role,
1716 "After filtering, only had {}/{} relays with nonzero weight. Returning them all. See bug #1907.",
1717 remaining.len(), relays.len());
1718 remaining
1719 }
1720 }
1721 Err(e) => {
1722 warn_report!(e, "Unexpected error while sampling a set of relays");
1723 Vec::new()
1724 }
1725 Ok(iter) => {
1726 let selection: Vec<_> = iter.map(Relay::clone).collect();
1727 if selection.len() < n && selection.len() < relays.len() {
1728 warn!(?self.weights, ?role,
1729 "choose_multiple_weighted returned only {returned}, despite requesting {n}, \
1730 and having {filtered_len} available after filtering. See bug #1907.",
1731 returned=selection.len(), filtered_len=relays.len());
1732 }
1733 selection
1734 }
1735 };
1736 relays.shuffle(rng);
1737 relays
1738 }
1739
1740 /// Compute the weight with which `relay` will be selected for a given
1741 /// `role`.
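    ///
    /// For example (a sketch; `netdir` and `relay` are assumed to exist), the
    /// share of the total guard-position weight held by `relay` could be
    /// estimated as:
    ///
    /// ```ignore
    /// let w = netdir.relay_weight(&relay, WeightRole::Guard);
    /// let total = netdir.total_weight(WeightRole::Guard, |u| u.is_usable());
    /// let share = w.checked_div(total); // None if `total` is zero.
    /// ```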
1742 pub fn relay_weight<'a>(&'a self, relay: &Relay<'a>, role: WeightRole) -> RelayWeight {
1743 RelayWeight(self.weights.weight_rs_for_role(relay.rs, role))
1744 }
1745
1746 /// Compute the total weight with which any relay matching `usable`
1747 /// will be selected for a given `role`.
1748 ///
1749 /// Note: because this function is used to assess the total
1750     /// properties of the consensus, the `usable` predicate takes an
1751     /// [`UncheckedRelay`] rather than a [`Relay`].
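    ///
    /// For example (a sketch; `netdir` is assumed to be an existing `NetDir`):
    ///
    /// ```ignore
    /// // Total weight, for the Guard role, of every usable relay.
    /// let total = netdir.total_weight(WeightRole::Guard, |u| u.is_usable());
    /// ```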
1752 pub fn total_weight<P>(&self, role: WeightRole, usable: P) -> RelayWeight
1753 where
1754 P: Fn(&UncheckedRelay<'_>) -> bool,
1755 {
1756 self.all_relays()
1757 .filter_map(|unchecked| {
1758 if usable(&unchecked) {
1759 Some(RelayWeight(
1760 self.weights.weight_rs_for_role(unchecked.rs, role),
1761 ))
1762 } else {
1763 None
1764 }
1765 })
1766 .sum()
1767 }
1768
1769 /// Compute the weight with which a relay with ID `rsa_id` would be
1770 /// selected for a given `role`.
1771 ///
1772     /// Note that the weight returned by this function assumes that the
1773     /// relay with that ID is actually [usable](NetDir#usable); if it isn't usable,
1774     /// then other weight-related functions will treat its weight as zero.
1775 pub fn weight_by_rsa_id(&self, rsa_id: &RsaIdentity, role: WeightRole) -> Option<RelayWeight> {
1776 self.by_rsa_id_unchecked(rsa_id)
1777 .map(|unchecked| RelayWeight(self.weights.weight_rs_for_role(unchecked.rs, role)))
1778 }
1779
1780 /// Return all relays in this NetDir known to be in the same family as
1781 /// `relay`.
1782 ///
1783 /// This list of members will **not** necessarily include `relay` itself.
1784 ///
1785 /// # Limitations
1786 ///
1787 /// Two relays only belong to the same family if _each_ relay
1788 /// claims to share a family with the other. But if we are
1789 /// missing a microdescriptor for one of the relays listed by this
1790 /// relay, we cannot know whether it acknowledges family
1791 /// membership with this relay or not. Therefore, this function
1792 /// can omit family members for which there is not (as yet) any
1793 /// Relay object.
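    ///
    /// # Example
    ///
    /// A sketch (not compiled here); `netdir` and `relay` are assumed to exist:
    ///
    /// ```ignore
    /// // Count the mutually-acknowledged family members that we know about.
    /// let n_family = netdir.known_family_members(&relay).count();
    /// ```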
1794 pub fn known_family_members<'a>(
1795 &'a self,
1796 relay: &'a Relay<'a>,
1797 ) -> impl Iterator<Item = Relay<'a>> {
1798 let relay_rsa_id = relay.rsa_id();
1799 relay.md.family().members().filter_map(move |other_rsa_id| {
1800 self.by_rsa_id(other_rsa_id)
1801 .filter(|other_relay| other_relay.md.family().contains(relay_rsa_id))
1802 })
1803 }
1804
1805 /// Return the current hidden service directory "time period".
1806 ///
1807 /// Specifically, this returns the time period that contains the beginning
1808 /// of the validity period of this `NetDir`'s consensus. That time period
1809     /// is the one we use when acting as a hidden service client.
1810 #[cfg(feature = "hs-common")]
1811 pub fn hs_time_period(&self) -> TimePeriod {
1812 self.hsdir_rings.current.time_period()
1813 }
1814
1815 /// Return the [`HsDirParams`] of all the relevant hidden service directory "time periods"
1816 ///
1817 /// This includes the current time period (as from
1818 /// [`.hs_time_period`](NetDir::hs_time_period))
1819 /// plus additional time periods that we publish descriptors for when we are
1820 /// acting as a hidden service.
1821 #[cfg(feature = "hs-service")]
1822 pub fn hs_all_time_periods(&self) -> Vec<HsDirParams> {
1823 self.hsdir_rings
1824 .iter()
1825 .map(|r| r.params().clone())
1826 .collect()
1827 }
1828
1829 /// Return the relays in this network directory that will be used as hidden service directories
1830 ///
1831 /// These are suitable to retrieve a given onion service's descriptor at a given time period.
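    ///
    /// For example (a sketch; `netdir`, `hs_blind_id`, and `rng` are assumed
    /// to exist, and errors are propagated with `?`):
    ///
    /// ```ignore
    /// // HsDirs to fetch the descriptor from, in random order.
    /// let dirs = netdir.hs_dirs_download(hs_blind_id, netdir.hs_time_period(), &mut rng)?;
    /// ```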
1832 #[cfg(feature = "hs-common")]
1833 pub fn hs_dirs_download<'r, R>(
1834 &'r self,
1835 hsid: HsBlindId,
1836 period: TimePeriod,
1837 rng: &mut R,
1838 ) -> std::result::Result<Vec<Relay<'r>>, Bug>
1839 where
1840 R: rand::Rng,
1841 {
1842 // Algorithm:
1843 //
1844 // 1. Determine which HsDirRing to use, based on the time period.
1845 // 2. Find the shared random value that's associated with that HsDirRing.
1846 // 3. Choose spread = the parameter `hsdir_spread_fetch`
1847 // 4. Let n_replicas = the parameter `hsdir_n_replicas`.
1848 // 5. Initialize Dirs = []
1849 // 6. for idx in 1..=n_replicas:
1850 // - let H = hsdir_ring::onion_service_index(id, replica, rand,
1851 // period).
1852 // - Find the position of H within hsdir_ring.
1853 // - Take elements from hsdir_ring starting at that position,
1854 // adding them to Dirs until we have added `spread` new elements
1855 // that were not there before.
1856 // 7. Shuffle Dirs
1857 // 8. return Dirs.
1858
1859 let spread = self.spread(HsDirOp::Download);
1860
1861 // When downloading, only look at relays on current ring.
1862 let ring = &self.hsdir_rings.current;
1863
1864 if ring.params().time_period != period {
1865 return Err(internal!(
1866 "our current ring is not associated with the requested time period!"
1867 ));
1868 }
1869
1870 let mut hs_dirs = self.select_hsdirs(hsid, ring, spread).collect_vec();
1871
1872 // When downloading, the order of the returned relays is random.
1873 hs_dirs.shuffle(rng);
1874
1875 Ok(hs_dirs)
1876 }
1877
1878 /// Return the relays in this network directory that will be used as hidden service directories
1879 ///
1880 /// Returns the relays that are suitable for storing a given onion service's descriptors at the
1881 /// given time period.
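    ///
    /// For example (a sketch; `netdir` and `hs_blind_id` are assumed to exist,
    /// and errors are propagated with `?`):
    ///
    /// ```ignore
    /// // Upload the service descriptor to the HsDirs for every relevant time period.
    /// for params in netdir.hs_all_time_periods() {
    ///     for hsdir in netdir.hs_dirs_upload(hs_blind_id, params.time_period())? {
    ///         // ... upload the descriptor to `hsdir` ...
    ///     }
    /// }
    /// ```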
1882 #[cfg(feature = "hs-service")]
1883 pub fn hs_dirs_upload(
1884 &self,
1885 hsid: HsBlindId,
1886 period: TimePeriod,
1887 ) -> std::result::Result<impl Iterator<Item = Relay<'_>>, Bug> {
1888 // Algorithm:
1889 //
1890 // 1. Choose spread = the parameter `hsdir_spread_store`
1891 // 2. Determine which HsDirRing to use, based on the time period.
1892 // 3. Find the shared random value that's associated with that HsDirRing.
1893 // 4. Let n_replicas = the parameter `hsdir_n_replicas`.
1894 // 5. Initialize Dirs = []
1895 // 6. for idx in 1..=n_replicas:
1896 // - let H = hsdir_ring::onion_service_index(id, replica, rand,
1897 // period).
1898 // - Find the position of H within hsdir_ring.
1899 // - Take elements from hsdir_ring starting at that position,
1900 // adding them to Dirs until we have added `spread` new elements
1901 // that were not there before.
1902 // 3. return Dirs.
1903 let spread = self.spread(HsDirOp::Upload);
1904
1905 // For each HsBlindId, determine which HsDirRing to use.
1906 let rings = self
1907 .hsdir_rings
1908 .iter()
1909 .filter_map(move |ring| {
1910 // Make sure the ring matches the TP of the hsid it's matched with.
1911 (ring.params().time_period == period).then_some((ring, hsid, period))
1912 })
1913 .collect::<Vec<_>>();
1914
1915 // The specified period should have an associated ring.
1916 if !rings.iter().any(|(_, _, tp)| *tp == period) {
1917 return Err(internal!(
1918 "the specified time period does not have an associated ring"
1919 ));
1920 };
1921
1922 // Now that we've matched each `hsid` with the ring associated with its TP, we can start
1923 // selecting replicas from each ring.
1924 Ok(rings.into_iter().flat_map(move |(ring, hsid, period)| {
1925 assert_eq!(period, ring.params().time_period());
1926 self.select_hsdirs(hsid, ring, spread)
1927 }))
1928 }
1929
1930 /// Return the relays in this network directory that will be used as hidden service directories
1931 ///
1932 /// Depending on `op`,
1933 /// these are suitable to either store, or retrieve, a
1934 /// given onion service's descriptor at a given time period.
1935 ///
1936 /// When `op` is `Download`, the order is random.
1937 /// When `op` is `Upload`, the order is not specified.
1938 ///
1939 /// Return an error if the time period is not one returned by
1940 /// `onion_service_time_period` or `onion_service_secondary_time_periods`.
1941 //
1942 // TODO: make HsDirOp pub(crate) once this is removed
1943 #[cfg(feature = "hs-common")]
1944 #[deprecated(note = "Use hs_dirs_upload or hs_dirs_download instead")]
1945 pub fn hs_dirs<'r, R>(&'r self, hsid: &HsBlindId, op: HsDirOp, rng: &mut R) -> Vec<Relay<'r>>
1946 where
1947 R: rand::Rng,
1948 {
1949 // Algorithm:
1950 //
1951 // 1. Determine which HsDirRing to use, based on the time period.
1952 // 2. Find the shared random value that's associated with that HsDirRing.
1953 // 3. Choose spread = the parameter `hsdir_spread_store` or
1954 // `hsdir_spread_fetch` based on `op`.
1955 // 4. Let n_replicas = the parameter `hsdir_n_replicas`.
1956 // 5. Initialize Dirs = []
1957 // 6. for idx in 1..=n_replicas:
1958 // - let H = hsdir_ring::onion_service_index(id, replica, rand,
1959 // period).
1960 // - Find the position of H within hsdir_ring.
1961 // - Take elements from hsdir_ring starting at that position,
1962 // adding them to Dirs until we have added `spread` new elements
1963 // that were not there before.
1964 // 7. return Dirs.
1965 let n_replicas = self
1966 .params
1967 .hsdir_n_replicas
1968 .get()
1969 .try_into()
1970 .expect("BoundedInt did not enforce bounds");
1971
1972 let spread = match op {
1973 HsDirOp::Download => self.params.hsdir_spread_fetch,
1974 #[cfg(feature = "hs-service")]
1975 HsDirOp::Upload => self.params.hsdir_spread_store,
1976 };
1977
1978 let spread = spread
1979 .get()
1980 .try_into()
1981 .expect("BoundedInt did not enforce bounds!");
1982
1983 // TODO: I may be wrong here but I suspect that this function may
1984 // need refactoring so that it does not look at _all_ of the HsDirRings,
1985         // but only at the ones that correspond to time periods for which
1986 // HsBlindId is valid. Or I could be mistaken, in which case we should
1987 // have a comment to explain why I am, since the logic is subtle.
1988 // (For clients, there is only one ring.) -nickm
1989 //
1990 // (Actually, there is no need to follow through with the above TODO,
1991 // since this function is deprecated, and not used anywhere but the
1992 // tests.)
1993
1994 let mut hs_dirs = self
1995 .hsdir_rings
1996 .iter_for_op(op)
1997 .cartesian_product(1..=n_replicas) // 1-indexed !
1998 .flat_map({
1999 let mut selected_nodes = HashSet::new();
2000
2001 move |(ring, replica): (&HsDirRing, u8)| {
2002 let hsdir_idx = hsdir_ring::service_hsdir_index(hsid, replica, ring.params());
2003
2004 ring.ring_items_at(hsdir_idx, spread, |(hsdir_idx, _)| {
2005 // According to rend-spec 2.2.3:
2006 // ... If any of those
2007 // nodes have already been selected for a lower-numbered replica of the
2008 // service, any nodes already chosen are disregarded (i.e. skipped over)
2009 // when choosing a replica's hsdir_spread_store nodes.
2010 selected_nodes.insert(*hsdir_idx)
2011 })
2012 .collect::<Vec<_>>()
2013 }
2014 })
2015 .filter_map(|(_hsdir_idx, rs_idx)| {
2016 // This ought not to be None but let's not panic or bail if it is
2017 self.relay_by_rs_idx(*rs_idx)
2018 })
2019 .collect_vec();
2020
2021 match op {
2022 HsDirOp::Download => {
2023 // When `op` is `Download`, the order is random.
2024 hs_dirs.shuffle(rng);
2025 }
2026 #[cfg(feature = "hs-service")]
2027 HsDirOp::Upload => {
2028 // When `op` is `Upload`, the order is not specified.
2029 }
2030 }
2031
2032 hs_dirs
2033 }
2034}
2035
2036impl MdReceiver for NetDir {
2037 fn missing_microdescs(&self) -> Box<dyn Iterator<Item = &MdDigest> + '_> {
2038 Box::new(self.rsidx_by_missing.keys())
2039 }
2040 fn add_microdesc(&mut self, md: Microdesc) -> bool {
2041 self.add_arc_microdesc(Arc::new(md))
2042 }
2043 fn n_missing(&self) -> usize {
2044 self.rsidx_by_missing.len()
2045 }
2046}
2047
2048impl<'a> UncheckedRelay<'a> {
2049 /// Return an [`UncheckedRelayDetails`](details::UncheckedRelayDetails) for this relay.
2050 ///
2051 /// Callers should generally avoid using this information directly if they can;
2052 /// it's better to use a higher-level function that exposes semantic information
2053 /// rather than these properties.
2054 pub fn low_level_details(&self) -> details::UncheckedRelayDetails<'_> {
2055 details::UncheckedRelayDetails(self)
2056 }
2057
2058 /// Return true if this relay is valid and [usable](NetDir#usable).
2059 ///
2060 /// This function should return `true` for every Relay we expose
2061 /// to the user.
2062 pub fn is_usable(&self) -> bool {
2063 // No need to check for 'valid' or 'running': they are implicit.
2064 self.md.is_some() && self.rs.ed25519_id_is_usable()
2065 }
2066 /// If this is [usable](NetDir#usable), return a corresponding Relay object.
2067 pub fn into_relay(self) -> Option<Relay<'a>> {
2068 if self.is_usable() {
2069 Some(Relay {
2070 rs: self.rs,
2071 md: self.md?,
2072 #[cfg(feature = "geoip")]
2073 cc: self.cc,
2074 })
2075 } else {
2076 None
2077 }
2078 }
2079
2080 /// Return true if this relay is a hidden service directory
2081 ///
2082 /// Ie, if it is to be included in the hsdir ring.
2083 #[cfg(feature = "hs-common")]
2084 pub(crate) fn is_hsdir_for_ring(&self) -> bool {
2085         // TODO are there any other flags we should check?
2086 // rend-spec-v3 2.2.3 says just
2087 // "each node listed in the current consensus with the HSDir flag"
2088 // Do we need to check ed25519_id_is_usable ?
2089 // See also https://gitlab.torproject.org/tpo/core/arti/-/issues/504
2090 self.rs.is_flagged_hsdir()
2091 }
2092}
2093
2094impl<'a> Relay<'a> {
2095 /// Return a [`RelayDetails`](details::RelayDetails) for this relay.
2096 ///
2097 /// Callers should generally avoid using this information directly if they can;
2098 /// it's better to use a higher-level function that exposes semantic information
2099 /// rather than these properties.
2100 pub fn low_level_details(&self) -> details::RelayDetails<'_> {
2101 details::RelayDetails(self)
2102 }
2103
2104 /// Return the Ed25519 ID for this relay.
2105 pub fn id(&self) -> &Ed25519Identity {
2106 self.md.ed25519_id()
2107 }
2108 /// Return the RsaIdentity for this relay.
2109 pub fn rsa_id(&self) -> &RsaIdentity {
2110 self.rs.rsa_identity()
2111 }
2112
2113 /// Return a reference to this relay's "router status" entry in
2114 /// the consensus.
2115 ///
2116 /// The router status entry contains information about the relay
2117 /// that the authorities voted on directly. For most use cases,
2118 /// you shouldn't need them.
2119 ///
2120 /// This function is only available if the crate was built with
2121 /// its `experimental-api` feature.
2122 #[cfg(feature = "experimental-api")]
2123 pub fn rs(&self) -> &netstatus::MdRouterStatus {
2124 self.rs
2125 }
2126 /// Return a reference to this relay's "microdescriptor" entry in
2127 /// the consensus.
2128 ///
2129 /// A "microdescriptor" is a synopsis of the information about a relay,
2130 /// used to determine its capabilities and route traffic through it.
2131 /// For most use cases, you shouldn't need it.
2132 ///
2133 /// This function is only available if the crate was built with
2134 /// its `experimental-api` feature.
2135 #[cfg(feature = "experimental-api")]
2136 pub fn md(&self) -> &Microdesc {
2137 self.md
2138 }
2139}
2140
2141/// An error value returned from [`NetDir::by_ids_detailed`].
2142#[cfg(feature = "hs-common")]
2143#[derive(Clone, Debug, thiserror::Error)]
2144#[non_exhaustive]
2145pub enum RelayLookupError {
2146 /// We found a relay whose presence indicates that the provided set of
2147 /// identities is impossible to resolve.
2148 #[error("Provided set of identities is impossible according to consensus.")]
2149 Impossible,
2150}
2151
2152impl<'a> HasAddrs for Relay<'a> {
2153 fn addrs(&self) -> impl Iterator<Item = std::net::SocketAddr> {
2154 self.rs.addrs()
2155 }
2156}
2157#[cfg(feature = "geoip")]
2158impl<'a> HasCountryCode for Relay<'a> {
2159 fn country_code(&self) -> Option<CountryCode> {
2160 self.cc
2161 }
2162}
2163impl<'a> tor_linkspec::HasRelayIdsLegacy for Relay<'a> {
2164 fn ed_identity(&self) -> &Ed25519Identity {
2165 self.id()
2166 }
2167 fn rsa_identity(&self) -> &RsaIdentity {
2168 self.rsa_id()
2169 }
2170}
2171
2172impl<'a> HasRelayIds for UncheckedRelay<'a> {
2173 fn identity(&self, key_type: RelayIdType) -> Option<RelayIdRef<'_>> {
2174 match key_type {
2175 RelayIdType::Ed25519 if self.rs.ed25519_id_is_usable() => {
2176 self.md.map(|m| m.ed25519_id().into())
2177 }
2178 RelayIdType::Rsa => Some(self.rs.rsa_identity().into()),
2179 _ => None,
2180 }
2181 }
2182}
2183#[cfg(feature = "geoip")]
2184impl<'a> HasCountryCode for UncheckedRelay<'a> {
2185 fn country_code(&self) -> Option<CountryCode> {
2186 self.cc
2187 }
2188}
2189
2190impl<'a> DirectChanMethodsHelper for Relay<'a> {}
2191impl<'a> ChanTarget for Relay<'a> {}
2192
2193impl<'a> tor_linkspec::CircTarget for Relay<'a> {
2194 fn ntor_onion_key(&self) -> &ll::pk::curve25519::PublicKey {
2195 self.md.ntor_key()
2196 }
2197 fn protovers(&self) -> &tor_protover::Protocols {
2198 self.rs.protovers()
2199 }
2200}
2201
2202#[cfg(test)]
2203mod test {
2204 // @@ begin test lint list maintained by maint/add_warning @@
2205 #![allow(clippy::bool_assert_comparison)]
2206 #![allow(clippy::clone_on_copy)]
2207 #![allow(clippy::dbg_macro)]
2208 #![allow(clippy::mixed_attributes_style)]
2209 #![allow(clippy::print_stderr)]
2210 #![allow(clippy::print_stdout)]
2211 #![allow(clippy::single_char_pattern)]
2212 #![allow(clippy::unwrap_used)]
2213 #![allow(clippy::unchecked_time_subtraction)]
2214 #![allow(clippy::useless_vec)]
2215 #![allow(clippy::needless_pass_by_value)]
2216 //! <!-- @@ end test lint list maintained by maint/add_warning @@ -->
2217 #![allow(clippy::cognitive_complexity)]
2218 use super::*;
2219 use crate::testnet::*;
2220 use float_eq::assert_float_eq;
2221 use std::collections::HashSet;
2222 use std::time::Duration;
2223 use tor_basic_utils::test_rng::{self, testing_rng};
2224 use tor_linkspec::{RelayIdType, RelayIds};
2225
2226 #[cfg(feature = "hs-common")]
2227 fn dummy_hs_blind_id() -> HsBlindId {
2228 let hsid = [2, 1, 1, 1].iter().cycle().take(32).cloned().collect_vec();
2229 let hsid = Ed25519Identity::new(hsid[..].try_into().unwrap());
2230 HsBlindId::from(hsid)
2231 }
2232
2233 // Basic functionality for a partial netdir: Add microdescriptors,
2234 // then you have a netdir.
2235 #[test]
2236 fn partial_netdir() {
2237 let (consensus, microdescs) = construct_network().unwrap();
2238 let dir = PartialNetDir::new(consensus, None);
2239
2240 // Check the lifetime
2241 let lifetime = dir.lifetime();
2242 assert_eq!(
2243 lifetime
2244 .valid_until()
2245 .duration_since(lifetime.valid_after())
2246 .unwrap(),
2247 Duration::new(86400, 0)
2248 );
2249
2250 // No microdescriptors, so we don't have enough paths, and can't
2251 // advance.
2252 assert!(!dir.have_enough_paths());
2253 let mut dir = match dir.unwrap_if_sufficient() {
2254 Ok(_) => panic!(),
2255 Err(d) => d,
2256 };
2257
2258 let missing: HashSet<_> = dir.missing_microdescs().collect();
2259 assert_eq!(missing.len(), 40);
2260 assert_eq!(missing.len(), dir.netdir.c_relays().len());
2261         for md in &microdescs {
2262 assert!(missing.contains(md.digest()));
2263 }
2264
2265 // Now add all the mds and try again.
2266 for md in microdescs {
2267 let wanted = dir.add_microdesc(md);
2268 assert!(wanted);
2269 }
2270
2271 let missing: HashSet<_> = dir.missing_microdescs().collect();
2272 assert!(missing.is_empty());
2273 assert!(dir.have_enough_paths());
2274 let _complete = match dir.unwrap_if_sufficient() {
2275 Ok(d) => d,
2276 Err(_) => panic!(),
2277 };
2278 }
2279
2280 #[test]
2281 fn override_params() {
2282 let (consensus, _microdescs) = construct_network().unwrap();
2283 let override_p = "bwweightscale=2 doesnotexist=77 circwindow=500"
2284 .parse()
2285 .unwrap();
2286 let dir = PartialNetDir::new(consensus.clone(), Some(&override_p));
2287 let params = &dir.netdir.params;
2288 assert_eq!(params.bw_weight_scale.get(), 2);
2289 assert_eq!(params.circuit_window.get(), 500_i32);
2290
2291 // try again without the override.
2292 let dir = PartialNetDir::new(consensus, None);
2293 let params = &dir.netdir.params;
2294 assert_eq!(params.bw_weight_scale.get(), 1_i32);
2295 assert_eq!(params.circuit_window.get(), 1000_i32);
2296 }
2297
2298 #[test]
2299 fn fill_from_previous() {
2300 let (consensus, microdescs) = construct_network().unwrap();
2301
2302 let mut dir = PartialNetDir::new(consensus.clone(), None);
2303 for md in microdescs.iter().skip(2) {
2304 let wanted = dir.add_microdesc(md.clone());
2305 assert!(wanted);
2306 }
2307 let dir1 = dir.unwrap_if_sufficient().unwrap();
2308 assert_eq!(dir1.missing_microdescs().count(), 2);
2309
2310 let mut dir = PartialNetDir::new(consensus, None);
2311 assert_eq!(dir.missing_microdescs().count(), 40);
2312 dir.fill_from_previous_netdir(Arc::new(dir1));
2313 assert_eq!(dir.missing_microdescs().count(), 2);
2314 }
2315
2316 #[test]
2317 fn path_count() {
2318 let low_threshold = "min_paths_for_circs_pct=64".parse().unwrap();
2319 let high_threshold = "min_paths_for_circs_pct=65".parse().unwrap();
2320
2321 let (consensus, microdescs) = construct_network().unwrap();
2322
2323 let mut dir = PartialNetDir::new(consensus.clone(), Some(&low_threshold));
2324 for (pos, md) in microdescs.iter().enumerate() {
2325 if pos % 7 == 2 {
2326 continue; // skip a few relays.
2327 }
2328 dir.add_microdesc(md.clone());
2329 }
2330 let dir = dir.unwrap_if_sufficient().unwrap();
2331
2332 // We have 40 relays that we know about from the consensus.
2333 assert_eq!(dir.all_relays().count(), 40);
2334
2335 // But only 34 are usable.
2336 assert_eq!(dir.relays().count(), 34);
2337
2338 // For guards: mds 20..=39 correspond to Guard relays.
2339 // Their bandwidth is 2*(1000+2000+...10000) = 110_000.
2340 // We skipped 23, 30, and 37. They have bandwidth
2341 // 4000 + 1000 + 8000 = 13_000. So our fractional bandwidth
2342 // should be (110-13)/110.
2343 let f = dir.frac_for_role(WeightRole::Guard, |u| u.rs.is_flagged_guard());
2344 assert!(((97.0 / 110.0) - f).abs() < 0.000001);
2345
2346 // For exits: mds 10..=19 and 30..=39 correspond to Exit relays.
2347 // We skipped 16, 30, and 37. Per above our fractional bandwidth is
2348 // (110-16)/110.
2349 let f = dir.frac_for_role(WeightRole::Exit, |u| u.rs.is_flagged_exit());
2350 assert!(((94.0 / 110.0) - f).abs() < 0.000001);
2351
2352 // For middles: all relays are middles. We skipped 2, 9, 16,
2353 // 23, 30, and 37. Per above our fractional bandwidth is
2354 // (220-33)/220
2355 let f = dir.frac_for_role(WeightRole::Middle, |_| true);
2356 assert!(((187.0 / 220.0) - f).abs() < 0.000001);
2357
2358 // Multiplying those together, we get the fraction of paths we can
2359 // build at ~0.64052066, which is above the threshold we set above for
2360 // MinPathsForCircsPct.
2361 let f = dir.frac_usable_paths();
2362 assert!((f - 0.64052066).abs() < 0.000001);
2363
2364 // But if we try again with a slightly higher threshold...
2365 let mut dir = PartialNetDir::new(consensus, Some(&high_threshold));
2366 for (pos, md) in microdescs.into_iter().enumerate() {
2367 if pos % 7 == 2 {
2368 continue; // skip a few relays.
2369 }
2370 dir.add_microdesc(md);
2371 }
2372 assert!(dir.unwrap_if_sufficient().is_err());
2373 }
2374
2375 /// Return a 3-tuple for use by `test_pick_*()` of an Rng, a number of
2376 /// iterations, and a tolerance.
2377 ///
2378 /// If the Rng is deterministic (the default), we can use a faster setup,
2379 /// with a higher tolerance and fewer iterations. But if you've explicitly
2380 /// opted into randomization (or are replaying a seed from an earlier
2381 /// randomized test), we give you more iterations and a tighter tolerance.
2382 fn testing_rng_with_tolerances() -> (impl rand::Rng, usize, f64) {
2383 // Use a deterministic RNG if none is specified, since this is slow otherwise.
2384 let config = test_rng::Config::from_env().unwrap_or(test_rng::Config::Deterministic);
2385 let (iters, tolerance) = match config {
2386 test_rng::Config::Deterministic => (5000, 0.02),
2387 _ => (50000, 0.01),
2388 };
2389 (config.into_rng(), iters, tolerance)
2390 }
2391
2392 #[test]
2393 fn test_pick() {
2394 let (consensus, microdescs) = construct_network().unwrap();
2395 let mut dir = PartialNetDir::new(consensus, None);
2396 for md in microdescs.into_iter() {
2397 let wanted = dir.add_microdesc(md.clone());
2398 assert!(wanted);
2399 }
2400 let dir = dir.unwrap_if_sufficient().unwrap();
2401
2402 let (mut rng, total, tolerance) = testing_rng_with_tolerances();
2403
2404 let mut picked = [0_isize; 40];
2405 for _ in 0..total {
2406 let r = dir.pick_relay(&mut rng, WeightRole::Middle, |r| {
2407 r.low_level_details().supports_exit_port_ipv4(80)
2408 });
2409 let r = r.unwrap();
2410 let id_byte = r.identity(RelayIdType::Rsa).unwrap().as_bytes()[0];
2411 picked[id_byte as usize] += 1;
2412 }
2413 // non-exits should never get picked.
2414 picked[0..10].iter().for_each(|x| assert_eq!(*x, 0));
2415 picked[20..30].iter().for_each(|x| assert_eq!(*x, 0));
2416
2417 let picked_f: Vec<_> = picked.iter().map(|x| *x as f64 / total as f64).collect();
2418
2419         // We didn't set any non-default weights, so the other relays get
2420         // weighted proportionally to their bandwidth.
2421 assert_float_eq!(picked_f[19], (10.0 / 110.0), abs <= tolerance);
2422 assert_float_eq!(picked_f[38], (9.0 / 110.0), abs <= tolerance);
2423 assert_float_eq!(picked_f[39], (10.0 / 110.0), abs <= tolerance);
2424 }
2425
2426 #[test]
2427 fn test_pick_multiple() {
2428 // This is mostly a copy of test_pick, except that it uses
2429 // pick_n_relays to pick several relays at once.
2430
2431 let dir = construct_netdir().unwrap_if_sufficient().unwrap();
2432
2433 let (mut rng, total, tolerance) = testing_rng_with_tolerances();
2434
2435 let mut picked = [0_isize; 40];
2436 for _ in 0..total / 4 {
2437 let relays = dir.pick_n_relays(&mut rng, 4, WeightRole::Middle, |r| {
2438 r.low_level_details().supports_exit_port_ipv4(80)
2439 });
2440 assert_eq!(relays.len(), 4);
2441 for r in relays {
2442 let id_byte = r.identity(RelayIdType::Rsa).unwrap().as_bytes()[0];
2443 picked[id_byte as usize] += 1;
2444 }
2445 }
2446 // non-exits should never get picked.
2447 picked[0..10].iter().for_each(|x| assert_eq!(*x, 0));
2448 picked[20..30].iter().for_each(|x| assert_eq!(*x, 0));
2449
2450 let picked_f: Vec<_> = picked.iter().map(|x| *x as f64 / total as f64).collect();
2451
2452         // We didn't set any non-default weights, so the other relays get
2453         // weighted proportionally to their bandwidth.
2454 assert_float_eq!(picked_f[19], (10.0 / 110.0), abs <= tolerance);
2455 assert_float_eq!(picked_f[36], (7.0 / 110.0), abs <= tolerance);
2456 assert_float_eq!(picked_f[39], (10.0 / 110.0), abs <= tolerance);
2457 }
2458
2459 #[test]
2460 fn subnets() {
2461 let cfg = SubnetConfig::default();
2462
2463 fn same_net(cfg: &SubnetConfig, a: &str, b: &str) -> bool {
2464 cfg.addrs_in_same_subnet(&a.parse().unwrap(), &b.parse().unwrap())
2465 }
2466
2467 assert!(same_net(&cfg, "127.15.3.3", "127.15.9.9"));
2468 assert!(!same_net(&cfg, "127.15.3.3", "127.16.9.9"));
2469
2470 assert!(!same_net(&cfg, "127.15.3.3", "127::"));
2471
2472 assert!(same_net(&cfg, "ffff:ffff:90:33::", "ffff:ffff:91:34::"));
2473 assert!(!same_net(&cfg, "ffff:ffff:90:33::", "ffff:fffe:91:34::"));
2474
2475 let cfg = SubnetConfig {
2476 subnets_family_v4: 32,
2477 subnets_family_v6: 128,
2478 };
2479 assert!(!same_net(&cfg, "127.15.3.3", "127.15.9.9"));
2480 assert!(!same_net(&cfg, "ffff:ffff:90:33::", "ffff:ffff:91:34::"));
2481
2482 assert!(same_net(&cfg, "127.0.0.1", "127.0.0.1"));
2483 assert!(!same_net(&cfg, "127.0.0.1", "127.0.0.2"));
2484 assert!(same_net(&cfg, "ffff:ffff:90:33::", "ffff:ffff:90:33::"));
2485
2486 let cfg = SubnetConfig {
2487 subnets_family_v4: 33,
2488 subnets_family_v6: 129,
2489 };
2490 assert!(!same_net(&cfg, "127.0.0.1", "127.0.0.1"));
2491 assert!(!same_net(&cfg, "::", "::"));
2492 }
2493
2494 #[test]
2495 fn subnet_union() {
2496 let cfg1 = SubnetConfig {
2497 subnets_family_v4: 16,
2498 subnets_family_v6: 64,
2499 };
2500 let cfg2 = SubnetConfig {
2501 subnets_family_v4: 24,
2502 subnets_family_v6: 32,
2503 };
2504 let a1 = "1.2.3.4".parse().unwrap();
2505 let a2 = "1.2.10.10".parse().unwrap();
2506
2507 let a3 = "ffff:ffff::7".parse().unwrap();
2508 let a4 = "ffff:ffff:1234::8".parse().unwrap();
2509
2510 assert_eq!(cfg1.addrs_in_same_subnet(&a1, &a2), true);
2511 assert_eq!(cfg2.addrs_in_same_subnet(&a1, &a2), false);
2512
2513 assert_eq!(cfg1.addrs_in_same_subnet(&a3, &a4), false);
2514 assert_eq!(cfg2.addrs_in_same_subnet(&a3, &a4), true);
2515
2516 let cfg_u = cfg1.union(&cfg2);
2517 assert_eq!(
2518 cfg_u,
2519 SubnetConfig {
2520 subnets_family_v4: 16,
2521 subnets_family_v6: 32,
2522 }
2523 );
2524 assert_eq!(cfg_u.addrs_in_same_subnet(&a1, &a2), true);
2525 assert_eq!(cfg_u.addrs_in_same_subnet(&a3, &a4), true);
2526
2527 assert_eq!(cfg1.union(&cfg1), cfg1);
2528
2529 assert_eq!(cfg1.union(&SubnetConfig::no_addresses_match()), cfg1);
2530 }
2531
2532 #[test]
2533 fn relay_funcs() {
2534 let (consensus, microdescs) = construct_custom_network(
2535 |pos, nb, _| {
2536 if pos == 15 {
2537 nb.rs.add_or_port("[f0f0::30]:9001".parse().unwrap());
2538 } else if pos == 20 {
2539 nb.rs.add_or_port("[f0f0::3131]:9001".parse().unwrap());
2540 }
2541 },
2542 None,
2543 )
2544 .unwrap();
2545 let subnet_config = SubnetConfig::default();
2546 let all_family_info = FamilyRules::all_family_info();
2547 let mut dir = PartialNetDir::new(consensus, None);
2548 for md in microdescs.into_iter() {
2549 let wanted = dir.add_microdesc(md.clone());
2550 assert!(wanted);
2551 }
2552 let dir = dir.unwrap_if_sufficient().unwrap();
2553
2554 // Pick out a few relays by ID.
2555 let k0 = Ed25519Identity::from([0; 32]);
2556 let k1 = Ed25519Identity::from([1; 32]);
2557 let k2 = Ed25519Identity::from([2; 32]);
2558 let k3 = Ed25519Identity::from([3; 32]);
2559 let k10 = Ed25519Identity::from([10; 32]);
2560 let k15 = Ed25519Identity::from([15; 32]);
2561 let k20 = Ed25519Identity::from([20; 32]);
2562
2563 let r0 = dir.by_id(&k0).unwrap();
2564 let r1 = dir.by_id(&k1).unwrap();
2565 let r2 = dir.by_id(&k2).unwrap();
2566 let r3 = dir.by_id(&k3).unwrap();
2567 let r10 = dir.by_id(&k10).unwrap();
2568 let r15 = dir.by_id(&k15).unwrap();
2569 let r20 = dir.by_id(&k20).unwrap();
2570
2571 assert_eq!(r0.id(), &[0; 32].into());
2572 assert_eq!(r0.rsa_id(), &[0; 20].into());
2573 assert_eq!(r1.id(), &[1; 32].into());
2574 assert_eq!(r1.rsa_id(), &[1; 20].into());
2575
2576 assert!(r0.same_relay_ids(&r0));
2577 assert!(r1.same_relay_ids(&r1));
2578 assert!(!r1.same_relay_ids(&r0));
2579
2580 assert!(r0.low_level_details().is_dir_cache());
2581 assert!(!r1.low_level_details().is_dir_cache());
2582 assert!(r2.low_level_details().is_dir_cache());
2583 assert!(!r3.low_level_details().is_dir_cache());
2584
2585 assert!(!r0.low_level_details().supports_exit_port_ipv4(80));
2586 assert!(!r1.low_level_details().supports_exit_port_ipv4(80));
2587 assert!(!r2.low_level_details().supports_exit_port_ipv4(80));
2588 assert!(!r3.low_level_details().supports_exit_port_ipv4(80));
2589
2590 assert!(!r0.low_level_details().policies_allow_some_port());
2591 assert!(!r1.low_level_details().policies_allow_some_port());
2592 assert!(!r2.low_level_details().policies_allow_some_port());
2593 assert!(!r3.low_level_details().policies_allow_some_port());
2594 assert!(r10.low_level_details().policies_allow_some_port());
2595
2596 assert!(r0.low_level_details().in_same_family(&r0, all_family_info));
2597 assert!(r0.low_level_details().in_same_family(&r1, all_family_info));
2598 assert!(r1.low_level_details().in_same_family(&r0, all_family_info));
2599 assert!(r1.low_level_details().in_same_family(&r1, all_family_info));
2600 assert!(!r0.low_level_details().in_same_family(&r2, all_family_info));
2601 assert!(!r2.low_level_details().in_same_family(&r0, all_family_info));
2602 assert!(r2.low_level_details().in_same_family(&r2, all_family_info));
2603 assert!(r2.low_level_details().in_same_family(&r3, all_family_info));
2604
2605 assert!(r0.low_level_details().in_same_subnet(&r10, &subnet_config));
2606 assert!(r10.low_level_details().in_same_subnet(&r10, &subnet_config));
2607 assert!(r0.low_level_details().in_same_subnet(&r0, &subnet_config));
2608 assert!(r1.low_level_details().in_same_subnet(&r1, &subnet_config));
2609 assert!(!r1.low_level_details().in_same_subnet(&r2, &subnet_config));
2610 assert!(!r2.low_level_details().in_same_subnet(&r3, &subnet_config));
2611
2612 // Make sure IPv6 families work.
2613 let subnet_config = SubnetConfig {
2614 subnets_family_v4: 128,
2615 subnets_family_v6: 96,
2616 };
2617 assert!(r15.low_level_details().in_same_subnet(&r20, &subnet_config));
2618 assert!(!r15.low_level_details().in_same_subnet(&r1, &subnet_config));
2619
2620 // Make sure that subnet configs can be disabled.
2621 let subnet_config = SubnetConfig {
2622 subnets_family_v4: 255,
2623 subnets_family_v6: 255,
2624 };
2625 assert!(!r15.low_level_details().in_same_subnet(&r20, &subnet_config));
2626 }
2627
2628 #[test]
2629 fn test_badexit() {
2630 // make a netdir where relays 10-19 are badexit, and everybody
2631 // exits to 443 on IPv6.
2632 use tor_netdoc::types::relay_flags::RelayFlag;
2633 let netdir = construct_custom_netdir(|pos, nb, _| {
2634 if (10..20).contains(&pos) {
2635 nb.rs.add_flags(RelayFlag::BadExit);
2636 }
2637 nb.md.parse_ipv6_policy("accept 443").unwrap();
2638 })
2639 .unwrap()
2640 .unwrap_if_sufficient()
2641 .unwrap();
2642
2643 let e12 = netdir.by_id(&Ed25519Identity::from([12; 32])).unwrap();
2644 let e32 = netdir.by_id(&Ed25519Identity::from([32; 32])).unwrap();
2645
2646 assert!(!e12.low_level_details().supports_exit_port_ipv4(80));
2647 assert!(e32.low_level_details().supports_exit_port_ipv4(80));
2648
2649 assert!(!e12.low_level_details().supports_exit_port_ipv6(443));
2650 assert!(e32.low_level_details().supports_exit_port_ipv6(443));
2651 assert!(!e32.low_level_details().supports_exit_port_ipv6(555));
2652
2653 assert!(!e12.low_level_details().policies_allow_some_port());
2654 assert!(e32.low_level_details().policies_allow_some_port());
2655
2656 assert!(!e12.low_level_details().ipv4_policy().allows_some_port());
2657 assert!(!e12.low_level_details().ipv6_policy().allows_some_port());
2658 assert!(e32.low_level_details().ipv4_policy().allows_some_port());
2659 assert!(e32.low_level_details().ipv6_policy().allows_some_port());
2660
2661 assert!(
2662 e12.low_level_details()
2663 .ipv4_declared_policy()
2664 .allows_some_port()
2665 );
2666 assert!(
2667 e12.low_level_details()
2668 .ipv6_declared_policy()
2669 .allows_some_port()
2670 );
2671 }
2672
2673 #[cfg(feature = "experimental-api")]
2674 #[test]
2675 fn test_accessors() {
2676 let netdir = construct_netdir().unwrap_if_sufficient().unwrap();
2677
2678 let r4 = netdir.by_id(&Ed25519Identity::from([4; 32])).unwrap();
2679 let r16 = netdir.by_id(&Ed25519Identity::from([16; 32])).unwrap();
2680
2681 assert!(!r4.md().ipv4_policy().allows_some_port());
2682 assert!(r16.md().ipv4_policy().allows_some_port());
2683
2684 assert!(!r4.rs().is_flagged_exit());
2685 assert!(r16.rs().is_flagged_exit());
2686 }
2687
2688 #[test]
2689 fn test_by_id() {
2690 // Make a netdir that omits the microdescriptor for 0xDDDDDD...
2691 let netdir = construct_custom_netdir(|pos, nb, _| {
2692 nb.omit_md = pos == 13;
2693 })
2694 .unwrap();
2695
2696 let netdir = netdir.unwrap_if_sufficient().unwrap();
2697
2698 let r = netdir.by_id(&Ed25519Identity::from([0; 32])).unwrap();
2699 assert_eq!(r.id().as_bytes(), &[0; 32]);
2700
2701 assert!(netdir.by_id(&Ed25519Identity::from([13; 32])).is_none());
2702
2703 let r = netdir.by_rsa_id(&[12; 20].into()).unwrap();
2704 assert_eq!(r.rsa_id().as_bytes(), &[12; 20]);
2705 assert!(netdir.rsa_id_is_listed(&[12; 20].into()));
2706
2707 assert!(netdir.by_rsa_id(&[13; 20].into()).is_none());
2708
2709 assert!(netdir.by_rsa_id_unchecked(&[99; 20].into()).is_none());
2710 assert!(!netdir.rsa_id_is_listed(&[99; 20].into()));
2711
2712 let r = netdir.by_rsa_id_unchecked(&[13; 20].into()).unwrap();
2713 assert_eq!(r.rs.rsa_identity().as_bytes(), &[13; 20]);
2714 assert!(netdir.rsa_id_is_listed(&[13; 20].into()));
2715
2716 let pair_13_13 = RelayIds::builder()
2717 .ed_identity([13; 32].into())
2718 .rsa_identity([13; 20].into())
2719 .build()
2720 .unwrap();
2721 let pair_14_14 = RelayIds::builder()
2722 .ed_identity([14; 32].into())
2723 .rsa_identity([14; 20].into())
2724 .build()
2725 .unwrap();
2726 let pair_14_99 = RelayIds::builder()
2727 .ed_identity([14; 32].into())
2728 .rsa_identity([99; 20].into())
2729 .build()
2730 .unwrap();
2731
2732 let r = netdir.by_ids(&pair_13_13);
2733 assert!(r.is_none());
2734 let r = netdir.by_ids(&pair_14_14).unwrap();
2735 assert_eq!(r.identity(RelayIdType::Rsa).unwrap().as_bytes(), &[14; 20]);
2736 assert_eq!(
2737 r.identity(RelayIdType::Ed25519).unwrap().as_bytes(),
2738 &[14; 32]
2739 );
2740 let r = netdir.by_ids(&pair_14_99);
2741 assert!(r.is_none());
2742
2743 assert_eq!(
2744 netdir.id_pair_listed(&[13; 32].into(), &[13; 20].into()),
2745 None
2746 );
2747 assert_eq!(
2748 netdir.id_pair_listed(&[15; 32].into(), &[15; 20].into()),
2749 Some(true)
2750 );
2751 assert_eq!(
2752 netdir.id_pair_listed(&[15; 32].into(), &[99; 20].into()),
2753 Some(false)
2754 );
2755 }
2756
2757 #[test]
2758 #[cfg(feature = "hs-common")]
2759 fn test_by_ids_detailed() {
2760 // Make a netdir that omits the microdescriptor for 0xDDDDDD...
2761 let netdir = construct_custom_netdir(|pos, nb, _| {
2762 nb.omit_md = pos == 13;
2763 })
2764 .unwrap();
2765
2766 let netdir = netdir.unwrap_if_sufficient().unwrap();
2767
2768 let id13_13 = RelayIds::builder()
2769 .ed_identity([13; 32].into())
2770 .rsa_identity([13; 20].into())
2771 .build()
2772 .unwrap();
2773 let id15_15 = RelayIds::builder()
2774 .ed_identity([15; 32].into())
2775 .rsa_identity([15; 20].into())
2776 .build()
2777 .unwrap();
2778 let id15_99 = RelayIds::builder()
2779 .ed_identity([15; 32].into())
2780 .rsa_identity([99; 20].into())
2781 .build()
2782 .unwrap();
2783 let id99_15 = RelayIds::builder()
2784 .ed_identity([99; 32].into())
2785 .rsa_identity([15; 20].into())
2786 .build()
2787 .unwrap();
2788 let id99_99 = RelayIds::builder()
2789 .ed_identity([99; 32].into())
2790 .rsa_identity([99; 20].into())
2791 .build()
2792 .unwrap();
2793 let id15_xx = RelayIds::builder()
2794 .ed_identity([15; 32].into())
2795 .build()
2796 .unwrap();
2797 let idxx_15 = RelayIds::builder()
2798 .rsa_identity([15; 20].into())
2799 .build()
2800 .unwrap();
2801
2802 assert!(matches!(netdir.by_ids_detailed(&id13_13), Ok(None)));
2803 assert!(matches!(netdir.by_ids_detailed(&id15_15), Ok(Some(_))));
2804 assert!(matches!(
2805 netdir.by_ids_detailed(&id15_99),
2806 Err(RelayLookupError::Impossible)
2807 ));
2808 assert!(matches!(
2809 netdir.by_ids_detailed(&id99_15),
2810 Err(RelayLookupError::Impossible)
2811 ));
2812 assert!(matches!(netdir.by_ids_detailed(&id99_99), Ok(None)));
2813 assert!(matches!(netdir.by_ids_detailed(&id15_xx), Ok(Some(_))));
2814 assert!(matches!(netdir.by_ids_detailed(&idxx_15), Ok(Some(_))));
2815 }
2816
2817 #[test]
2818 fn weight_type() {
2819 let r0 = RelayWeight(0);
2820 let r100 = RelayWeight(100);
2821 let r200 = RelayWeight(200);
2822 let r300 = RelayWeight(300);
2823 assert_eq!(r100 + r200, r300);
2824 assert_eq!(r100.checked_div(r200), Some(0.5));
2825 assert!(r100.checked_div(r0).is_none());
2826 assert_eq!(r200.ratio(0.5), Some(r100));
2827 assert!(r200.ratio(-1.0).is_none());
2828 }
2829
2830 #[test]
2831 fn weight_accessors() {
2832 // Make a netdir that omits the microdescriptor for 0xDDDDDD...
2833 let netdir = construct_netdir().unwrap_if_sufficient().unwrap();
2834
2835 let g_total = netdir.total_weight(WeightRole::Guard, |r| r.rs.is_flagged_guard());
2836 // This is just the total guard weight, since all our Wxy = 1.
2837 assert_eq!(g_total, RelayWeight(110_000));
2838
2839 let g_total = netdir.total_weight(WeightRole::Guard, |_| false);
2840 assert_eq!(g_total, RelayWeight(0));
2841
2842 let relay = netdir.by_id(&Ed25519Identity::from([35; 32])).unwrap();
2843 assert!(relay.rs.is_flagged_guard());
2844 let w = netdir.relay_weight(&relay, WeightRole::Guard);
2845 assert_eq!(w, RelayWeight(6_000));
2846
2847 let w = netdir
2848 .weight_by_rsa_id(&[33; 20].into(), WeightRole::Guard)
2849 .unwrap();
2850 assert_eq!(w, RelayWeight(4_000));
2851
2852 assert!(
2853 netdir
2854 .weight_by_rsa_id(&[99; 20].into(), WeightRole::Guard)
2855 .is_none()
2856 );
2857 }
2858
2859 #[test]
2860 fn family_list() {
2861 let netdir = construct_custom_netdir(|pos, n, _| {
2862 if pos == 0x0a {
2863 n.md.family(
2864 "$0B0B0B0B0B0B0B0B0B0B0B0B0B0B0B0B0B0B0B0B \
2865 $0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C \
2866 $0D0D0D0D0D0D0D0D0D0D0D0D0D0D0D0D0D0D0D0D"
2867 .parse()
2868 .unwrap(),
2869 );
2870 } else if pos == 0x0c {
2871 n.md.family("$0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A".parse().unwrap());
2872 }
2873 })
2874 .unwrap()
2875 .unwrap_if_sufficient()
2876 .unwrap();
2877
2878 // In the testing netdir, adjacent members are in the same family by default...
2879 let r0 = netdir.by_id(&Ed25519Identity::from([0; 32])).unwrap();
2880 let family: Vec<_> = netdir.known_family_members(&r0).collect();
2881 assert_eq!(family.len(), 1);
2882 assert_eq!(family[0].id(), &Ed25519Identity::from([1; 32]));
2883
2884 // But we've made this relay claim membership with several others.
2885 let r10 = netdir.by_id(&Ed25519Identity::from([10; 32])).unwrap();
2886 let family: HashSet<_> = netdir.known_family_members(&r10).map(|r| *r.id()).collect();
2887 assert_eq!(family.len(), 2);
2888 assert!(family.contains(&Ed25519Identity::from([11; 32])));
2889 assert!(family.contains(&Ed25519Identity::from([12; 32])));
2890 // Note that 13 doesn't get put in, even though it's listed, since it doesn't claim
2891 // membership with 10.
2892 }
2893 #[test]
2894 #[cfg(feature = "geoip")]
2895 fn relay_has_country_code() {
2896 let src_v6 = r#"
2897 fe80:dead:beef::,fe80:dead:ffff::,US
2898 fe80:feed:eeee::1,fe80:feed:eeee::2,AT
2899 fe80:feed:eeee::2,fe80:feed:ffff::,DE
2900 "#;
2901 let db = GeoipDb::new_from_legacy_format("", src_v6).unwrap();
2902
2903 let netdir = construct_custom_netdir_with_geoip(
2904 |pos, n, _| {
2905 if pos == 0x01 {
2906 n.rs.add_or_port("[fe80:dead:beef::1]:42".parse().unwrap());
2907 }
2908 if pos == 0x02 {
2909 n.rs.add_or_port("[fe80:feed:eeee::1]:42".parse().unwrap());
2910 n.rs.add_or_port("[fe80:feed:eeee::2]:42".parse().unwrap());
2911 }
2912 if pos == 0x03 {
2913 n.rs.add_or_port("[fe80:dead:beef::1]:42".parse().unwrap());
2914 n.rs.add_or_port("[fe80:dead:beef::2]:42".parse().unwrap());
2915 }
2916 },
2917 &db,
2918 )
2919 .unwrap()
2920 .unwrap_if_sufficient()
2921 .unwrap();
2922
2923 // No GeoIP data available -> None
2924 let r0 = netdir.by_id(&Ed25519Identity::from([0; 32])).unwrap();
2925 assert_eq!(r0.cc, None);
2926
2927 // Exactly one match -> Some
2928 let r1 = netdir.by_id(&Ed25519Identity::from([1; 32])).unwrap();
2929 assert_eq!(r1.cc.as_ref().map(|x| x.as_ref()), Some("US"));
2930
2931 // Conflicting matches -> None
2932 let r2 = netdir.by_id(&Ed25519Identity::from([2; 32])).unwrap();
2933 assert_eq!(r2.cc, None);
2934
2935 // Multiple agreeing matches -> Some
2936 let r3 = netdir.by_id(&Ed25519Identity::from([3; 32])).unwrap();
2937 assert_eq!(r3.cc.as_ref().map(|x| x.as_ref()), Some("US"));
2938 }
2939
2940 #[test]
2941 #[cfg(feature = "hs-common")]
2942 #[allow(deprecated)]
2943 fn hs_dirs_selection() {
2944 use tor_basic_utils::test_rng::testing_rng;
2945
2946 const HSDIR_SPREAD_STORE: i32 = 6;
2947 const HSDIR_SPREAD_FETCH: i32 = 2;
2948 const PARAMS: [(&str, i32); 2] = [
2949 ("hsdir_spread_store", HSDIR_SPREAD_STORE),
2950 ("hsdir_spread_fetch", HSDIR_SPREAD_FETCH),
2951 ];
2952
2953 let netdir: Arc<NetDir> =
2954 crate::testnet::construct_custom_netdir_with_params(|_, _, _| {}, PARAMS, None)
2955 .unwrap()
2956 .unwrap_if_sufficient()
2957 .unwrap()
2958 .into();
2959 let hsid = dummy_hs_blind_id();
2960
2961 const OP_RELAY_COUNT: &[(HsDirOp, usize)] = &[
2962             // We can't upload to (hsdir_n_replicas * hsdir_spread_store) = 12 relays, because there
2963 // are only 10 relays with the HsDir flag in the consensus.
2964 #[cfg(feature = "hs-service")]
2965 (HsDirOp::Upload, 10),
2966 (HsDirOp::Download, 4),
2967 ];
2968
2969 for (op, relay_count) in OP_RELAY_COUNT {
2970 let relays = netdir.hs_dirs(&hsid, *op, &mut testing_rng());
2971
2972 assert_eq!(relays.len(), *relay_count);
2973
2974 // There should be no duplicates (the filtering function passed to
2975 // HsDirRing::ring_items_at() ensures the relays that are already in use for
2976 // lower-numbered replicas aren't considered a second time for a higher-numbered
2977 // replica).
2978 let unique = relays
2979 .iter()
2980 .map(|relay| relay.ed_identity())
2981 .collect::<HashSet<_>>();
2982 assert_eq!(unique.len(), relays.len());
2983 }
2984
2985 // TODO: come up with a test that checks that HsDirRing::ring_items_at() skips over the
2986 // expected relays.
2987 //
2988 // For example, let's say we have the following hsdir ring:
2989 //
2990 // A - B
2991 // / \
2992 // F C
2993 // \ /
2994 // E - D
2995 //
2996 // Let's also assume that:
2997 //
2998 // * hsdir_spread_store = 3
2999 // * the ordering of the relays on the ring is [A, B, C, D, E, F]
3000 //
3001 // If we use relays [A, B, C] for replica 1, and hs_index(2) = E, then replica 2 _must_ get
3002 // relays [E, F, D]. We should have a test that checks this.
3003 }
3004
3005 #[test]
3006 fn zero_weights() {
3007 // Here we check the behavior of IndexedRandom::{choose_weighted, choose_multiple_weighted}
3008 // in the presence of items whose weight is 0.
3009 //
3010 // We think that the behavior is:
3011 // - An item with weight 0 is never returned.
3012 // - If all items have weight 0, choose_weighted returns an error.
3013 // - If all items have weight 0, choose_multiple_weighted returns an empty list.
3014 // - If we request n items from choose_multiple_weighted,
3015 // but only m<n items have nonzero weight, we return all m of those items.
3016 // - if the request for n items can't be completely satisfied with n items of weight >= 0,
3017 // we get InsufficientNonZero.
3018 let items = vec![1, 2, 3];
3019 let mut rng = testing_rng();
3020
3021 let a = items.choose_weighted(&mut rng, |_| 0);
3022 assert!(matches!(a, Err(WeightError::InsufficientNonZero)));
3023
3024 let x = items.choose_multiple_weighted(&mut rng, 2, |_| 0);
3025 let xs: Vec<_> = x.unwrap().collect();
3026 assert!(xs.is_empty());
3027
3028 let only_one = |n: &i32| if *n == 1 { 1 } else { 0 };
3029 let x = items.choose_multiple_weighted(&mut rng, 2, only_one);
3030 let xs: Vec<_> = x.unwrap().collect();
3031 assert_eq!(&xs[..], &[&1]);
3032
3033 for _ in 0..100 {
3034 let a = items.choose_weighted(&mut rng, only_one);
3035 assert_eq!(a.unwrap(), &1);
3036
3037 let x = items
3038 .choose_multiple_weighted(&mut rng, 1, only_one)
3039 .unwrap()
3040 .collect::<Vec<_>>();
3041 assert_eq!(x, vec![&1]);
3042 }
3043 }
3044
3045 #[test]
3046 fn insufficient_but_nonzero() {
3047         // Here we check IndexedRandom::choose_multiple_weighted when there are no zero values,
3048 // but there are insufficient values.
3049 // (If this behavior changes, we need to change our usage.)
3050
3051 let items = vec![1, 2, 3];
3052 let mut rng = testing_rng();
3053 let mut a = items
3054 .choose_multiple_weighted(&mut rng, 10, |_| 1)
3055 .unwrap()
3056 .copied()
3057 .collect::<Vec<_>>();
3058 a.sort();
3059 assert_eq!(a, items);
3060 }
3061}