tor_netdir/lib.rs
1#![cfg_attr(docsrs, feature(doc_cfg))]
2#![doc = include_str!("../README.md")]
3// @@ begin lint list maintained by maint/add_warning @@
4#![allow(renamed_and_removed_lints)] // @@REMOVE_WHEN(ci_arti_stable)
5#![allow(unknown_lints)] // @@REMOVE_WHEN(ci_arti_nightly)
6#![warn(missing_docs)]
7#![warn(noop_method_call)]
8#![warn(unreachable_pub)]
9#![warn(clippy::all)]
10#![deny(clippy::await_holding_lock)]
11#![deny(clippy::cargo_common_metadata)]
12#![deny(clippy::cast_lossless)]
13#![deny(clippy::checked_conversions)]
14#![warn(clippy::cognitive_complexity)]
15#![deny(clippy::debug_assert_with_mut_call)]
16#![deny(clippy::exhaustive_enums)]
17#![deny(clippy::exhaustive_structs)]
18#![deny(clippy::expl_impl_clone_on_copy)]
19#![deny(clippy::fallible_impl_from)]
20#![deny(clippy::implicit_clone)]
21#![deny(clippy::large_stack_arrays)]
22#![warn(clippy::manual_ok_or)]
23#![deny(clippy::missing_docs_in_private_items)]
24#![warn(clippy::needless_borrow)]
25#![warn(clippy::needless_pass_by_value)]
26#![warn(clippy::option_option)]
27#![deny(clippy::print_stderr)]
28#![deny(clippy::print_stdout)]
29#![warn(clippy::rc_buffer)]
30#![deny(clippy::ref_option_ref)]
31#![warn(clippy::semicolon_if_nothing_returned)]
32#![warn(clippy::trait_duplication_in_bounds)]
33#![deny(clippy::unchecked_time_subtraction)]
34#![deny(clippy::unnecessary_wraps)]
35#![warn(clippy::unseparated_literal_suffix)]
36#![deny(clippy::unwrap_used)]
37#![deny(clippy::mod_module_files)]
38#![allow(clippy::let_unit_value)] // This can reasonably be done for explicitness
39#![allow(clippy::uninlined_format_args)]
40#![allow(clippy::significant_drop_in_scrutinee)] // arti/-/merge_requests/588/#note_2812945
41#![allow(clippy::result_large_err)] // temporary workaround for arti#587
42#![allow(clippy::needless_raw_string_hashes)] // complained-about code is fine, often best
43#![allow(clippy::needless_lifetimes)] // See arti#1765
44#![allow(mismatched_lifetime_syntaxes)] // temporary workaround for arti#2060
45//! <!-- @@ end lint list maintained by maint/add_warning @@ -->
46
47pub mod details;
48mod err;
49#[cfg(feature = "hs-common")]
50mod hsdir_params;
51#[cfg(feature = "hs-common")]
52mod hsdir_ring;
53pub mod params;
54mod weight;
55
56#[cfg(any(test, feature = "testing"))]
57pub mod testnet;
58#[cfg(feature = "testing")]
59pub mod testprovider;
60
61use async_trait::async_trait;
62#[cfg(feature = "hs-service")]
63use itertools::chain;
64use tor_error::warn_report;
65#[cfg(feature = "hs-common")]
66use tor_linkspec::OwnedCircTarget;
67use tor_linkspec::{
68 ChanTarget, DirectChanMethodsHelper, HasAddrs, HasRelayIds, RelayIdRef, RelayIdType,
69};
70use tor_llcrypto as ll;
71use tor_llcrypto::pk::{ed25519::Ed25519Identity, rsa::RsaIdentity};
72use tor_netdoc::doc::microdesc::{MdDigest, Microdesc};
73use tor_netdoc::doc::netstatus::{self, MdConsensus, MdRouterStatus};
74#[cfg(feature = "hs-common")]
75use {hsdir_ring::HsDirRing, std::iter};
76
77use derive_more::{From, Into};
78use futures::{StreamExt, stream::BoxStream};
79use num_enum::{IntoPrimitive, TryFromPrimitive};
80use rand::seq::{IndexedRandom as _, SliceRandom as _, WeightError};
81use serde::Deserialize;
82use std::collections::HashMap;
83use std::net::IpAddr;
84use std::ops::Deref;
85use std::sync::Arc;
86use std::time::SystemTime;
87use strum::{EnumCount, EnumIter};
88use tracing::warn;
89use typed_index_collections::{TiSlice, TiVec};
90
91#[cfg(feature = "hs-common")]
92use {
93 itertools::Itertools,
94 std::collections::HashSet,
95 std::result::Result as StdResult,
96 tor_error::{Bug, internal},
97 tor_hscrypto::{pk::HsBlindId, time::TimePeriod},
98 tor_linkspec::{OwnedChanTargetBuilder, verbatim::VerbatimLinkSpecCircTarget},
99 tor_llcrypto::pk::curve25519,
100};
101
102pub use err::Error;
103pub use weight::WeightRole;
104/// A Result using the Error type from the tor-netdir crate
105pub type Result<T> = std::result::Result<T, Error>;
106
107#[cfg(feature = "hs-common")]
108pub use err::{OnionDirLookupError, VerbatimCircTargetDecodeError};
109
110use params::NetParameters;
111#[cfg(feature = "geoip")]
112use tor_geoip::{CountryCode, GeoipDb, HasCountryCode};
113
114#[cfg(feature = "hs-common")]
115pub use hsdir_params::HsDirParams;
116
117/// Index into the consensus relays
118///
119/// This is an index into the list of relays returned by
120/// [`.c_relays()`](ConsensusRelays::c_relays)
121/// (on the corresponding consensus or netdir).
122///
123/// This is just a `usize` inside, but using a newtype prevents getting a relay index
124/// confused with other kinds of slice indices or counts.
125///
126/// If you are in a part of the code which needs to work with multiple consensuses,
127/// the typechecking cannot tell if you try to index into the wrong consensus.
128#[derive(Debug, From, Into, Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash)]
129pub(crate) struct RouterStatusIdx(usize);
130
131/// Extension trait to provide index-type-safe `.c_relays()` method
132//
133// TODO: Really it would be better to have MdConsensns::relays() return TiSlice,
134// but that would be an API break there.
135pub(crate) trait ConsensusRelays {
136 /// Obtain the list of relays in the consensus
137 //
138 fn c_relays(&self) -> &TiSlice<RouterStatusIdx, MdRouterStatus>;
139}
140impl ConsensusRelays for MdConsensus {
141 fn c_relays(&self) -> &TiSlice<RouterStatusIdx, MdRouterStatus> {
142 TiSlice::from_ref(MdConsensus::relays(self))
143 }
144}
145impl ConsensusRelays for NetDir {
146 fn c_relays(&self) -> &TiSlice<RouterStatusIdx, MdRouterStatus> {
147 self.consensus.c_relays()
148 }
149}
150
151/// Configuration for determining when two relays have addresses "too close" in
152/// the network.
153///
154/// Used by `Relay::low_level_details().in_same_subnet()`.
155#[derive(Deserialize, Debug, Clone, Copy, Eq, PartialEq)]
156#[serde(deny_unknown_fields)]
157pub struct SubnetConfig {
158 /// Consider IPv4 nodes in the same /x to be the same family.
159 ///
160 /// If this value is 0, all nodes with IPv4 addresses will be in the
161 /// same family. If this value is above 32, then no nodes will be
162 /// placed im the same family based on their IPv4 addresses.
163 subnets_family_v4: u8,
164 /// Consider IPv6 nodes in the same /x to be the same family.
165 ///
166 /// If this value is 0, all nodes with IPv6 addresses will be in the
167 /// same family. If this value is above 128, then no nodes will be
168 /// placed im the same family based on their IPv6 addresses.
169 subnets_family_v6: u8,
170}
171
172impl Default for SubnetConfig {
173 fn default() -> Self {
174 Self::new(16, 32)
175 }
176}
177
178impl SubnetConfig {
179 /// Construct a new SubnetConfig from a pair of bit prefix lengths.
180 ///
181 /// The values are clamped to the appropriate ranges if they are
182 /// out-of-bounds.
183 pub fn new(subnets_family_v4: u8, subnets_family_v6: u8) -> Self {
184 Self {
185 subnets_family_v4,
186 subnets_family_v6,
187 }
188 }
189
190 /// Construct a new SubnetConfig such that addresses are not in the same
191 /// family with anything--not even with themselves.
192 pub fn no_addresses_match() -> SubnetConfig {
193 SubnetConfig {
194 subnets_family_v4: 33,
195 subnets_family_v6: 129,
196 }
197 }
198
199 /// Return true if the two addresses in the same subnet, according to this
200 /// configuration.
201 pub fn addrs_in_same_subnet(&self, a: &IpAddr, b: &IpAddr) -> bool {
202 match (a, b) {
203 (IpAddr::V4(a), IpAddr::V4(b)) => {
204 let bits = self.subnets_family_v4;
205 if bits > 32 {
206 return false;
207 }
208 let a = u32::from_be_bytes(a.octets());
209 let b = u32::from_be_bytes(b.octets());
210 (a >> (32 - bits)) == (b >> (32 - bits))
211 }
212 (IpAddr::V6(a), IpAddr::V6(b)) => {
213 let bits = self.subnets_family_v6;
214 if bits > 128 {
215 return false;
216 }
217 let a = u128::from_be_bytes(a.octets());
218 let b = u128::from_be_bytes(b.octets());
219 (a >> (128 - bits)) == (b >> (128 - bits))
220 }
221 _ => false,
222 }
223 }
224
225 /// Return true if any of the addresses in `a` shares a subnet with any of
226 /// the addresses in `b`, according to this configuration.
227 pub fn any_addrs_in_same_subnet<T, U>(&self, a: &T, b: &U) -> bool
228 where
229 T: tor_linkspec::HasAddrs,
230 U: tor_linkspec::HasAddrs,
231 {
232 a.addrs().any(|aa| {
233 b.addrs()
234 .any(|bb| self.addrs_in_same_subnet(&aa.ip(), &bb.ip()))
235 })
236 }
237
238 /// Return a new subnet configuration that is the union of `self` and
239 /// `other`.
240 ///
241 /// That is, return a subnet configuration that puts all addresses in the
242 /// same subnet if and only if at least one of `self` and `other` would put
243 /// them in the same subnet.
244 pub fn union(&self, other: &Self) -> Self {
245 use std::cmp::min;
246 Self {
247 subnets_family_v4: min(self.subnets_family_v4, other.subnets_family_v4),
248 subnets_family_v6: min(self.subnets_family_v6, other.subnets_family_v6),
249 }
250 }
251}
252
253/// Configuration for which listed family information to use when deciding
254/// whether relays belong to the same family.
255///
256/// Derived from network parameters.
257#[derive(Clone, Copy, Debug)]
258pub struct FamilyRules {
259 /// If true, we use family information from lists of family members.
260 use_family_lists: bool,
261 /// If true, we use family information from lists of family IDs and from family certs.
262 use_family_ids: bool,
263}
264
265impl<'a> From<&'a NetParameters> for FamilyRules {
266 fn from(params: &'a NetParameters) -> Self {
267 FamilyRules {
268 use_family_lists: bool::from(params.use_family_lists),
269 use_family_ids: bool::from(params.use_family_ids),
270 }
271 }
272}
273
274impl FamilyRules {
275 /// Return a `FamilyRules` that will use all recognized kinds of family information.
276 pub fn all_family_info() -> Self {
277 Self {
278 use_family_lists: true,
279 use_family_ids: true,
280 }
281 }
282
283 /// Return a `FamilyRules` that will ignore all family information declared by relays.
284 pub fn ignore_declared_families() -> Self {
285 Self {
286 use_family_lists: false,
287 use_family_ids: false,
288 }
289 }
290
291 /// Configure this `FamilyRules` to use (or not use) family information from
292 /// lists of family members.
293 pub fn use_family_lists(&mut self, val: bool) -> &mut Self {
294 self.use_family_lists = val;
295 self
296 }
297
298 /// Configure this `FamilyRules` to use (or not use) family information from
299 /// family IDs and family certs.
300 pub fn use_family_ids(&mut self, val: bool) -> &mut Self {
301 self.use_family_ids = val;
302 self
303 }
304
305 /// Return a `FamilyRules` that will look at every source of information
306 /// requested by `self` or by `other`.
307 pub fn union(&self, other: &Self) -> Self {
308 Self {
309 use_family_lists: self.use_family_lists || other.use_family_lists,
310 use_family_ids: self.use_family_ids || other.use_family_ids,
311 }
312 }
313}
314
315/// An opaque type representing the weight with which a relay or set of
316/// relays will be selected for a given role.
317///
318/// Most users should ignore this type, and just use pick_relay instead.
319#[derive(
320 Copy,
321 Clone,
322 Debug,
323 derive_more::Add,
324 derive_more::Sum,
325 derive_more::AddAssign,
326 Eq,
327 PartialEq,
328 Ord,
329 PartialOrd,
330)]
331pub struct RelayWeight(u64);
332
333impl RelayWeight {
334 /// Try to divide this weight by `rhs`.
335 ///
336 /// Return a ratio on success, or None on division-by-zero.
337 pub fn checked_div(&self, rhs: RelayWeight) -> Option<f64> {
338 if rhs.0 == 0 {
339 None
340 } else {
341 Some((self.0 as f64) / (rhs.0 as f64))
342 }
343 }
344
345 /// Compute a ratio `frac` of this weight.
346 ///
347 /// Return None if frac is less than zero, since negative weights
348 /// are impossible.
349 pub fn ratio(&self, frac: f64) -> Option<RelayWeight> {
350 let product = (self.0 as f64) * frac;
351 if product >= 0.0 && product.is_finite() {
352 Some(RelayWeight(product as u64))
353 } else {
354 None
355 }
356 }
357}
358
359impl From<u64> for RelayWeight {
360 fn from(val: u64) -> Self {
361 RelayWeight(val)
362 }
363}
364
365/// An operation for which we might be requesting a hidden service directory.
366#[derive(Copy, Clone, Debug, PartialEq)]
367// TODO: make this pub(crate) once NetDir::hs_dirs is removed
368#[non_exhaustive]
369pub enum HsDirOp {
370 /// Uploading an onion service descriptor.
371 #[cfg(feature = "hs-service")]
372 Upload,
373 /// Downloading an onion service descriptor.
374 Download,
375}
376
377/// A view of the Tor directory, suitable for use in building circuits.
378///
379/// Abstractly, a [`NetDir`] is a set of usable public [`Relay`]s, each of which
380/// has its own properties, identity, and correct weighted probability for use
381/// under different circumstances.
382///
383/// A [`NetDir`] is constructed by making a [`PartialNetDir`] from a consensus
384/// document, and then adding enough microdescriptors to that `PartialNetDir` so
385/// that it can be used to build paths. (Thus, if you have a NetDir, it is
386/// definitely adequate to build paths.)
387///
388/// # "Usable" relays
389///
390/// Many methods on NetDir are defined in terms of <a name="usable">"Usable"</a> relays. Unless
391/// otherwise stated, a relay is "usable" if it is listed in the consensus,
392/// if we have full directory information for that relay (including a
393/// microdescriptor), and if that relay does not have any flags indicating that
394/// we should never use it. (Currently, `NoEdConsensus` is the only such flag.)
395///
396/// # Limitations
397///
398/// The current NetDir implementation assumes fairly strongly that every relay
399/// has an Ed25519 identity and an RSA identity, that the consensus is indexed
400/// by RSA identities, and that the Ed25519 identities are stored in
401/// microdescriptors.
402///
403/// If these assumptions someday change, then we'll have to revise the
404/// implementation.
405#[derive(Debug, Clone)]
406pub struct NetDir {
407 /// A microdescriptor consensus that lists the members of the network,
408 /// and maps each one to a 'microdescriptor' that has more information
409 /// about it
410 consensus: Arc<MdConsensus>,
411 /// A map from keys to integer values, distributed in the consensus,
412 /// and clamped to certain defaults.
413 params: NetParameters,
414 /// Map from routerstatus index, to that routerstatus's microdescriptor (if we have one.)
415 mds: TiVec<RouterStatusIdx, Option<Arc<Microdesc>>>,
416 /// Map from SHA256 of _missing_ microdescriptors to the index of their
417 /// corresponding routerstatus.
418 rsidx_by_missing: HashMap<MdDigest, RouterStatusIdx>,
419 /// Map from ed25519 identity to index of the routerstatus.
420 ///
421 /// Note that we don't know the ed25519 identity of a relay until
422 /// we get the microdescriptor for it, so this won't be filled in
423 /// until we get the microdescriptors.
424 ///
425 /// # Implementation note
426 ///
427 /// For this field, and for `rsidx_by_rsa`,
428 /// it might be cool to have references instead.
429 /// But that would make this into a self-referential structure,
430 /// which isn't possible in safe rust.
431 rsidx_by_ed: HashMap<Ed25519Identity, RouterStatusIdx>,
432 /// Map from RSA identity to index of the routerstatus.
433 ///
434 /// This is constructed at the same time as the NetDir object, so it
435 /// can be immutable.
436 rsidx_by_rsa: Arc<HashMap<RsaIdentity, RouterStatusIdx>>,
437
438 /// Hash ring(s) describing the onion service directory.
439 ///
440 /// This is empty in a PartialNetDir, and is filled in before the NetDir is
441 /// built.
442 //
443 // TODO hs: It is ugly to have this exist in a partially constructed state
444 // in a PartialNetDir.
445 // Ideally, a PartialNetDir would contain only an HsDirs<HsDirParams>,
446 // or perhaps nothing at all, here.
447 #[cfg(feature = "hs-common")]
448 hsdir_rings: Arc<HsDirs<HsDirRing>>,
449
450 /// Weight values to apply to a given relay when deciding how frequently
451 /// to choose it for a given role.
452 weights: weight::WeightSet,
453
454 #[cfg(feature = "geoip")]
455 /// Country codes for each router in our consensus.
456 ///
457 /// This is indexed by the `RouterStatusIdx` (i.e. a router idx of zero has
458 /// the country code at position zero in this array).
459 country_codes: Vec<Option<CountryCode>>,
460}
461
462/// Collection of hidden service directories (or parameters for them)
463///
464/// In [`NetDir`] this is used to store the actual hash rings.
465/// (But, in a NetDir in a [`PartialNetDir`], it contains [`HsDirRing`]s
466/// where only the `params` are populated, and the `ring` is empty.)
467///
468/// This same generic type is used as the return type from
469/// [`HsDirParams::compute`](HsDirParams::compute),
470/// where it contains the *parameters* for the primary and secondary rings.
471#[derive(Debug, Clone)]
472#[cfg(feature = "hs-common")]
473pub(crate) struct HsDirs<D> {
474 /// The current ring
475 ///
476 /// It corresponds to the time period containing the `valid-after` time in
477 /// the consensus. Its SRV is whatever SRV was most current at the time when
478 /// that time period began.
479 ///
480 /// This is the hash ring that we should use whenever we are fetching an
481 /// onion service descriptor.
482 current: D,
483
484 /// Secondary rings (based on the parameters for the previous and next time periods)
485 ///
486 /// Onion services upload to positions on these ring as well, based on how
487 /// far into the current time period this directory is, so that
488 /// not-synchronized clients can still find their descriptor.
489 ///
490 /// Note that with the current (2023) network parameters, with
491 /// `hsdir_interval = SRV lifetime = 24 hours` at most one of these
492 /// secondary rings will be active at a time. We have two here in order
493 /// to conform with a more flexible regime in proposal 342.
494 //
495 // TODO: hs clients never need this; so I've made it not-present for them.
496 // But does that risk too much with respect to side channels?
497 //
498 // TODO: Perhaps we should refactor this so that it is clear that these
499 // are immutable? On the other hand, the documentation for this type
500 // declares that it is immutable, so we are likely okay.
501 //
502 // TODO: this `Vec` is only ever 0,1,2 elements.
503 // Maybe it should be an ArrayVec or something.
504 #[cfg(feature = "hs-service")]
505 secondary: Vec<D>,
506}
507
508#[cfg(feature = "hs-common")]
509impl<D> HsDirs<D> {
510 /// Convert an `HsDirs<D>` to `HsDirs<D2>` by mapping each contained `D`
511 pub(crate) fn map<D2>(self, mut f: impl FnMut(D) -> D2) -> HsDirs<D2> {
512 HsDirs {
513 current: f(self.current),
514 #[cfg(feature = "hs-service")]
515 secondary: self.secondary.into_iter().map(f).collect(),
516 }
517 }
518
519 /// Iterate over some of the contained hsdirs, according to `secondary`
520 ///
521 /// The current ring is always included.
522 /// Secondary rings are included iff `secondary` and the `hs-service` feature is enabled.
523 fn iter_filter_secondary(&self, secondary: bool) -> impl Iterator<Item = &D> {
524 let i = iter::once(&self.current);
525
526 // With "hs-service" disabled, there are no secondary rings,
527 // so we don't care.
528 let _ = secondary;
529
530 #[cfg(feature = "hs-service")]
531 let i = chain!(i, self.secondary.iter().filter(move |_| secondary));
532
533 i
534 }
535
536 /// Iterate over all the contained hsdirs
537 pub(crate) fn iter(&self) -> impl Iterator<Item = &D> {
538 self.iter_filter_secondary(true)
539 }
540
541 /// Iterate over the hsdirs relevant for `op`
542 pub(crate) fn iter_for_op(&self, op: HsDirOp) -> impl Iterator<Item = &D> {
543 self.iter_filter_secondary(match op {
544 #[cfg(feature = "hs-service")]
545 HsDirOp::Upload => true,
546 HsDirOp::Download => false,
547 })
548 }
549}
550
551/// An event that a [`NetDirProvider`] can broadcast to indicate that a change in
552/// the status of its directory.
553#[derive(
554 Debug, Clone, Copy, PartialEq, Eq, EnumIter, EnumCount, IntoPrimitive, TryFromPrimitive,
555)]
556#[non_exhaustive]
557#[repr(u16)]
558pub enum DirEvent {
559 /// A new consensus has been received, and has enough information to be
560 /// used.
561 ///
562 /// This event is also broadcast when a new set of consensus parameters is
563 /// available, even if that set of parameters comes from a configuration
564 /// change rather than from the latest consensus.
565 NewConsensus,
566
567 /// New descriptors have been received for the current consensus.
568 ///
569 /// (This event is _not_ broadcast when receiving new descriptors for a
570 /// consensus which is not yet ready to replace the current consensus.)
571 NewDescriptors,
572
573 /// We have received updated recommendations and requirements
574 /// for which subprotocols we should have to use the network.
575 NewProtocolRecommendation,
576}
577
578/// The network directory provider is shutting down without giving us the
579/// netdir we asked for.
580#[derive(Clone, Copy, Debug, thiserror::Error)]
581#[error("Network directory provider is shutting down")]
582#[non_exhaustive]
583pub struct NetdirProviderShutdown;
584
585impl tor_error::HasKind for NetdirProviderShutdown {
586 fn kind(&self) -> tor_error::ErrorKind {
587 tor_error::ErrorKind::ArtiShuttingDown
588 }
589}
590
591/// How "timely" must a network directory be?
592///
593/// This enum is used as an argument when requesting a [`NetDir`] object from
594/// [`NetDirProvider`] and other APIs, to specify how recent the information
595/// must be in order to be useful.
596#[derive(Copy, Clone, Eq, PartialEq, Debug)]
597#[allow(clippy::exhaustive_enums)]
598pub enum Timeliness {
599 /// The network directory must be strictly timely.
600 ///
601 /// That is, it must be based on a consensus that valid right now, with no
602 /// tolerance for skew or consensus problems.
603 ///
604 /// Avoid using this option if you could use [`Timeliness::Timely`] instead.
605 Strict,
606 /// The network directory must be roughly timely.
607 ///
608 /// This is, it must be be based on a consensus that is not _too_ far in the
609 /// future, and not _too_ far in the past.
610 ///
611 /// (The tolerances for "too far" will depend on configuration.)
612 ///
613 /// This is almost always the option that you want to use.
614 Timely,
615 /// Any network directory is permissible, regardless of how untimely.
616 ///
617 /// Avoid using this option if you could use [`Timeliness::Timely`] instead.
618 Unchecked,
619}
620
621/// An object that can provide [`NetDir`]s, as well as inform consumers when
622/// they might have changed.
623///
624/// It is the responsibility of the implementor of `NetDirProvider`
625/// to try to obtain an up-to-date `NetDir`,
626/// and continuously to maintain and update it.
627///
628/// In usual configurations, Arti uses `tor_dirmgr::DirMgr`
629/// as its `NetDirProvider`.
630#[async_trait]
631pub trait NetDirProvider: UpcastArcNetDirProvider + Send + Sync {
632 /// Return a network directory that's live according to the provided
633 /// `timeliness`.
634 fn netdir(&self, timeliness: Timeliness) -> Result<Arc<NetDir>>;
635
636 /// Return a reasonable netdir for general usage.
637 ///
638 /// This is an alias for
639 /// [`NetDirProvider::netdir`]`(`[`Timeliness::Timely`]`)`.
640 fn timely_netdir(&self) -> Result<Arc<NetDir>> {
641 self.netdir(Timeliness::Timely)
642 }
643
644 /// Return a new asynchronous stream that will receive notification
645 /// whenever the consensus has changed.
646 ///
647 /// Multiple events may be batched up into a single item: each time
648 /// this stream yields an event, all you can assume is that the event has
649 /// occurred at least once.
650 fn events(&self) -> BoxStream<'static, DirEvent>;
651
652 /// Return the latest network parameters.
653 ///
654 /// If we have no directory, return a reasonable set of defaults.
655 fn params(&self) -> Arc<dyn AsRef<NetParameters>>;
656
657 /// Get a NetDir from `provider`, waiting until one exists.
658 async fn wait_for_netdir(
659 &self,
660 timeliness: Timeliness,
661 ) -> std::result::Result<Arc<NetDir>, NetdirProviderShutdown> {
662 if let Ok(nd) = self.netdir(timeliness) {
663 return Ok(nd);
664 }
665
666 let mut stream = self.events();
667 loop {
668 // We need to retry `self.netdir()` before waiting for any stream events, to
669 // avoid deadlock.
670 //
671 // We ignore all errors here: they can all potentially be fixed by
672 // getting a fresh consensus, and they will all get warned about
673 // by the NetDirProvider itself.
674 if let Ok(nd) = self.netdir(timeliness) {
675 return Ok(nd);
676 }
677 match stream.next().await {
678 Some(_) => {}
679 None => {
680 return Err(NetdirProviderShutdown);
681 }
682 }
683 }
684 }
685
686 /// Wait until `provider` lists `target`.
687 ///
688 /// NOTE: This might potentially wait indefinitely, if `target` is never actually
689 /// becomes listed in the directory. It will exit if the `NetDirProvider` shuts down.
690 async fn wait_for_netdir_to_list(
691 &self,
692 target: &tor_linkspec::RelayIds,
693 timeliness: Timeliness,
694 ) -> std::result::Result<(), NetdirProviderShutdown> {
695 let mut events = self.events();
696 loop {
697 // See if the desired relay is in the netdir.
698 //
699 // We do this before waiting for any events, to avoid race conditions.
700 {
701 let netdir = self.wait_for_netdir(timeliness).await?;
702 if netdir.ids_listed(target) == Some(true) {
703 return Ok(());
704 }
705 // If we reach this point, then ids_listed returned `Some(false)`,
706 // meaning "This relay is definitely not in the current directory";
707 // or it returned `None`, meaning "waiting for more information
708 // about this network directory.
709 // In both cases, it's reasonable to just wait for another netdir
710 // event and try again.
711 }
712 // We didn't find the relay; wait for the provider to have a new netdir
713 // or more netdir information.
714 if events.next().await.is_none() {
715 // The event stream is closed; the provider has shut down.
716 return Err(NetdirProviderShutdown);
717 }
718 }
719 }
720
721 /// Return the latest set of recommended and required protocols, if there is one.
722 ///
723 /// This may be more recent (or more available) than this provider's associated NetDir.
724 fn protocol_statuses(&self) -> Option<(SystemTime, Arc<netstatus::ProtoStatuses>)>;
725}
726
727impl<T> NetDirProvider for Arc<T>
728where
729 T: NetDirProvider,
730{
731 fn netdir(&self, timeliness: Timeliness) -> Result<Arc<NetDir>> {
732 self.deref().netdir(timeliness)
733 }
734
735 fn timely_netdir(&self) -> Result<Arc<NetDir>> {
736 self.deref().timely_netdir()
737 }
738
739 fn events(&self) -> BoxStream<'static, DirEvent> {
740 self.deref().events()
741 }
742
743 fn params(&self) -> Arc<dyn AsRef<NetParameters>> {
744 self.deref().params()
745 }
746
747 fn protocol_statuses(&self) -> Option<(SystemTime, Arc<netstatus::ProtoStatuses>)> {
748 self.deref().protocol_statuses()
749 }
750}
751
752/// Helper trait: allows any `Arc<X>` to be upcast to a `Arc<dyn
753/// NetDirProvider>` if X is an implementation or supertrait of NetDirProvider.
754///
755/// This trait exists to work around a limitation in rust: when trait upcasting
756/// coercion is stable, this will be unnecessary.
757///
758/// The Rust tracking issue is <https://github.com/rust-lang/rust/issues/65991>.
759pub trait UpcastArcNetDirProvider {
760 /// Return a view of this object as an `Arc<dyn NetDirProvider>`
761 fn upcast_arc<'a>(self: Arc<Self>) -> Arc<dyn NetDirProvider + 'a>
762 where
763 Self: 'a;
764}
765
766impl<T> UpcastArcNetDirProvider for T
767where
768 T: NetDirProvider + Sized,
769{
770 fn upcast_arc<'a>(self: Arc<Self>) -> Arc<dyn NetDirProvider + 'a>
771 where
772 Self: 'a,
773 {
774 self
775 }
776}
777
778impl AsRef<NetParameters> for NetDir {
779 fn as_ref(&self) -> &NetParameters {
780 self.params()
781 }
782}
783
784/// A partially build NetDir -- it can't be unwrapped until it has
785/// enough information to build safe paths.
786#[derive(Debug, Clone)]
787pub struct PartialNetDir {
788 /// The netdir that's under construction.
789 netdir: NetDir,
790
791 /// The previous netdir, if we had one
792 ///
793 /// Used as a cache, so we can reuse information
794 #[cfg(feature = "hs-common")]
795 prev_netdir: Option<Arc<NetDir>>,
796}
797
798/// A view of a relay on the Tor network, suitable for building circuits.
799// TODO: This should probably be a more specific struct, with a trait
800// that implements it.
801#[derive(Clone)]
802pub struct Relay<'a> {
803 /// A router descriptor for this relay.
804 rs: &'a netstatus::MdRouterStatus,
805 /// A microdescriptor for this relay.
806 md: &'a Microdesc,
807 /// The country code this relay is in, if we know one.
808 #[cfg(feature = "geoip")]
809 cc: Option<CountryCode>,
810}
811
812/// A relay that we haven't checked for validity or usability in
813/// routing.
814#[derive(Debug)]
815pub struct UncheckedRelay<'a> {
816 /// A router descriptor for this relay.
817 rs: &'a netstatus::MdRouterStatus,
818 /// A microdescriptor for this relay, if there is one.
819 md: Option<&'a Microdesc>,
820 /// The country code this relay is in, if we know one.
821 #[cfg(feature = "geoip")]
822 cc: Option<CountryCode>,
823}
824
825/// A partial or full network directory that we can download
826/// microdescriptors for.
827pub trait MdReceiver {
828 /// Return an iterator over the digests for all of the microdescriptors
829 /// that this netdir is missing.
830 fn missing_microdescs(&self) -> Box<dyn Iterator<Item = &MdDigest> + '_>;
831 /// Add a microdescriptor to this netdir, if it was wanted.
832 ///
833 /// Return true if it was indeed wanted.
834 fn add_microdesc(&mut self, md: Microdesc) -> bool;
835 /// Return the number of missing microdescriptors.
836 fn n_missing(&self) -> usize;
837}
838
839impl PartialNetDir {
840 /// Create a new PartialNetDir with a given consensus, and no
841 /// microdescriptors loaded.
842 ///
843 /// If `replacement_params` is provided, override network parameters from
844 /// the consensus with those from `replacement_params`.
845 pub fn new(
846 consensus: MdConsensus,
847 replacement_params: Option<&netstatus::NetParams<i32>>,
848 ) -> Self {
849 Self::new_inner(
850 consensus,
851 replacement_params,
852 #[cfg(feature = "geoip")]
853 None,
854 )
855 }
856
857 /// Create a new PartialNetDir with GeoIP support.
858 ///
859 /// This does the same thing as `new()`, except the provided GeoIP database is used to add
860 /// country codes to relays.
861 #[cfg(feature = "geoip")]
862 pub fn new_with_geoip(
863 consensus: MdConsensus,
864 replacement_params: Option<&netstatus::NetParams<i32>>,
865 geoip_db: &GeoipDb,
866 ) -> Self {
867 Self::new_inner(consensus, replacement_params, Some(geoip_db))
868 }
869
870 /// Implementation of the `new()` functions.
871 fn new_inner(
872 consensus: MdConsensus,
873 replacement_params: Option<&netstatus::NetParams<i32>>,
874 #[cfg(feature = "geoip")] geoip_db: Option<&GeoipDb>,
875 ) -> Self {
876 let mut params = NetParameters::default();
877
878 // (We ignore unrecognized options here, since they come from
879 // the consensus, and we don't expect to recognize everything
880 // there.)
881 let _ = params.saturating_update(consensus.params().iter());
882
883 // Now see if the user has any parameters to override.
884 // (We have to do this now, or else changes won't be reflected in our
885 // weights.)
886 if let Some(replacement) = replacement_params {
887 for u in params.saturating_update(replacement.iter()) {
888 warn!("Unrecognized option: override_net_params.{}", u);
889 }
890 }
891
892 // Compute the weights we'll want to use for these relays.
893 let weights = weight::WeightSet::from_consensus(&consensus, ¶ms);
894
895 let n_relays = consensus.c_relays().len();
896
897 let rsidx_by_missing = consensus
898 .c_relays()
899 .iter_enumerated()
900 .map(|(rsidx, rs)| (*rs.md_digest(), rsidx))
901 .collect();
902
903 let rsidx_by_rsa = consensus
904 .c_relays()
905 .iter_enumerated()
906 .map(|(rsidx, rs)| (*rs.rsa_identity(), rsidx))
907 .collect();
908
909 #[cfg(feature = "geoip")]
910 let country_codes = if let Some(db) = geoip_db {
911 consensus
912 .c_relays()
913 .iter()
914 .map(|rs| {
915 db.lookup_country_code_multi(rs.addrs().map(|x| x.ip()))
916 .cloned()
917 })
918 .collect()
919 } else {
920 Default::default()
921 };
922
923 #[cfg(feature = "hs-common")]
924 let hsdir_rings = Arc::new({
925 let params = HsDirParams::compute(&consensus, ¶ms).expect("Invalid consensus!");
926 // TODO: It's a bit ugly to use expect above, but this function does
927 // not return a Result. On the other hand, the error conditions under which
928 // HsDirParams::compute can return Err are _very_ narrow and hard to
929 // hit; see documentation in that function. As such, we probably
930 // don't need to have this return a Result.
931
932 params.map(HsDirRing::empty_from_params)
933 });
934
935 let netdir = NetDir {
936 consensus: Arc::new(consensus),
937 params,
938 mds: vec![None; n_relays].into(),
939 rsidx_by_missing,
940 rsidx_by_rsa: Arc::new(rsidx_by_rsa),
941 rsidx_by_ed: HashMap::with_capacity(n_relays),
942 #[cfg(feature = "hs-common")]
943 hsdir_rings,
944 weights,
945 #[cfg(feature = "geoip")]
946 country_codes,
947 };
948
949 PartialNetDir {
950 netdir,
951 #[cfg(feature = "hs-common")]
952 prev_netdir: None,
953 }
954 }
955
956 /// Return the declared lifetime of this PartialNetDir.
957 pub fn lifetime(&self) -> &netstatus::Lifetime {
958 self.netdir.lifetime()
959 }
960
961 /// Record a previous netdir, which can be used for reusing cached information
962 //
963 // Fills in as many missing microdescriptors as possible in this
964 // netdir, using the microdescriptors from the previous netdir.
965 //
966 // With HS enabled, stores the netdir for reuse of relay hash ring index values.
967 #[allow(clippy::needless_pass_by_value)] // prev might, or might not, be stored
968 pub fn fill_from_previous_netdir(&mut self, prev: Arc<NetDir>) {
969 for md in prev.mds.iter().flatten() {
970 self.netdir.add_arc_microdesc(md.clone());
971 }
972
973 #[cfg(feature = "hs-common")]
974 {
975 self.prev_netdir = Some(prev);
976 }
977 }
978
979 /// Compute the hash ring(s) for this NetDir
980 #[cfg(feature = "hs-common")]
981 fn compute_rings(&mut self) {
982 let params = HsDirParams::compute(&self.netdir.consensus, &self.netdir.params)
983 .expect("Invalid consensus");
984 // TODO: see TODO by similar expect in new()
985
986 self.netdir.hsdir_rings =
987 Arc::new(params.map(|params| {
988 HsDirRing::compute(params, &self.netdir, self.prev_netdir.as_deref())
989 }));
990 }
991
992 /// Return true if this are enough information in this directory
993 /// to build multihop paths.
994 pub fn have_enough_paths(&self) -> bool {
995 self.netdir.have_enough_paths()
996 }
997 /// If this directory has enough information to build multihop
998 /// circuits, return it.
999 pub fn unwrap_if_sufficient(
1000 #[allow(unused_mut)] mut self,
1001 ) -> std::result::Result<NetDir, PartialNetDir> {
1002 if self.netdir.have_enough_paths() {
1003 #[cfg(feature = "hs-common")]
1004 self.compute_rings();
1005 Ok(self.netdir)
1006 } else {
1007 Err(self)
1008 }
1009 }
1010}
1011
1012impl MdReceiver for PartialNetDir {
1013 fn missing_microdescs(&self) -> Box<dyn Iterator<Item = &MdDigest> + '_> {
1014 self.netdir.missing_microdescs()
1015 }
1016 fn add_microdesc(&mut self, md: Microdesc) -> bool {
1017 self.netdir.add_microdesc(md)
1018 }
1019 fn n_missing(&self) -> usize {
1020 self.netdir.n_missing()
1021 }
1022}
1023
1024impl NetDir {
1025 /// Return the declared lifetime of this NetDir.
1026 pub fn lifetime(&self) -> &netstatus::Lifetime {
1027 self.consensus.lifetime()
1028 }
1029
1030 /// Add `md` to this NetDir.
1031 ///
1032 /// Return true if we wanted it, and false otherwise.
1033 fn add_arc_microdesc(&mut self, md: Arc<Microdesc>) -> bool {
1034 if let Some(rsidx) = self.rsidx_by_missing.remove(md.digest()) {
1035 assert_eq!(self.c_relays()[rsidx].md_digest(), md.digest());
1036
1037 // There should never be two approved MDs in the same
1038 // consensus listing the same ID... but if there is,
1039 // we'll let the most recent one win.
1040 self.rsidx_by_ed.insert(*md.ed25519_id(), rsidx);
1041
1042 // Happy path: we did indeed want this one.
1043 self.mds[rsidx] = Some(md);
1044
1045 // Save some space in the missing-descriptor list.
1046 if self.rsidx_by_missing.len() < self.rsidx_by_missing.capacity() / 4 {
1047 self.rsidx_by_missing.shrink_to_fit();
1048 }
1049
1050 return true;
1051 }
1052
1053 // Either we already had it, or we never wanted it at all.
1054 false
1055 }
1056
1057 /// Construct a (possibly invalid) Relay object from a routerstatus and its
1058 /// index within the consensus.
1059 fn relay_from_rs_and_rsidx<'a>(
1060 &'a self,
1061 rs: &'a netstatus::MdRouterStatus,
1062 rsidx: RouterStatusIdx,
1063 ) -> UncheckedRelay<'a> {
1064 debug_assert_eq!(self.c_relays()[rsidx].rsa_identity(), rs.rsa_identity());
1065 let md = self.mds[rsidx].as_deref();
1066 if let Some(md) = md {
1067 debug_assert_eq!(rs.md_digest(), md.digest());
1068 }
1069
1070 UncheckedRelay {
1071 rs,
1072 md,
1073 #[cfg(feature = "geoip")]
1074 cc: self.country_codes.get(rsidx.0).copied().flatten(),
1075 }
1076 }
1077
1078 /// Return the value of the hsdir_n_replicas param.
1079 #[cfg(feature = "hs-common")]
1080 fn n_replicas(&self) -> u8 {
1081 self.params
1082 .hsdir_n_replicas
1083 .get()
1084 .try_into()
1085 .expect("BoundedInt did not enforce bounds")
1086 }
1087
1088 /// Return the spread parameter for the specified `op`.
1089 #[cfg(feature = "hs-common")]
1090 fn spread(&self, op: HsDirOp) -> usize {
1091 let spread = match op {
1092 HsDirOp::Download => self.params.hsdir_spread_fetch,
1093 #[cfg(feature = "hs-service")]
1094 HsDirOp::Upload => self.params.hsdir_spread_store,
1095 };
1096
1097 spread
1098 .get()
1099 .try_into()
1100 .expect("BoundedInt did not enforce bounds!")
1101 }
1102
1103 /// Select `spread` hsdir relays for the specified `hsid` from a given `ring`.
1104 ///
1105 /// Algorithm:
1106 ///
1107 /// for idx in 1..=n_replicas:
1108 /// - let H = hsdir_ring::onion_service_index(id, replica, rand,
1109 /// period).
1110 /// - Find the position of H within hsdir_ring.
1111 /// - Take elements from hsdir_ring starting at that position,
1112 /// adding them to Dirs until we have added `spread` new elements
1113 /// that were not there before.
1114 #[cfg(feature = "hs-common")]
1115 fn select_hsdirs<'h, 'r: 'h>(
1116 &'r self,
1117 hsid: HsBlindId,
1118 ring: &'h HsDirRing,
1119 spread: usize,
1120 ) -> impl Iterator<Item = Relay<'r>> + 'h {
1121 let n_replicas = self.n_replicas();
1122
1123 (1..=n_replicas) // 1-indexed !
1124 .flat_map({
1125 let mut selected_nodes = HashSet::new();
1126
1127 move |replica: u8| {
1128 let hsdir_idx = hsdir_ring::service_hsdir_index(&hsid, replica, ring.params());
1129
1130 ring.ring_items_at(hsdir_idx, spread, |(hsdir_idx, _)| {
1131 // According to rend-spec 2.2.3:
1132 // ... If any of those
1133 // nodes have already been selected for a lower-numbered replica of the
1134 // service, any nodes already chosen are disregarded (i.e. skipped over)
1135 // when choosing a replica's hsdir_spread_store nodes.
1136 selected_nodes.insert(*hsdir_idx)
1137 })
1138 .collect::<Vec<_>>()
1139 }
1140 })
1141 .filter_map(move |(_hsdir_idx, rs_idx)| {
1142 // This ought not to be None but let's not panic or bail if it is
1143 self.relay_by_rs_idx(*rs_idx)
1144 })
1145 }
1146
1147 /// Replace the overridden parameters in this netdir with `new_replacement`.
1148 ///
1149 /// After this function is done, the netdir's parameters will be those in
1150 /// the consensus, overridden by settings from `new_replacement`. Any
1151 /// settings in the old replacement parameters will be discarded.
1152 pub fn replace_overridden_parameters(&mut self, new_replacement: &netstatus::NetParams<i32>) {
1153 // TODO(nickm): This is largely duplicate code from PartialNetDir::new().
1154 let mut new_params = NetParameters::default();
1155 let _ = new_params.saturating_update(self.consensus.params().iter());
1156 for u in new_params.saturating_update(new_replacement.iter()) {
1157 warn!("Unrecognized option: override_net_params.{}", u);
1158 }
1159
1160 self.params = new_params;
1161 }
1162
1163 /// Return an iterator over all Relay objects, including invalid ones
1164 /// that we can't use.
1165 pub fn all_relays(&self) -> impl Iterator<Item = UncheckedRelay<'_>> {
1166 // TODO: I'd like if we could memoize this so we don't have to
1167 // do so many hashtable lookups.
1168 self.c_relays()
1169 .iter_enumerated()
1170 .map(move |(rsidx, rs)| self.relay_from_rs_and_rsidx(rs, rsidx))
1171 }
1172 /// Return an iterator over all [usable](NetDir#usable) Relays.
1173 pub fn relays(&self) -> impl Iterator<Item = Relay<'_>> {
1174 self.all_relays().filter_map(UncheckedRelay::into_relay)
1175 }
1176
1177 /// Look up a relay's [`Microdesc`] by its [`RouterStatusIdx`]
1178 #[cfg_attr(not(feature = "hs-common"), allow(dead_code))]
1179 pub(crate) fn md_by_rsidx(&self, rsidx: RouterStatusIdx) -> Option<&Microdesc> {
1180 self.mds.get(rsidx)?.as_deref()
1181 }
1182
1183 /// Return a relay matching a given identity, if we have a
1184 /// _usable_ relay with that key.
1185 ///
1186 /// (Does not return [unusable](NetDir#usable) relays.)
1187 ///
1188 ///
1189 /// Note that a `None` answer is not always permanent: if a microdescriptor
1190 /// is subsequently added for a relay with this ID, the ID may become usable
1191 /// even if it was not usable before.
1192 pub fn by_id<'a, T>(&self, id: T) -> Option<Relay<'_>>
1193 where
1194 T: Into<RelayIdRef<'a>>,
1195 {
1196 let id = id.into();
1197 let answer = match id {
1198 RelayIdRef::Ed25519(ed25519) => {
1199 let rsidx = *self.rsidx_by_ed.get(ed25519)?;
1200 let rs = self.c_relays().get(rsidx).expect("Corrupt index");
1201
1202 self.relay_from_rs_and_rsidx(rs, rsidx).into_relay()?
1203 }
1204 RelayIdRef::Rsa(rsa) => self
1205 .by_rsa_id_unchecked(rsa)
1206 .and_then(UncheckedRelay::into_relay)?,
1207 other_type => self.relays().find(|r| r.has_identity(other_type))?,
1208 };
1209 assert!(answer.has_identity(id));
1210 Some(answer)
1211 }
1212
1213 /// Obtain a `Relay` given a `RouterStatusIdx`
1214 ///
1215 /// Differs from `relay_from_rs_and_rsi` as follows:
1216 /// * That function expects the caller to already have an `MdRouterStatus`;
1217 /// it checks with `debug_assert` that the relay in the netdir matches.
1218 /// * That function panics if the `RouterStatusIdx` is invalid; this one returns `None`.
1219 /// * That function returns an `UncheckedRelay`; this one a `Relay`.
1220 ///
1221 /// `None` could be returned here, even with a valid `rsi`,
1222 /// if `rsi` refers to an [unusable](NetDir#usable) relay.
1223 #[cfg_attr(not(feature = "hs-common"), allow(dead_code))]
1224 pub(crate) fn relay_by_rs_idx(&self, rs_idx: RouterStatusIdx) -> Option<Relay<'_>> {
1225 let rs = self.c_relays().get(rs_idx)?;
1226 let md = self.mds.get(rs_idx)?.as_deref();
1227 UncheckedRelay {
1228 rs,
1229 md,
1230 #[cfg(feature = "geoip")]
1231 cc: self.country_codes.get(rs_idx.0).copied().flatten(),
1232 }
1233 .into_relay()
1234 }
1235
1236 /// Return a relay with the same identities as those in `target`, if one
1237 /// exists.
1238 ///
1239 /// Does not return [unusable](NetDir#usable) relays.
1240 ///
1241 /// Note that a negative result from this method is not necessarily permanent:
1242 /// it may be the case that a relay exists,
1243 /// but we don't yet have enough information about it to know all of its IDs.
1244 /// To test whether a relay is *definitely* absent,
1245 /// use [`by_ids_detailed`](Self::by_ids_detailed)
1246 /// or [`ids_listed`](Self::ids_listed).
1247 ///
1248 /// # Limitations
1249 ///
1250 /// This will be very slow if `target` does not have an Ed25519 or RSA
1251 /// identity.
1252 pub fn by_ids<T>(&self, target: &T) -> Option<Relay<'_>>
1253 where
1254 T: HasRelayIds + ?Sized,
1255 {
1256 let mut identities = target.identities();
1257 // Don't try if there are no identities.
1258 let first_id = identities.next()?;
1259
1260 // Since there is at most one relay with each given ID type,
1261 // we only need to check the first relay we find.
1262 let candidate = self.by_id(first_id)?;
1263 if identities.all(|wanted_id| candidate.has_identity(wanted_id)) {
1264 Some(candidate)
1265 } else {
1266 None
1267 }
1268 }
1269
1270 /// Check whether there is a relay that has at least one identity from
1271 /// `target`, and which _could_ have every identity from `target`.
1272 /// If so, return such a relay.
1273 ///
1274 /// Return `Ok(None)` if we did not find a relay with any identity from `target`.
1275 ///
1276 /// Return `RelayLookupError::Impossible` if we found a relay with at least
1277 /// one identity from `target`, but that relay's other identities contradict
1278 /// what we learned from `target`.
1279 ///
1280 /// Does not return [unusable](NetDir#usable) relays.
1281 ///
1282 /// (This function is only useful if you need to distinguish the
1283 /// "impossible" case from the "no such relay known" case.)
1284 ///
1285 /// # Limitations
1286 ///
1287 /// This will be very slow if `target` does not have an Ed25519 or RSA
1288 /// identity.
1289 //
1290 // TODO HS: This function could use a better name.
1291 //
1292 // TODO: We could remove the feature restriction here once we think this API is
1293 // stable.
1294 #[cfg(feature = "hs-common")]
1295 pub fn by_ids_detailed<T>(
1296 &self,
1297 target: &T,
1298 ) -> std::result::Result<Option<Relay<'_>>, RelayLookupError>
1299 where
1300 T: HasRelayIds + ?Sized,
1301 {
1302 let candidate = target
1303 .identities()
1304 // Find all the relays that share any identity with this set of identities.
1305 .filter_map(|id| self.by_id(id))
1306 // We might find the same relay more than once under a different
1307 // identity, so we remove the duplicates.
1308 //
1309 // Since there is at most one relay per rsa identity per consensus,
1310 // this is a true uniqueness check under current construction rules.
1311 .unique_by(|r| r.rs.rsa_identity())
1312 // If we find two or more distinct relays, then have a contradiction.
1313 .at_most_one()
1314 .map_err(|_| RelayLookupError::Impossible)?;
1315
1316 // If we have no candidate, return None early.
1317 let candidate = match candidate {
1318 Some(relay) => relay,
1319 None => return Ok(None),
1320 };
1321
1322 // Now we know we have a single candidate. Make sure that it does not have any
1323 // identity that does not match the target.
1324 if target
1325 .identities()
1326 .all(|wanted_id| match candidate.identity(wanted_id.id_type()) {
1327 None => true,
1328 Some(id) => id == wanted_id,
1329 })
1330 {
1331 Ok(Some(candidate))
1332 } else {
1333 Err(RelayLookupError::Impossible)
1334 }
1335 }
1336
1337 /// Return a boolean if this consensus definitely has (or does not have) a
1338 /// relay matching the listed identities.
1339 ///
1340 /// `Some(true)` indicates that the relay exists.
1341 /// `Some(false)` indicates that the relay definitely does not exist.
1342 /// `None` indicates that we can't yet tell whether such a relay exists,
1343 /// due to missing information.
1344 fn id_pair_listed(&self, ed_id: &Ed25519Identity, rsa_id: &RsaIdentity) -> Option<bool> {
1345 let r = self.by_rsa_id_unchecked(rsa_id);
1346 match r {
1347 Some(unchecked) => {
1348 if !unchecked.rs.ed25519_id_is_usable() {
1349 return Some(false);
1350 }
1351 // If md is present, then it's listed iff we have the right
1352 // ed id. Otherwise we don't know if it's listed.
1353 unchecked.md.map(|md| md.ed25519_id() == ed_id)
1354 }
1355 None => {
1356 // Definitely not listed.
1357 Some(false)
1358 }
1359 }
1360 }
1361
1362 /// Check whether a relay exists (or may exist)
1363 /// with the same identities as those in `target`.
1364 ///
1365 /// `Some(true)` indicates that the relay exists.
1366 /// `Some(false)` indicates that the relay definitely does not exist.
1367 /// `None` indicates that we can't yet tell whether such a relay exists,
1368 /// due to missing information.
1369 pub fn ids_listed<T>(&self, target: &T) -> Option<bool>
1370 where
1371 T: HasRelayIds + ?Sized,
1372 {
1373 let rsa_id = target.rsa_identity();
1374 let ed25519_id = target.ed_identity();
1375
1376 // TODO: If we later support more identity key types, this will
1377 // become incorrect. This assertion might help us recognize that case.
1378 const _: () = assert!(RelayIdType::COUNT == 2);
1379
1380 match (rsa_id, ed25519_id) {
1381 (Some(r), Some(e)) => self.id_pair_listed(e, r),
1382 (Some(r), None) => Some(self.rsa_id_is_listed(r)),
1383 (None, Some(e)) => {
1384 if self.rsidx_by_ed.contains_key(e) {
1385 Some(true)
1386 } else {
1387 None
1388 }
1389 }
1390 (None, None) => None,
1391 }
1392 }
1393
1394 /// Return a (possibly [unusable](NetDir#usable)) relay with a given RSA identity.
1395 ///
1396 /// This API can be used to find information about a relay that is listed in
1397 /// the current consensus, even if we don't yet have enough information
1398 /// (like a microdescriptor) about the relay to use it.
1399 #[cfg_attr(feature = "experimental-api", visibility::make(pub))]
1400 #[cfg_attr(docsrs, doc(cfg(feature = "experimental-api")))]
1401 fn by_rsa_id_unchecked(&self, rsa_id: &RsaIdentity) -> Option<UncheckedRelay<'_>> {
1402 let rsidx = *self.rsidx_by_rsa.get(rsa_id)?;
1403 let rs = self.c_relays().get(rsidx).expect("Corrupt index");
1404 assert_eq!(rs.rsa_identity(), rsa_id);
1405 Some(self.relay_from_rs_and_rsidx(rs, rsidx))
1406 }
1407 /// Return the relay with a given RSA identity, if we have one
1408 /// and it is [usable](NetDir#usable).
1409 fn by_rsa_id(&self, rsa_id: &RsaIdentity) -> Option<Relay<'_>> {
1410 self.by_rsa_id_unchecked(rsa_id)?.into_relay()
1411 }
1412 /// Return true if `rsa_id` is listed in this directory, even if it isn't
1413 /// currently usable.
1414 ///
1415 /// (An "[unusable](NetDir#usable)" relay in this context is one for which we don't have full
1416 /// directory information.)
1417 #[cfg_attr(feature = "experimental-api", visibility::make(pub))]
1418 #[cfg_attr(docsrs, doc(cfg(feature = "experimental-api")))]
1419 fn rsa_id_is_listed(&self, rsa_id: &RsaIdentity) -> bool {
1420 self.by_rsa_id_unchecked(rsa_id).is_some()
1421 }
1422
1423 /// List the hsdirs in this NetDir, that should be in the HSDir rings
1424 ///
1425 /// The results are not returned in any particular order.
1426 #[cfg(feature = "hs-common")]
1427 fn all_hsdirs(&self) -> impl Iterator<Item = (RouterStatusIdx, Relay<'_>)> {
1428 self.c_relays().iter_enumerated().filter_map(|(rsidx, rs)| {
1429 let relay = self.relay_from_rs_and_rsidx(rs, rsidx);
1430 relay.is_hsdir_for_ring().then_some(())?;
1431 let relay = relay.into_relay()?;
1432 Some((rsidx, relay))
1433 })
1434 }
1435
1436 /// Return the parameters from the consensus, clamped to the
1437 /// correct ranges, with defaults filled in.
1438 ///
1439 /// NOTE: that unsupported parameters aren't returned here; only those
1440 /// values configured in the `params` module are available.
1441 pub fn params(&self) -> &NetParameters {
1442 &self.params
1443 }
1444
1445 /// Return a [`ProtoStatus`](netstatus::ProtoStatus) that lists the
1446 /// network's current requirements and recommendations for the list of
1447 /// protocols that every relay must implement.
1448 //
1449 // TODO HS: I am not sure this is the right API; other alternatives would be:
1450 // * To expose the _required_ relay protocol list instead (since that's all that
1451 // onion service implementations need).
1452 // * To expose the client protocol list as well (for symmetry).
1453 // * To expose the MdConsensus instead (since that's more general, although
1454 // it restricts the future evolution of this API).
1455 //
1456 // I think that this is a reasonably good compromise for now, but I'm going
1457 // to put it behind the `hs-common` feature to give us time to consider more.
1458 #[cfg(feature = "hs-common")]
1459 pub fn relay_protocol_status(&self) -> &netstatus::ProtoStatus {
1460 self.consensus.relay_protocol_status()
1461 }
1462
1463 /// Return a [`ProtoStatus`](netstatus::ProtoStatus) that lists the
1464 /// network's current requirements and recommendations for the list of
1465 /// protocols that every relay must implement.
1466 //
1467 // TODO HS: See notes on relay_protocol_status above.
1468 #[cfg(feature = "hs-common")]
1469 pub fn client_protocol_status(&self) -> &netstatus::ProtoStatus {
1470 self.consensus.client_protocol_status()
1471 }
1472
1473 /// Construct a `CircTarget` from an externally provided list of link specifiers,
1474 /// and an externally provided onion key.
1475 ///
1476 /// This method is used in the onion service protocol,
1477 /// where introduction points and rendezvous points are specified using these inputs.
1478 ///
1479 /// This function is a member of `NetDir` so that it can provide a reasonable list of
1480 /// [`Protocols`](tor_protover::Protocols) capabilities for the generated `CircTarget`.
1481 /// It does not (and should not!) look up anything else from the directory.
1482 #[cfg(feature = "hs-common")]
1483 pub fn circ_target_from_verbatim_linkspecs(
1484 &self,
1485 linkspecs: &[tor_linkspec::EncodedLinkSpec],
1486 ntor_onion_key: &curve25519::PublicKey,
1487 ) -> StdResult<VerbatimLinkSpecCircTarget<OwnedCircTarget>, VerbatimCircTargetDecodeError> {
1488 use VerbatimCircTargetDecodeError as E;
1489 use tor_linkspec::CircTarget as _;
1490 use tor_linkspec::decode::Strictness;
1491
1492 let mut bld = OwnedCircTarget::builder();
1493 use tor_error::into_internal;
1494
1495 *bld.chan_target() =
1496 OwnedChanTargetBuilder::from_encoded_linkspecs(Strictness::Standard, linkspecs)?;
1497 let protocols = {
1498 let chan_target = bld.chan_target().build().map_err(into_internal!(
1499 "from_encoded_linkspecs gave an invalid output"
1500 ))?;
1501 match self
1502 .by_ids_detailed(&chan_target)
1503 .map_err(E::ImpossibleIds)?
1504 {
1505 Some(relay) => relay.protovers().clone(),
1506 None => self.relay_protocol_status().required_protocols().clone(),
1507 }
1508 };
1509 bld.protocols(protocols);
1510 bld.ntor_onion_key(*ntor_onion_key);
1511 Ok(VerbatimLinkSpecCircTarget::new(
1512 bld.build()
1513 .map_err(into_internal!("Failed to construct a valid circtarget"))?,
1514 linkspecs.to_vec(),
1515 ))
1516 }
1517
1518 /// Return weighted the fraction of relays we can use. We only
1519 /// consider relays that match the predicate `usable`. We weight
1520 /// this bandwidth according to the provided `role`.
1521 ///
1522 /// If _no_ matching relays in the consensus have a nonzero
1523 /// weighted bandwidth value, we fall back to looking at the
1524 /// unweighted fraction of matching relays.
1525 ///
1526 /// If there are no matching relays in the consensus, we return 0.0.
1527 fn frac_for_role<'a, F>(&'a self, role: WeightRole, usable: F) -> f64
1528 where
1529 F: Fn(&UncheckedRelay<'a>) -> bool,
1530 {
1531 let mut total_weight = 0_u64;
1532 let mut have_weight = 0_u64;
1533 let mut have_count = 0_usize;
1534 let mut total_count = 0_usize;
1535
1536 for r in self.all_relays() {
1537 if !usable(&r) {
1538 continue;
1539 }
1540 let w = self.weights.weight_rs_for_role(r.rs, role);
1541 total_weight += w;
1542 total_count += 1;
1543 if r.is_usable() {
1544 have_weight += w;
1545 have_count += 1;
1546 }
1547 }
1548
1549 if total_weight > 0 {
1550 // The consensus lists some weighted bandwidth so return the
1551 // fraction of the weighted bandwidth for which we have
1552 // descriptors.
1553 (have_weight as f64) / (total_weight as f64)
1554 } else if total_count > 0 {
1555 // The consensus lists no weighted bandwidth for these relays,
1556 // but at least it does list relays. Return the fraction of
1557 // relays for which it we have descriptors.
1558 (have_count as f64) / (total_count as f64)
1559 } else {
1560 // There are no relays of this kind in the consensus. Return
1561 // 0.0, to avoid dividing by zero and giving NaN.
1562 0.0
1563 }
1564 }
1565 /// Return the estimated fraction of possible paths that we have
1566 /// enough microdescriptors to build.
1567 fn frac_usable_paths(&self) -> f64 {
1568 // TODO #504, TODO SPEC: We may want to add a set of is_flagged_fast() and/or
1569 // is_flagged_stable() checks here. This will require spec clarification.
1570 let f_g = self.frac_for_role(WeightRole::Guard, |u| {
1571 u.low_level_details().is_suitable_as_guard()
1572 });
1573 let f_m = self.frac_for_role(WeightRole::Middle, |_| true);
1574 let f_e = if self.all_relays().any(|u| u.rs.is_flagged_exit()) {
1575 self.frac_for_role(WeightRole::Exit, |u| u.rs.is_flagged_exit())
1576 } else {
1577 // If there are no exits at all, we use f_m here.
1578 f_m
1579 };
1580 f_g * f_m * f_e
1581 }
1582 /// Return true if there is enough information in this NetDir to build
1583 /// multihop circuits.
1584 fn have_enough_paths(&self) -> bool {
1585 // TODO-A001: This should check for our guards as well, and
1586 // make sure that if they're listed in the consensus, we have
1587 // the descriptors for them.
1588
1589 // If we can build a randomly chosen path with at least this
1590 // probability, we know enough information to participate
1591 // on the network.
1592
1593 let min_frac_paths: f64 = self.params().min_circuit_path_threshold.as_fraction();
1594
1595 // What fraction of paths can we build?
1596 let available = self.frac_usable_paths();
1597
1598 available >= min_frac_paths
1599 }
1600 /// Choose a relay at random.
1601 ///
1602 /// Each relay is chosen with probability proportional to its weight
1603 /// in the role `role`, and is only selected if the predicate `usable`
1604 /// returns true for it.
1605 ///
1606 /// This function returns None if (and only if) there are no relays
1607 /// with nonzero weight where `usable` returned true.
1608 //
1609 // TODO this API, with the `usable` closure, invites mistakes where we fail to
1610 // check conditions that are implied by the role we have selected for the relay:
1611 // call sites must include a call to `Relay::is_polarity_inverter()` or whatever.
1612 // IMO the `WeightRole` ought to imply a condition (and it should therefore probably
1613 // be renamed.) -Diziet
1614 pub fn pick_relay<'a, R, P>(
1615 &'a self,
1616 rng: &mut R,
1617 role: WeightRole,
1618 usable: P,
1619 ) -> Option<Relay<'a>>
1620 where
1621 R: rand::Rng,
1622 P: FnMut(&Relay<'a>) -> bool,
1623 {
1624 let relays: Vec<_> = self.relays().filter(usable).collect();
1625 // This algorithm uses rand::distr::WeightedIndex, and uses
1626 // gives O(n) time and space to build the index, plus O(log n)
1627 // sampling time.
1628 //
1629 // We might be better off building a WeightedIndex in advance
1630 // for each `role`, and then sampling it repeatedly until we
1631 // get a relay that satisfies `usable`. Or we might not --
1632 // that depends heavily on the actual particulars of our
1633 // inputs. We probably shouldn't make any changes there
1634 // unless profiling tells us that this function is in a hot
1635 // path.
1636 //
1637 // The C Tor sampling implementation goes through some trouble
1638 // here to try to make its path selection constant-time. I
1639 // believe that there is no actual remotely exploitable
1640 // side-channel here however. It could be worth analyzing in
1641 // the future.
1642 //
1643 // This code will give the wrong result if the total of all weights
1644 // can exceed u64::MAX. We make sure that can't happen when we
1645 // set up `self.weights`.
1646 match relays[..].choose_weighted(rng, |r| self.weights.weight_rs_for_role(r.rs, role)) {
1647 Ok(relay) => Some(relay.clone()),
1648 Err(WeightError::InsufficientNonZero) => {
1649 if relays.is_empty() {
1650 None
1651 } else {
1652 warn!(?self.weights, ?role,
1653 "After filtering, all {} relays had zero weight. Choosing one at random. See bug #1907.",
1654 relays.len());
1655 relays.choose(rng).cloned()
1656 }
1657 }
1658 Err(e) => {
1659 warn_report!(e, "Unexpected error while sampling a relay");
1660 None
1661 }
1662 }
1663 }
1664
1665 /// Choose `n` relay at random.
1666 ///
1667 /// Each relay is chosen with probability proportional to its weight
1668 /// in the role `role`, and is only selected if the predicate `usable`
1669 /// returns true for it.
1670 ///
1671 /// Relays are chosen without replacement: no relay will be
1672 /// returned twice. Therefore, the resulting vector may be smaller
1673 /// than `n` if we happen to have fewer than `n` appropriate relays.
1674 ///
1675 /// This function returns an empty vector if (and only if) there
1676 /// are no relays with nonzero weight where `usable` returned
1677 /// true.
1678 #[allow(clippy::cognitive_complexity)] // all due to tracing crate.
1679 pub fn pick_n_relays<'a, R, P>(
1680 &'a self,
1681 rng: &mut R,
1682 n: usize,
1683 role: WeightRole,
1684 usable: P,
1685 ) -> Vec<Relay<'a>>
1686 where
1687 R: rand::Rng,
1688 P: FnMut(&Relay<'a>) -> bool,
1689 {
1690 let relays: Vec<_> = self.relays().filter(usable).collect();
1691 // NOTE: See discussion in pick_relay().
1692 let mut relays = match relays[..].choose_multiple_weighted(rng, n, |r| {
1693 self.weights.weight_rs_for_role(r.rs, role) as f64
1694 }) {
1695 Err(WeightError::InsufficientNonZero) => {
1696 // Too few relays had nonzero weights: return all of those that are okay.
1697 // (This is behavior used to come up with rand 0.9; it no longer does.
1698 // We still detect it.)
1699 let remaining: Vec<_> = relays
1700 .iter()
1701 .filter(|r| self.weights.weight_rs_for_role(r.rs, role) > 0)
1702 .cloned()
1703 .collect();
1704 if remaining.is_empty() {
1705 warn!(?self.weights, ?role,
1706 "After filtering, all {} relays had zero weight! Picking some at random. See bug #1907.",
1707 relays.len());
1708 if relays.len() >= n {
1709 relays.choose_multiple(rng, n).cloned().collect()
1710 } else {
1711 relays
1712 }
1713 } else {
1714 warn!(?self.weights, ?role,
1715 "After filtering, only had {}/{} relays with nonzero weight. Returning them all. See bug #1907.",
1716 remaining.len(), relays.len());
1717 remaining
1718 }
1719 }
1720 Err(e) => {
1721 warn_report!(e, "Unexpected error while sampling a set of relays");
1722 Vec::new()
1723 }
1724 Ok(iter) => {
1725 let selection: Vec<_> = iter.map(Relay::clone).collect();
1726 if selection.len() < n && selection.len() < relays.len() {
1727 warn!(?self.weights, ?role,
1728 "choose_multiple_weighted returned only {returned}, despite requesting {n}, \
1729 and having {filtered_len} available after filtering. See bug #1907.",
1730 returned=selection.len(), filtered_len=relays.len());
1731 }
1732 selection
1733 }
1734 };
1735 relays.shuffle(rng);
1736 relays
1737 }
1738
1739 /// Compute the weight with which `relay` will be selected for a given
1740 /// `role`.
1741 pub fn relay_weight<'a>(&'a self, relay: &Relay<'a>, role: WeightRole) -> RelayWeight {
1742 RelayWeight(self.weights.weight_rs_for_role(relay.rs, role))
1743 }
1744
1745 /// Compute the total weight with which any relay matching `usable`
1746 /// will be selected for a given `role`.
1747 ///
1748 /// Note: because this function is used to assess the total
1749 /// properties of the consensus, the `usable` predicate takes a
1750 /// [`MdRouterStatus`] rather than a [`Relay`].
1751 pub fn total_weight<P>(&self, role: WeightRole, usable: P) -> RelayWeight
1752 where
1753 P: Fn(&UncheckedRelay<'_>) -> bool,
1754 {
1755 self.all_relays()
1756 .filter_map(|unchecked| {
1757 if usable(&unchecked) {
1758 Some(RelayWeight(
1759 self.weights.weight_rs_for_role(unchecked.rs, role),
1760 ))
1761 } else {
1762 None
1763 }
1764 })
1765 .sum()
1766 }
1767
1768 /// Compute the weight with which a relay with ID `rsa_id` would be
1769 /// selected for a given `role`.
1770 ///
1771 /// Note that weight returned by this function assumes that the
1772 /// relay with that ID is actually [usable](NetDir#usable); if it isn't usable,
1773 /// then other weight-related functions will call its weight zero.
1774 pub fn weight_by_rsa_id(&self, rsa_id: &RsaIdentity, role: WeightRole) -> Option<RelayWeight> {
1775 self.by_rsa_id_unchecked(rsa_id)
1776 .map(|unchecked| RelayWeight(self.weights.weight_rs_for_role(unchecked.rs, role)))
1777 }
1778
1779 /// Return all relays in this NetDir known to be in the same family as
1780 /// `relay`.
1781 ///
1782 /// This list of members will **not** necessarily include `relay` itself.
1783 ///
1784 /// # Limitations
1785 ///
1786 /// Two relays only belong to the same family if _each_ relay
1787 /// claims to share a family with the other. But if we are
1788 /// missing a microdescriptor for one of the relays listed by this
1789 /// relay, we cannot know whether it acknowledges family
1790 /// membership with this relay or not. Therefore, this function
1791 /// can omit family members for which there is not (as yet) any
1792 /// Relay object.
1793 pub fn known_family_members<'a>(
1794 &'a self,
1795 relay: &'a Relay<'a>,
1796 ) -> impl Iterator<Item = Relay<'a>> {
1797 let relay_rsa_id = relay.rsa_id();
1798 relay.md.family().members().filter_map(move |other_rsa_id| {
1799 self.by_rsa_id(other_rsa_id)
1800 .filter(|other_relay| other_relay.md.family().contains(relay_rsa_id))
1801 })
1802 }
1803
1804 /// Return the current hidden service directory "time period".
1805 ///
1806 /// Specifically, this returns the time period that contains the beginning
1807 /// of the validity period of this `NetDir`'s consensus. That time period
1808 /// is the one we use when acting as an hidden service client.
1809 #[cfg(feature = "hs-common")]
1810 pub fn hs_time_period(&self) -> TimePeriod {
1811 self.hsdir_rings.current.time_period()
1812 }
1813
1814 /// Return the [`HsDirParams`] of all the relevant hidden service directory "time periods"
1815 ///
1816 /// This includes the current time period (as from
1817 /// [`.hs_time_period`](NetDir::hs_time_period))
1818 /// plus additional time periods that we publish descriptors for when we are
1819 /// acting as a hidden service.
1820 #[cfg(feature = "hs-service")]
1821 pub fn hs_all_time_periods(&self) -> Vec<HsDirParams> {
1822 self.hsdir_rings
1823 .iter()
1824 .map(|r| r.params().clone())
1825 .collect()
1826 }
1827
1828 /// Return the relays in this network directory that will be used as hidden service directories
1829 ///
1830 /// These are suitable to retrieve a given onion service's descriptor at a given time period.
1831 #[cfg(feature = "hs-common")]
1832 pub fn hs_dirs_download<'r, R>(
1833 &'r self,
1834 hsid: HsBlindId,
1835 period: TimePeriod,
1836 rng: &mut R,
1837 ) -> std::result::Result<Vec<Relay<'r>>, Bug>
1838 where
1839 R: rand::Rng,
1840 {
1841 // Algorithm:
1842 //
1843 // 1. Determine which HsDirRing to use, based on the time period.
1844 // 2. Find the shared random value that's associated with that HsDirRing.
1845 // 3. Choose spread = the parameter `hsdir_spread_fetch`
1846 // 4. Let n_replicas = the parameter `hsdir_n_replicas`.
1847 // 5. Initialize Dirs = []
1848 // 6. for idx in 1..=n_replicas:
1849 // - let H = hsdir_ring::onion_service_index(id, replica, rand,
1850 // period).
1851 // - Find the position of H within hsdir_ring.
1852 // - Take elements from hsdir_ring starting at that position,
1853 // adding them to Dirs until we have added `spread` new elements
1854 // that were not there before.
1855 // 7. Shuffle Dirs
1856 // 8. return Dirs.
1857
1858 let spread = self.spread(HsDirOp::Download);
1859
1860 // When downloading, only look at relays on current ring.
1861 let ring = &self.hsdir_rings.current;
1862
1863 if ring.params().time_period != period {
1864 return Err(internal!(
1865 "our current ring is not associated with the requested time period!"
1866 ));
1867 }
1868
1869 let mut hs_dirs = self.select_hsdirs(hsid, ring, spread).collect_vec();
1870
1871 // When downloading, the order of the returned relays is random.
1872 hs_dirs.shuffle(rng);
1873
1874 Ok(hs_dirs)
1875 }
1876
1877 /// Return the relays in this network directory that will be used as hidden service directories
1878 ///
1879 /// Returns the relays that are suitable for storing a given onion service's descriptors at the
1880 /// given time period.
1881 #[cfg(feature = "hs-service")]
1882 pub fn hs_dirs_upload(
1883 &self,
1884 hsid: HsBlindId,
1885 period: TimePeriod,
1886 ) -> std::result::Result<impl Iterator<Item = Relay<'_>>, Bug> {
1887 // Algorithm:
1888 //
1889 // 1. Choose spread = the parameter `hsdir_spread_store`
1890 // 2. Determine which HsDirRing to use, based on the time period.
1891 // 3. Find the shared random value that's associated with that HsDirRing.
1892 // 4. Let n_replicas = the parameter `hsdir_n_replicas`.
1893 // 5. Initialize Dirs = []
1894 // 6. for idx in 1..=n_replicas:
1895 // - let H = hsdir_ring::onion_service_index(id, replica, rand,
1896 // period).
1897 // - Find the position of H within hsdir_ring.
1898 // - Take elements from hsdir_ring starting at that position,
1899 // adding them to Dirs until we have added `spread` new elements
1900 // that were not there before.
1901 // 3. return Dirs.
1902 let spread = self.spread(HsDirOp::Upload);
1903
1904 // For each HsBlindId, determine which HsDirRing to use.
1905 let rings = self
1906 .hsdir_rings
1907 .iter()
1908 .filter_map(move |ring| {
1909 // Make sure the ring matches the TP of the hsid it's matched with.
1910 (ring.params().time_period == period).then_some((ring, hsid, period))
1911 })
1912 .collect::<Vec<_>>();
1913
1914 // The specified period should have an associated ring.
1915 if !rings.iter().any(|(_, _, tp)| *tp == period) {
1916 return Err(internal!(
1917 "the specified time period does not have an associated ring"
1918 ));
1919 };
1920
1921 // Now that we've matched each `hsid` with the ring associated with its TP, we can start
1922 // selecting replicas from each ring.
1923 Ok(rings.into_iter().flat_map(move |(ring, hsid, period)| {
1924 assert_eq!(period, ring.params().time_period());
1925 self.select_hsdirs(hsid, ring, spread)
1926 }))
1927 }
1928
1929 /// Return the relays in this network directory that will be used as hidden service directories
1930 ///
1931 /// Depending on `op`,
1932 /// these are suitable to either store, or retrieve, a
1933 /// given onion service's descriptor at a given time period.
1934 ///
1935 /// When `op` is `Download`, the order is random.
1936 /// When `op` is `Upload`, the order is not specified.
1937 ///
1938 /// Return an error if the time period is not one returned by
1939 /// `onion_service_time_period` or `onion_service_secondary_time_periods`.
1940 //
1941 // TODO: make HsDirOp pub(crate) once this is removed
1942 #[cfg(feature = "hs-common")]
1943 #[deprecated(note = "Use hs_dirs_upload or hs_dirs_download instead")]
1944 pub fn hs_dirs<'r, R>(&'r self, hsid: &HsBlindId, op: HsDirOp, rng: &mut R) -> Vec<Relay<'r>>
1945 where
1946 R: rand::Rng,
1947 {
1948 // Algorithm:
1949 //
1950 // 1. Determine which HsDirRing to use, based on the time period.
1951 // 2. Find the shared random value that's associated with that HsDirRing.
1952 // 3. Choose spread = the parameter `hsdir_spread_store` or
1953 // `hsdir_spread_fetch` based on `op`.
1954 // 4. Let n_replicas = the parameter `hsdir_n_replicas`.
1955 // 5. Initialize Dirs = []
1956 // 6. for idx in 1..=n_replicas:
1957 // - let H = hsdir_ring::onion_service_index(id, replica, rand,
1958 // period).
1959 // - Find the position of H within hsdir_ring.
1960 // - Take elements from hsdir_ring starting at that position,
1961 // adding them to Dirs until we have added `spread` new elements
1962 // that were not there before.
1963 // 7. return Dirs.
1964 let n_replicas = self
1965 .params
1966 .hsdir_n_replicas
1967 .get()
1968 .try_into()
1969 .expect("BoundedInt did not enforce bounds");
1970
1971 let spread = match op {
1972 HsDirOp::Download => self.params.hsdir_spread_fetch,
1973 #[cfg(feature = "hs-service")]
1974 HsDirOp::Upload => self.params.hsdir_spread_store,
1975 };
1976
1977 let spread = spread
1978 .get()
1979 .try_into()
1980 .expect("BoundedInt did not enforce bounds!");
1981
1982 // TODO: I may be wrong here but I suspect that this function may
1983 // need refactoring so that it does not look at _all_ of the HsDirRings,
1984 // but only at the ones that corresponds to time periods for which
1985 // HsBlindId is valid. Or I could be mistaken, in which case we should
1986 // have a comment to explain why I am, since the logic is subtle.
1987 // (For clients, there is only one ring.) -nickm
1988 //
1989 // (Actually, there is no need to follow through with the above TODO,
1990 // since this function is deprecated, and not used anywhere but the
1991 // tests.)
1992
1993 let mut hs_dirs = self
1994 .hsdir_rings
1995 .iter_for_op(op)
1996 .cartesian_product(1..=n_replicas) // 1-indexed !
1997 .flat_map({
1998 let mut selected_nodes = HashSet::new();
1999
2000 move |(ring, replica): (&HsDirRing, u8)| {
2001 let hsdir_idx = hsdir_ring::service_hsdir_index(hsid, replica, ring.params());
2002
2003 ring.ring_items_at(hsdir_idx, spread, |(hsdir_idx, _)| {
2004 // According to rend-spec 2.2.3:
2005 // ... If any of those
2006 // nodes have already been selected for a lower-numbered replica of the
2007 // service, any nodes already chosen are disregarded (i.e. skipped over)
2008 // when choosing a replica's hsdir_spread_store nodes.
2009 selected_nodes.insert(*hsdir_idx)
2010 })
2011 .collect::<Vec<_>>()
2012 }
2013 })
2014 .filter_map(|(_hsdir_idx, rs_idx)| {
2015 // This ought not to be None but let's not panic or bail if it is
2016 self.relay_by_rs_idx(*rs_idx)
2017 })
2018 .collect_vec();
2019
2020 match op {
2021 HsDirOp::Download => {
2022 // When `op` is `Download`, the order is random.
2023 hs_dirs.shuffle(rng);
2024 }
2025 #[cfg(feature = "hs-service")]
2026 HsDirOp::Upload => {
2027 // When `op` is `Upload`, the order is not specified.
2028 }
2029 }
2030
2031 hs_dirs
2032 }
2033}
2034
2035impl MdReceiver for NetDir {
2036 fn missing_microdescs(&self) -> Box<dyn Iterator<Item = &MdDigest> + '_> {
2037 Box::new(self.rsidx_by_missing.keys())
2038 }
2039 fn add_microdesc(&mut self, md: Microdesc) -> bool {
2040 self.add_arc_microdesc(Arc::new(md))
2041 }
2042 fn n_missing(&self) -> usize {
2043 self.rsidx_by_missing.len()
2044 }
2045}
2046
2047impl<'a> UncheckedRelay<'a> {
2048 /// Return an [`UncheckedRelayDetails`](details::UncheckedRelayDetails) for this relay.
2049 ///
2050 /// Callers should generally avoid using this information directly if they can;
2051 /// it's better to use a higher-level function that exposes semantic information
2052 /// rather than these properties.
2053 pub fn low_level_details(&self) -> details::UncheckedRelayDetails<'_> {
2054 details::UncheckedRelayDetails(self)
2055 }
2056
2057 /// Return true if this relay is valid and [usable](NetDir#usable).
2058 ///
2059 /// This function should return `true` for every Relay we expose
2060 /// to the user.
2061 pub fn is_usable(&self) -> bool {
2062 // No need to check for 'valid' or 'running': they are implicit.
2063 self.md.is_some() && self.rs.ed25519_id_is_usable()
2064 }
2065 /// If this is [usable](NetDir#usable), return a corresponding Relay object.
2066 pub fn into_relay(self) -> Option<Relay<'a>> {
2067 if self.is_usable() {
2068 Some(Relay {
2069 rs: self.rs,
2070 md: self.md?,
2071 #[cfg(feature = "geoip")]
2072 cc: self.cc,
2073 })
2074 } else {
2075 None
2076 }
2077 }
2078
2079 /// Return true if this relay is a hidden service directory
2080 ///
2081 /// Ie, if it is to be included in the hsdir ring.
2082 #[cfg(feature = "hs-common")]
2083 pub(crate) fn is_hsdir_for_ring(&self) -> bool {
2084 // TODO are there any other flags should we check?
2085 // rend-spec-v3 2.2.3 says just
2086 // "each node listed in the current consensus with the HSDir flag"
2087 // Do we need to check ed25519_id_is_usable ?
2088 // See also https://gitlab.torproject.org/tpo/core/arti/-/issues/504
2089 self.rs.is_flagged_hsdir()
2090 }
2091}
2092
2093impl<'a> Relay<'a> {
2094 /// Return a [`RelayDetails`](details::RelayDetails) for this relay.
2095 ///
2096 /// Callers should generally avoid using this information directly if they can;
2097 /// it's better to use a higher-level function that exposes semantic information
2098 /// rather than these properties.
2099 pub fn low_level_details(&self) -> details::RelayDetails<'_> {
2100 details::RelayDetails(self)
2101 }
2102
2103 /// Return the Ed25519 ID for this relay.
2104 pub fn id(&self) -> &Ed25519Identity {
2105 self.md.ed25519_id()
2106 }
2107 /// Return the RsaIdentity for this relay.
2108 pub fn rsa_id(&self) -> &RsaIdentity {
2109 self.rs.rsa_identity()
2110 }
2111
2112 /// Return a reference to this relay's "router status" entry in
2113 /// the consensus.
2114 ///
2115 /// The router status entry contains information about the relay
2116 /// that the authorities voted on directly. For most use cases,
2117 /// you shouldn't need them.
2118 ///
2119 /// This function is only available if the crate was built with
2120 /// its `experimental-api` feature.
2121 #[cfg(feature = "experimental-api")]
2122 pub fn rs(&self) -> &netstatus::MdRouterStatus {
2123 self.rs
2124 }
2125 /// Return a reference to this relay's "microdescriptor" entry in
2126 /// the consensus.
2127 ///
2128 /// A "microdescriptor" is a synopsis of the information about a relay,
2129 /// used to determine its capabilities and route traffic through it.
2130 /// For most use cases, you shouldn't need it.
2131 ///
2132 /// This function is only available if the crate was built with
2133 /// its `experimental-api` feature.
2134 #[cfg(feature = "experimental-api")]
2135 pub fn md(&self) -> &Microdesc {
2136 self.md
2137 }
2138}
2139
2140/// An error value returned from [`NetDir::by_ids_detailed`].
2141#[cfg(feature = "hs-common")]
2142#[derive(Clone, Debug, thiserror::Error)]
2143#[non_exhaustive]
2144pub enum RelayLookupError {
2145 /// We found a relay whose presence indicates that the provided set of
2146 /// identities is impossible to resolve.
2147 #[error("Provided set of identities is impossible according to consensus.")]
2148 Impossible,
2149}
2150
2151impl<'a> HasAddrs for Relay<'a> {
2152 fn addrs(&self) -> impl Iterator<Item = std::net::SocketAddr> {
2153 self.rs.addrs()
2154 }
2155}
2156#[cfg(feature = "geoip")]
2157impl<'a> HasCountryCode for Relay<'a> {
2158 fn country_code(&self) -> Option<CountryCode> {
2159 self.cc
2160 }
2161}
2162impl<'a> tor_linkspec::HasRelayIdsLegacy for Relay<'a> {
2163 fn ed_identity(&self) -> &Ed25519Identity {
2164 self.id()
2165 }
2166 fn rsa_identity(&self) -> &RsaIdentity {
2167 self.rsa_id()
2168 }
2169}
2170
2171impl<'a> HasRelayIds for UncheckedRelay<'a> {
2172 fn identity(&self, key_type: RelayIdType) -> Option<RelayIdRef<'_>> {
2173 match key_type {
2174 RelayIdType::Ed25519 if self.rs.ed25519_id_is_usable() => {
2175 self.md.map(|m| m.ed25519_id().into())
2176 }
2177 RelayIdType::Rsa => Some(self.rs.rsa_identity().into()),
2178 _ => None,
2179 }
2180 }
2181}
2182#[cfg(feature = "geoip")]
2183impl<'a> HasCountryCode for UncheckedRelay<'a> {
2184 fn country_code(&self) -> Option<CountryCode> {
2185 self.cc
2186 }
2187}
2188
2189impl<'a> DirectChanMethodsHelper for Relay<'a> {}
2190impl<'a> ChanTarget for Relay<'a> {}
2191
2192impl<'a> tor_linkspec::CircTarget for Relay<'a> {
2193 fn ntor_onion_key(&self) -> &ll::pk::curve25519::PublicKey {
2194 self.md.ntor_key()
2195 }
2196 fn protovers(&self) -> &tor_protover::Protocols {
2197 self.rs.protovers()
2198 }
2199}
2200
2201#[cfg(test)]
2202mod test {
2203 // @@ begin test lint list maintained by maint/add_warning @@
2204 #![allow(clippy::bool_assert_comparison)]
2205 #![allow(clippy::clone_on_copy)]
2206 #![allow(clippy::dbg_macro)]
2207 #![allow(clippy::mixed_attributes_style)]
2208 #![allow(clippy::print_stderr)]
2209 #![allow(clippy::print_stdout)]
2210 #![allow(clippy::single_char_pattern)]
2211 #![allow(clippy::unwrap_used)]
2212 #![allow(clippy::unchecked_time_subtraction)]
2213 #![allow(clippy::useless_vec)]
2214 #![allow(clippy::needless_pass_by_value)]
2215 //! <!-- @@ end test lint list maintained by maint/add_warning @@ -->
2216 #![allow(clippy::cognitive_complexity)]
2217 use super::*;
2218 use crate::testnet::*;
2219 use float_eq::assert_float_eq;
2220 use std::collections::HashSet;
2221 use std::time::Duration;
2222 use tor_basic_utils::test_rng::{self, testing_rng};
2223 use tor_linkspec::{RelayIdType, RelayIds};
2224
2225 #[cfg(feature = "hs-common")]
2226 fn dummy_hs_blind_id() -> HsBlindId {
2227 let hsid = [2, 1, 1, 1].iter().cycle().take(32).cloned().collect_vec();
2228 let hsid = Ed25519Identity::new(hsid[..].try_into().unwrap());
2229 HsBlindId::from(hsid)
2230 }
2231
2232 // Basic functionality for a partial netdir: Add microdescriptors,
2233 // then you have a netdir.
2234 #[test]
2235 fn partial_netdir() {
2236 let (consensus, microdescs) = construct_network().unwrap();
2237 let dir = PartialNetDir::new(consensus, None);
2238
2239 // Check the lifetime
2240 let lifetime = dir.lifetime();
2241 assert_eq!(
2242 lifetime
2243 .valid_until()
2244 .duration_since(lifetime.valid_after())
2245 .unwrap(),
2246 Duration::new(86400, 0)
2247 );
2248
2249 // No microdescriptors, so we don't have enough paths, and can't
2250 // advance.
2251 assert!(!dir.have_enough_paths());
2252 let mut dir = match dir.unwrap_if_sufficient() {
2253 Ok(_) => panic!(),
2254 Err(d) => d,
2255 };
2256
2257 let missing: HashSet<_> = dir.missing_microdescs().collect();
2258 assert_eq!(missing.len(), 40);
2259 assert_eq!(missing.len(), dir.netdir.c_relays().len());
2260 for md in µdescs {
2261 assert!(missing.contains(md.digest()));
2262 }
2263
2264 // Now add all the mds and try again.
2265 for md in microdescs {
2266 let wanted = dir.add_microdesc(md);
2267 assert!(wanted);
2268 }
2269
2270 let missing: HashSet<_> = dir.missing_microdescs().collect();
2271 assert!(missing.is_empty());
2272 assert!(dir.have_enough_paths());
2273 let _complete = match dir.unwrap_if_sufficient() {
2274 Ok(d) => d,
2275 Err(_) => panic!(),
2276 };
2277 }
2278
2279 #[test]
2280 fn override_params() {
2281 let (consensus, _microdescs) = construct_network().unwrap();
2282 let override_p = "bwweightscale=2 doesnotexist=77 circwindow=500"
2283 .parse()
2284 .unwrap();
2285 let dir = PartialNetDir::new(consensus.clone(), Some(&override_p));
2286 let params = &dir.netdir.params;
2287 assert_eq!(params.bw_weight_scale.get(), 2);
2288 assert_eq!(params.circuit_window.get(), 500_i32);
2289
2290 // try again without the override.
2291 let dir = PartialNetDir::new(consensus, None);
2292 let params = &dir.netdir.params;
2293 assert_eq!(params.bw_weight_scale.get(), 1_i32);
2294 assert_eq!(params.circuit_window.get(), 1000_i32);
2295 }
2296
2297 #[test]
2298 fn fill_from_previous() {
2299 let (consensus, microdescs) = construct_network().unwrap();
2300
2301 let mut dir = PartialNetDir::new(consensus.clone(), None);
2302 for md in microdescs.iter().skip(2) {
2303 let wanted = dir.add_microdesc(md.clone());
2304 assert!(wanted);
2305 }
2306 let dir1 = dir.unwrap_if_sufficient().unwrap();
2307 assert_eq!(dir1.missing_microdescs().count(), 2);
2308
2309 let mut dir = PartialNetDir::new(consensus, None);
2310 assert_eq!(dir.missing_microdescs().count(), 40);
2311 dir.fill_from_previous_netdir(Arc::new(dir1));
2312 assert_eq!(dir.missing_microdescs().count(), 2);
2313 }
2314
2315 #[test]
2316 fn path_count() {
2317 let low_threshold = "min_paths_for_circs_pct=64".parse().unwrap();
2318 let high_threshold = "min_paths_for_circs_pct=65".parse().unwrap();
2319
2320 let (consensus, microdescs) = construct_network().unwrap();
2321
2322 let mut dir = PartialNetDir::new(consensus.clone(), Some(&low_threshold));
2323 for (pos, md) in microdescs.iter().enumerate() {
2324 if pos % 7 == 2 {
2325 continue; // skip a few relays.
2326 }
2327 dir.add_microdesc(md.clone());
2328 }
2329 let dir = dir.unwrap_if_sufficient().unwrap();
2330
2331 // We have 40 relays that we know about from the consensus.
2332 assert_eq!(dir.all_relays().count(), 40);
2333
2334 // But only 34 are usable.
2335 assert_eq!(dir.relays().count(), 34);
2336
2337 // For guards: mds 20..=39 correspond to Guard relays.
2338 // Their bandwidth is 2*(1000+2000+...10000) = 110_000.
2339 // We skipped 23, 30, and 37. They have bandwidth
2340 // 4000 + 1000 + 8000 = 13_000. So our fractional bandwidth
2341 // should be (110-13)/110.
2342 let f = dir.frac_for_role(WeightRole::Guard, |u| u.rs.is_flagged_guard());
2343 assert!(((97.0 / 110.0) - f).abs() < 0.000001);
2344
2345 // For exits: mds 10..=19 and 30..=39 correspond to Exit relays.
2346 // We skipped 16, 30, and 37. Per above our fractional bandwidth is
2347 // (110-16)/110.
2348 let f = dir.frac_for_role(WeightRole::Exit, |u| u.rs.is_flagged_exit());
2349 assert!(((94.0 / 110.0) - f).abs() < 0.000001);
2350
2351 // For middles: all relays are middles. We skipped 2, 9, 16,
2352 // 23, 30, and 37. Per above our fractional bandwidth is
2353 // (220-33)/220
2354 let f = dir.frac_for_role(WeightRole::Middle, |_| true);
2355 assert!(((187.0 / 220.0) - f).abs() < 0.000001);
2356
2357 // Multiplying those together, we get the fraction of paths we can
2358 // build at ~0.64052066, which is above the threshold we set above for
2359 // MinPathsForCircsPct.
2360 let f = dir.frac_usable_paths();
2361 assert!((f - 0.64052066).abs() < 0.000001);
2362
2363 // But if we try again with a slightly higher threshold...
2364 let mut dir = PartialNetDir::new(consensus, Some(&high_threshold));
2365 for (pos, md) in microdescs.into_iter().enumerate() {
2366 if pos % 7 == 2 {
2367 continue; // skip a few relays.
2368 }
2369 dir.add_microdesc(md);
2370 }
2371 assert!(dir.unwrap_if_sufficient().is_err());
2372 }
2373
2374 /// Return a 3-tuple for use by `test_pick_*()` of an Rng, a number of
2375 /// iterations, and a tolerance.
2376 ///
2377 /// If the Rng is deterministic (the default), we can use a faster setup,
2378 /// with a higher tolerance and fewer iterations. But if you've explicitly
2379 /// opted into randomization (or are replaying a seed from an earlier
2380 /// randomized test), we give you more iterations and a tighter tolerance.
2381 fn testing_rng_with_tolerances() -> (impl rand::Rng, usize, f64) {
2382 // Use a deterministic RNG if none is specified, since this is slow otherwise.
2383 let config = test_rng::Config::from_env().unwrap_or(test_rng::Config::Deterministic);
2384 let (iters, tolerance) = match config {
2385 test_rng::Config::Deterministic => (5000, 0.02),
2386 _ => (50000, 0.01),
2387 };
2388 (config.into_rng(), iters, tolerance)
2389 }
2390
2391 #[test]
2392 fn test_pick() {
2393 let (consensus, microdescs) = construct_network().unwrap();
2394 let mut dir = PartialNetDir::new(consensus, None);
2395 for md in microdescs.into_iter() {
2396 let wanted = dir.add_microdesc(md.clone());
2397 assert!(wanted);
2398 }
2399 let dir = dir.unwrap_if_sufficient().unwrap();
2400
2401 let (mut rng, total, tolerance) = testing_rng_with_tolerances();
2402
2403 let mut picked = [0_isize; 40];
2404 for _ in 0..total {
2405 let r = dir.pick_relay(&mut rng, WeightRole::Middle, |r| {
2406 r.low_level_details().supports_exit_port_ipv4(80)
2407 });
2408 let r = r.unwrap();
2409 let id_byte = r.identity(RelayIdType::Rsa).unwrap().as_bytes()[0];
2410 picked[id_byte as usize] += 1;
2411 }
2412 // non-exits should never get picked.
2413 picked[0..10].iter().for_each(|x| assert_eq!(*x, 0));
2414 picked[20..30].iter().for_each(|x| assert_eq!(*x, 0));
2415
2416 let picked_f: Vec<_> = picked.iter().map(|x| *x as f64 / total as f64).collect();
2417
2418 // We didn't we any non-default weights, so the other relays get
2419 // weighted proportional to their bandwidth.
2420 assert_float_eq!(picked_f[19], (10.0 / 110.0), abs <= tolerance);
2421 assert_float_eq!(picked_f[38], (9.0 / 110.0), abs <= tolerance);
2422 assert_float_eq!(picked_f[39], (10.0 / 110.0), abs <= tolerance);
2423 }
2424
2425 #[test]
2426 fn test_pick_multiple() {
2427 // This is mostly a copy of test_pick, except that it uses
2428 // pick_n_relays to pick several relays at once.
2429
2430 let dir = construct_netdir().unwrap_if_sufficient().unwrap();
2431
2432 let (mut rng, total, tolerance) = testing_rng_with_tolerances();
2433
2434 let mut picked = [0_isize; 40];
2435 for _ in 0..total / 4 {
2436 let relays = dir.pick_n_relays(&mut rng, 4, WeightRole::Middle, |r| {
2437 r.low_level_details().supports_exit_port_ipv4(80)
2438 });
2439 assert_eq!(relays.len(), 4);
2440 for r in relays {
2441 let id_byte = r.identity(RelayIdType::Rsa).unwrap().as_bytes()[0];
2442 picked[id_byte as usize] += 1;
2443 }
2444 }
2445 // non-exits should never get picked.
2446 picked[0..10].iter().for_each(|x| assert_eq!(*x, 0));
2447 picked[20..30].iter().for_each(|x| assert_eq!(*x, 0));
2448
2449 let picked_f: Vec<_> = picked.iter().map(|x| *x as f64 / total as f64).collect();
2450
2451 // We didn't we any non-default weights, so the other relays get
2452 // weighted proportional to their bandwidth.
2453 assert_float_eq!(picked_f[19], (10.0 / 110.0), abs <= tolerance);
2454 assert_float_eq!(picked_f[36], (7.0 / 110.0), abs <= tolerance);
2455 assert_float_eq!(picked_f[39], (10.0 / 110.0), abs <= tolerance);
2456 }
2457
2458 #[test]
2459 fn subnets() {
2460 let cfg = SubnetConfig::default();
2461
2462 fn same_net(cfg: &SubnetConfig, a: &str, b: &str) -> bool {
2463 cfg.addrs_in_same_subnet(&a.parse().unwrap(), &b.parse().unwrap())
2464 }
2465
2466 assert!(same_net(&cfg, "127.15.3.3", "127.15.9.9"));
2467 assert!(!same_net(&cfg, "127.15.3.3", "127.16.9.9"));
2468
2469 assert!(!same_net(&cfg, "127.15.3.3", "127::"));
2470
2471 assert!(same_net(&cfg, "ffff:ffff:90:33::", "ffff:ffff:91:34::"));
2472 assert!(!same_net(&cfg, "ffff:ffff:90:33::", "ffff:fffe:91:34::"));
2473
2474 let cfg = SubnetConfig {
2475 subnets_family_v4: 32,
2476 subnets_family_v6: 128,
2477 };
2478 assert!(!same_net(&cfg, "127.15.3.3", "127.15.9.9"));
2479 assert!(!same_net(&cfg, "ffff:ffff:90:33::", "ffff:ffff:91:34::"));
2480
2481 assert!(same_net(&cfg, "127.0.0.1", "127.0.0.1"));
2482 assert!(!same_net(&cfg, "127.0.0.1", "127.0.0.2"));
2483 assert!(same_net(&cfg, "ffff:ffff:90:33::", "ffff:ffff:90:33::"));
2484
2485 let cfg = SubnetConfig {
2486 subnets_family_v4: 33,
2487 subnets_family_v6: 129,
2488 };
2489 assert!(!same_net(&cfg, "127.0.0.1", "127.0.0.1"));
2490 assert!(!same_net(&cfg, "::", "::"));
2491 }
2492
2493 #[test]
2494 fn subnet_union() {
2495 let cfg1 = SubnetConfig {
2496 subnets_family_v4: 16,
2497 subnets_family_v6: 64,
2498 };
2499 let cfg2 = SubnetConfig {
2500 subnets_family_v4: 24,
2501 subnets_family_v6: 32,
2502 };
2503 let a1 = "1.2.3.4".parse().unwrap();
2504 let a2 = "1.2.10.10".parse().unwrap();
2505
2506 let a3 = "ffff:ffff::7".parse().unwrap();
2507 let a4 = "ffff:ffff:1234::8".parse().unwrap();
2508
2509 assert_eq!(cfg1.addrs_in_same_subnet(&a1, &a2), true);
2510 assert_eq!(cfg2.addrs_in_same_subnet(&a1, &a2), false);
2511
2512 assert_eq!(cfg1.addrs_in_same_subnet(&a3, &a4), false);
2513 assert_eq!(cfg2.addrs_in_same_subnet(&a3, &a4), true);
2514
2515 let cfg_u = cfg1.union(&cfg2);
2516 assert_eq!(
2517 cfg_u,
2518 SubnetConfig {
2519 subnets_family_v4: 16,
2520 subnets_family_v6: 32,
2521 }
2522 );
2523 assert_eq!(cfg_u.addrs_in_same_subnet(&a1, &a2), true);
2524 assert_eq!(cfg_u.addrs_in_same_subnet(&a3, &a4), true);
2525
2526 assert_eq!(cfg1.union(&cfg1), cfg1);
2527
2528 assert_eq!(cfg1.union(&SubnetConfig::no_addresses_match()), cfg1);
2529 }
2530
2531 #[test]
2532 fn relay_funcs() {
2533 let (consensus, microdescs) = construct_custom_network(
2534 |pos, nb, _| {
2535 if pos == 15 {
2536 nb.rs.add_or_port("[f0f0::30]:9001".parse().unwrap());
2537 } else if pos == 20 {
2538 nb.rs.add_or_port("[f0f0::3131]:9001".parse().unwrap());
2539 }
2540 },
2541 None,
2542 )
2543 .unwrap();
2544 let subnet_config = SubnetConfig::default();
2545 let all_family_info = FamilyRules::all_family_info();
2546 let mut dir = PartialNetDir::new(consensus, None);
2547 for md in microdescs.into_iter() {
2548 let wanted = dir.add_microdesc(md.clone());
2549 assert!(wanted);
2550 }
2551 let dir = dir.unwrap_if_sufficient().unwrap();
2552
2553 // Pick out a few relays by ID.
2554 let k0 = Ed25519Identity::from([0; 32]);
2555 let k1 = Ed25519Identity::from([1; 32]);
2556 let k2 = Ed25519Identity::from([2; 32]);
2557 let k3 = Ed25519Identity::from([3; 32]);
2558 let k10 = Ed25519Identity::from([10; 32]);
2559 let k15 = Ed25519Identity::from([15; 32]);
2560 let k20 = Ed25519Identity::from([20; 32]);
2561
2562 let r0 = dir.by_id(&k0).unwrap();
2563 let r1 = dir.by_id(&k1).unwrap();
2564 let r2 = dir.by_id(&k2).unwrap();
2565 let r3 = dir.by_id(&k3).unwrap();
2566 let r10 = dir.by_id(&k10).unwrap();
2567 let r15 = dir.by_id(&k15).unwrap();
2568 let r20 = dir.by_id(&k20).unwrap();
2569
2570 assert_eq!(r0.id(), &[0; 32].into());
2571 assert_eq!(r0.rsa_id(), &[0; 20].into());
2572 assert_eq!(r1.id(), &[1; 32].into());
2573 assert_eq!(r1.rsa_id(), &[1; 20].into());
2574
2575 assert!(r0.same_relay_ids(&r0));
2576 assert!(r1.same_relay_ids(&r1));
2577 assert!(!r1.same_relay_ids(&r0));
2578
2579 assert!(r0.low_level_details().is_dir_cache());
2580 assert!(!r1.low_level_details().is_dir_cache());
2581 assert!(r2.low_level_details().is_dir_cache());
2582 assert!(!r3.low_level_details().is_dir_cache());
2583
2584 assert!(!r0.low_level_details().supports_exit_port_ipv4(80));
2585 assert!(!r1.low_level_details().supports_exit_port_ipv4(80));
2586 assert!(!r2.low_level_details().supports_exit_port_ipv4(80));
2587 assert!(!r3.low_level_details().supports_exit_port_ipv4(80));
2588
2589 assert!(!r0.low_level_details().policies_allow_some_port());
2590 assert!(!r1.low_level_details().policies_allow_some_port());
2591 assert!(!r2.low_level_details().policies_allow_some_port());
2592 assert!(!r3.low_level_details().policies_allow_some_port());
2593 assert!(r10.low_level_details().policies_allow_some_port());
2594
2595 assert!(r0.low_level_details().in_same_family(&r0, all_family_info));
2596 assert!(r0.low_level_details().in_same_family(&r1, all_family_info));
2597 assert!(r1.low_level_details().in_same_family(&r0, all_family_info));
2598 assert!(r1.low_level_details().in_same_family(&r1, all_family_info));
2599 assert!(!r0.low_level_details().in_same_family(&r2, all_family_info));
2600 assert!(!r2.low_level_details().in_same_family(&r0, all_family_info));
2601 assert!(r2.low_level_details().in_same_family(&r2, all_family_info));
2602 assert!(r2.low_level_details().in_same_family(&r3, all_family_info));
2603
2604 assert!(r0.low_level_details().in_same_subnet(&r10, &subnet_config));
2605 assert!(r10.low_level_details().in_same_subnet(&r10, &subnet_config));
2606 assert!(r0.low_level_details().in_same_subnet(&r0, &subnet_config));
2607 assert!(r1.low_level_details().in_same_subnet(&r1, &subnet_config));
2608 assert!(!r1.low_level_details().in_same_subnet(&r2, &subnet_config));
2609 assert!(!r2.low_level_details().in_same_subnet(&r3, &subnet_config));
2610
2611 // Make sure IPv6 families work.
2612 let subnet_config = SubnetConfig {
2613 subnets_family_v4: 128,
2614 subnets_family_v6: 96,
2615 };
2616 assert!(r15.low_level_details().in_same_subnet(&r20, &subnet_config));
2617 assert!(!r15.low_level_details().in_same_subnet(&r1, &subnet_config));
2618
2619 // Make sure that subnet configs can be disabled.
2620 let subnet_config = SubnetConfig {
2621 subnets_family_v4: 255,
2622 subnets_family_v6: 255,
2623 };
2624 assert!(!r15.low_level_details().in_same_subnet(&r20, &subnet_config));
2625 }
2626
2627 #[test]
2628 fn test_badexit() {
2629 // make a netdir where relays 10-19 are badexit, and everybody
2630 // exits to 443 on IPv6.
2631 use tor_netdoc::types::relay_flags::RelayFlag;
2632 let netdir = construct_custom_netdir(|pos, nb, _| {
2633 if (10..20).contains(&pos) {
2634 nb.rs.add_flags(RelayFlag::BadExit);
2635 }
2636 nb.md.parse_ipv6_policy("accept 443").unwrap();
2637 })
2638 .unwrap()
2639 .unwrap_if_sufficient()
2640 .unwrap();
2641
2642 let e12 = netdir.by_id(&Ed25519Identity::from([12; 32])).unwrap();
2643 let e32 = netdir.by_id(&Ed25519Identity::from([32; 32])).unwrap();
2644
2645 assert!(!e12.low_level_details().supports_exit_port_ipv4(80));
2646 assert!(e32.low_level_details().supports_exit_port_ipv4(80));
2647
2648 assert!(!e12.low_level_details().supports_exit_port_ipv6(443));
2649 assert!(e32.low_level_details().supports_exit_port_ipv6(443));
2650 assert!(!e32.low_level_details().supports_exit_port_ipv6(555));
2651
2652 assert!(!e12.low_level_details().policies_allow_some_port());
2653 assert!(e32.low_level_details().policies_allow_some_port());
2654
2655 assert!(!e12.low_level_details().ipv4_policy().allows_some_port());
2656 assert!(!e12.low_level_details().ipv6_policy().allows_some_port());
2657 assert!(e32.low_level_details().ipv4_policy().allows_some_port());
2658 assert!(e32.low_level_details().ipv6_policy().allows_some_port());
2659
2660 assert!(
2661 e12.low_level_details()
2662 .ipv4_declared_policy()
2663 .allows_some_port()
2664 );
2665 assert!(
2666 e12.low_level_details()
2667 .ipv6_declared_policy()
2668 .allows_some_port()
2669 );
2670 }
2671
2672 #[cfg(feature = "experimental-api")]
2673 #[test]
2674 fn test_accessors() {
2675 let netdir = construct_netdir().unwrap_if_sufficient().unwrap();
2676
2677 let r4 = netdir.by_id(&Ed25519Identity::from([4; 32])).unwrap();
2678 let r16 = netdir.by_id(&Ed25519Identity::from([16; 32])).unwrap();
2679
2680 assert!(!r4.md().ipv4_policy().allows_some_port());
2681 assert!(r16.md().ipv4_policy().allows_some_port());
2682
2683 assert!(!r4.rs().is_flagged_exit());
2684 assert!(r16.rs().is_flagged_exit());
2685 }
2686
2687 #[test]
2688 fn test_by_id() {
2689 // Make a netdir that omits the microdescriptor for 0xDDDDDD...
2690 let netdir = construct_custom_netdir(|pos, nb, _| {
2691 nb.omit_md = pos == 13;
2692 })
2693 .unwrap();
2694
2695 let netdir = netdir.unwrap_if_sufficient().unwrap();
2696
2697 let r = netdir.by_id(&Ed25519Identity::from([0; 32])).unwrap();
2698 assert_eq!(r.id().as_bytes(), &[0; 32]);
2699
2700 assert!(netdir.by_id(&Ed25519Identity::from([13; 32])).is_none());
2701
2702 let r = netdir.by_rsa_id(&[12; 20].into()).unwrap();
2703 assert_eq!(r.rsa_id().as_bytes(), &[12; 20]);
2704 assert!(netdir.rsa_id_is_listed(&[12; 20].into()));
2705
2706 assert!(netdir.by_rsa_id(&[13; 20].into()).is_none());
2707
2708 assert!(netdir.by_rsa_id_unchecked(&[99; 20].into()).is_none());
2709 assert!(!netdir.rsa_id_is_listed(&[99; 20].into()));
2710
2711 let r = netdir.by_rsa_id_unchecked(&[13; 20].into()).unwrap();
2712 assert_eq!(r.rs.rsa_identity().as_bytes(), &[13; 20]);
2713 assert!(netdir.rsa_id_is_listed(&[13; 20].into()));
2714
2715 let pair_13_13 = RelayIds::builder()
2716 .ed_identity([13; 32].into())
2717 .rsa_identity([13; 20].into())
2718 .build()
2719 .unwrap();
2720 let pair_14_14 = RelayIds::builder()
2721 .ed_identity([14; 32].into())
2722 .rsa_identity([14; 20].into())
2723 .build()
2724 .unwrap();
2725 let pair_14_99 = RelayIds::builder()
2726 .ed_identity([14; 32].into())
2727 .rsa_identity([99; 20].into())
2728 .build()
2729 .unwrap();
2730
2731 let r = netdir.by_ids(&pair_13_13);
2732 assert!(r.is_none());
2733 let r = netdir.by_ids(&pair_14_14).unwrap();
2734 assert_eq!(r.identity(RelayIdType::Rsa).unwrap().as_bytes(), &[14; 20]);
2735 assert_eq!(
2736 r.identity(RelayIdType::Ed25519).unwrap().as_bytes(),
2737 &[14; 32]
2738 );
2739 let r = netdir.by_ids(&pair_14_99);
2740 assert!(r.is_none());
2741
2742 assert_eq!(
2743 netdir.id_pair_listed(&[13; 32].into(), &[13; 20].into()),
2744 None
2745 );
2746 assert_eq!(
2747 netdir.id_pair_listed(&[15; 32].into(), &[15; 20].into()),
2748 Some(true)
2749 );
2750 assert_eq!(
2751 netdir.id_pair_listed(&[15; 32].into(), &[99; 20].into()),
2752 Some(false)
2753 );
2754 }
2755
2756 #[test]
2757 #[cfg(feature = "hs-common")]
2758 fn test_by_ids_detailed() {
2759 // Make a netdir that omits the microdescriptor for 0xDDDDDD...
2760 let netdir = construct_custom_netdir(|pos, nb, _| {
2761 nb.omit_md = pos == 13;
2762 })
2763 .unwrap();
2764
2765 let netdir = netdir.unwrap_if_sufficient().unwrap();
2766
2767 let id13_13 = RelayIds::builder()
2768 .ed_identity([13; 32].into())
2769 .rsa_identity([13; 20].into())
2770 .build()
2771 .unwrap();
2772 let id15_15 = RelayIds::builder()
2773 .ed_identity([15; 32].into())
2774 .rsa_identity([15; 20].into())
2775 .build()
2776 .unwrap();
2777 let id15_99 = RelayIds::builder()
2778 .ed_identity([15; 32].into())
2779 .rsa_identity([99; 20].into())
2780 .build()
2781 .unwrap();
2782 let id99_15 = RelayIds::builder()
2783 .ed_identity([99; 32].into())
2784 .rsa_identity([15; 20].into())
2785 .build()
2786 .unwrap();
2787 let id99_99 = RelayIds::builder()
2788 .ed_identity([99; 32].into())
2789 .rsa_identity([99; 20].into())
2790 .build()
2791 .unwrap();
2792 let id15_xx = RelayIds::builder()
2793 .ed_identity([15; 32].into())
2794 .build()
2795 .unwrap();
2796 let idxx_15 = RelayIds::builder()
2797 .rsa_identity([15; 20].into())
2798 .build()
2799 .unwrap();
2800
2801 assert!(matches!(netdir.by_ids_detailed(&id13_13), Ok(None)));
2802 assert!(matches!(netdir.by_ids_detailed(&id15_15), Ok(Some(_))));
2803 assert!(matches!(
2804 netdir.by_ids_detailed(&id15_99),
2805 Err(RelayLookupError::Impossible)
2806 ));
2807 assert!(matches!(
2808 netdir.by_ids_detailed(&id99_15),
2809 Err(RelayLookupError::Impossible)
2810 ));
2811 assert!(matches!(netdir.by_ids_detailed(&id99_99), Ok(None)));
2812 assert!(matches!(netdir.by_ids_detailed(&id15_xx), Ok(Some(_))));
2813 assert!(matches!(netdir.by_ids_detailed(&idxx_15), Ok(Some(_))));
2814 }
2815
2816 #[test]
2817 fn weight_type() {
2818 let r0 = RelayWeight(0);
2819 let r100 = RelayWeight(100);
2820 let r200 = RelayWeight(200);
2821 let r300 = RelayWeight(300);
2822 assert_eq!(r100 + r200, r300);
2823 assert_eq!(r100.checked_div(r200), Some(0.5));
2824 assert!(r100.checked_div(r0).is_none());
2825 assert_eq!(r200.ratio(0.5), Some(r100));
2826 assert!(r200.ratio(-1.0).is_none());
2827 }
2828
2829 #[test]
2830 fn weight_accessors() {
2831 // Make a netdir that omits the microdescriptor for 0xDDDDDD...
2832 let netdir = construct_netdir().unwrap_if_sufficient().unwrap();
2833
2834 let g_total = netdir.total_weight(WeightRole::Guard, |r| r.rs.is_flagged_guard());
2835 // This is just the total guard weight, since all our Wxy = 1.
2836 assert_eq!(g_total, RelayWeight(110_000));
2837
2838 let g_total = netdir.total_weight(WeightRole::Guard, |_| false);
2839 assert_eq!(g_total, RelayWeight(0));
2840
2841 let relay = netdir.by_id(&Ed25519Identity::from([35; 32])).unwrap();
2842 assert!(relay.rs.is_flagged_guard());
2843 let w = netdir.relay_weight(&relay, WeightRole::Guard);
2844 assert_eq!(w, RelayWeight(6_000));
2845
2846 let w = netdir
2847 .weight_by_rsa_id(&[33; 20].into(), WeightRole::Guard)
2848 .unwrap();
2849 assert_eq!(w, RelayWeight(4_000));
2850
2851 assert!(
2852 netdir
2853 .weight_by_rsa_id(&[99; 20].into(), WeightRole::Guard)
2854 .is_none()
2855 );
2856 }
2857
2858 #[test]
2859 fn family_list() {
2860 let netdir = construct_custom_netdir(|pos, n, _| {
2861 if pos == 0x0a {
2862 n.md.family(
2863 "$0B0B0B0B0B0B0B0B0B0B0B0B0B0B0B0B0B0B0B0B \
2864 $0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C0C \
2865 $0D0D0D0D0D0D0D0D0D0D0D0D0D0D0D0D0D0D0D0D"
2866 .parse()
2867 .unwrap(),
2868 );
2869 } else if pos == 0x0c {
2870 n.md.family("$0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A0A".parse().unwrap());
2871 }
2872 })
2873 .unwrap()
2874 .unwrap_if_sufficient()
2875 .unwrap();
2876
2877 // In the testing netdir, adjacent members are in the same family by default...
2878 let r0 = netdir.by_id(&Ed25519Identity::from([0; 32])).unwrap();
2879 let family: Vec<_> = netdir.known_family_members(&r0).collect();
2880 assert_eq!(family.len(), 1);
2881 assert_eq!(family[0].id(), &Ed25519Identity::from([1; 32]));
2882
2883 // But we've made this relay claim membership with several others.
2884 let r10 = netdir.by_id(&Ed25519Identity::from([10; 32])).unwrap();
2885 let family: HashSet<_> = netdir.known_family_members(&r10).map(|r| *r.id()).collect();
2886 assert_eq!(family.len(), 2);
2887 assert!(family.contains(&Ed25519Identity::from([11; 32])));
2888 assert!(family.contains(&Ed25519Identity::from([12; 32])));
2889 // Note that 13 doesn't get put in, even though it's listed, since it doesn't claim
2890 // membership with 10.
2891 }
2892 #[test]
2893 #[cfg(feature = "geoip")]
2894 fn relay_has_country_code() {
2895 let src_v6 = r#"
2896 fe80:dead:beef::,fe80:dead:ffff::,US
2897 fe80:feed:eeee::1,fe80:feed:eeee::2,AT
2898 fe80:feed:eeee::2,fe80:feed:ffff::,DE
2899 "#;
2900 let db = GeoipDb::new_from_legacy_format("", src_v6).unwrap();
2901
2902 let netdir = construct_custom_netdir_with_geoip(
2903 |pos, n, _| {
2904 if pos == 0x01 {
2905 n.rs.add_or_port("[fe80:dead:beef::1]:42".parse().unwrap());
2906 }
2907 if pos == 0x02 {
2908 n.rs.add_or_port("[fe80:feed:eeee::1]:42".parse().unwrap());
2909 n.rs.add_or_port("[fe80:feed:eeee::2]:42".parse().unwrap());
2910 }
2911 if pos == 0x03 {
2912 n.rs.add_or_port("[fe80:dead:beef::1]:42".parse().unwrap());
2913 n.rs.add_or_port("[fe80:dead:beef::2]:42".parse().unwrap());
2914 }
2915 },
2916 &db,
2917 )
2918 .unwrap()
2919 .unwrap_if_sufficient()
2920 .unwrap();
2921
2922 // No GeoIP data available -> None
2923 let r0 = netdir.by_id(&Ed25519Identity::from([0; 32])).unwrap();
2924 assert_eq!(r0.cc, None);
2925
2926 // Exactly one match -> Some
2927 let r1 = netdir.by_id(&Ed25519Identity::from([1; 32])).unwrap();
2928 assert_eq!(r1.cc.as_ref().map(|x| x.as_ref()), Some("US"));
2929
2930 // Conflicting matches -> None
2931 let r2 = netdir.by_id(&Ed25519Identity::from([2; 32])).unwrap();
2932 assert_eq!(r2.cc, None);
2933
2934 // Multiple agreeing matches -> Some
2935 let r3 = netdir.by_id(&Ed25519Identity::from([3; 32])).unwrap();
2936 assert_eq!(r3.cc.as_ref().map(|x| x.as_ref()), Some("US"));
2937 }
2938
2939 #[test]
2940 #[cfg(feature = "hs-common")]
2941 #[allow(deprecated)]
2942 fn hs_dirs_selection() {
2943 use tor_basic_utils::test_rng::testing_rng;
2944
2945 const HSDIR_SPREAD_STORE: i32 = 6;
2946 const HSDIR_SPREAD_FETCH: i32 = 2;
2947 const PARAMS: [(&str, i32); 2] = [
2948 ("hsdir_spread_store", HSDIR_SPREAD_STORE),
2949 ("hsdir_spread_fetch", HSDIR_SPREAD_FETCH),
2950 ];
2951
2952 let netdir: Arc<NetDir> =
2953 crate::testnet::construct_custom_netdir_with_params(|_, _, _| {}, PARAMS, None)
2954 .unwrap()
2955 .unwrap_if_sufficient()
2956 .unwrap()
2957 .into();
2958 let hsid = dummy_hs_blind_id();
2959
2960 const OP_RELAY_COUNT: &[(HsDirOp, usize)] = &[
2961 // We can't upload to (hsdir_n_replicas * hsdir_spread_store) = 12, relays because there
2962 // are only 10 relays with the HsDir flag in the consensus.
2963 #[cfg(feature = "hs-service")]
2964 (HsDirOp::Upload, 10),
2965 (HsDirOp::Download, 4),
2966 ];
2967
2968 for (op, relay_count) in OP_RELAY_COUNT {
2969 let relays = netdir.hs_dirs(&hsid, *op, &mut testing_rng());
2970
2971 assert_eq!(relays.len(), *relay_count);
2972
2973 // There should be no duplicates (the filtering function passed to
2974 // HsDirRing::ring_items_at() ensures the relays that are already in use for
2975 // lower-numbered replicas aren't considered a second time for a higher-numbered
2976 // replica).
2977 let unique = relays
2978 .iter()
2979 .map(|relay| relay.ed_identity())
2980 .collect::<HashSet<_>>();
2981 assert_eq!(unique.len(), relays.len());
2982 }
2983
2984 // TODO: come up with a test that checks that HsDirRing::ring_items_at() skips over the
2985 // expected relays.
2986 //
2987 // For example, let's say we have the following hsdir ring:
2988 //
2989 // A - B
2990 // / \
2991 // F C
2992 // \ /
2993 // E - D
2994 //
2995 // Let's also assume that:
2996 //
2997 // * hsdir_spread_store = 3
2998 // * the ordering of the relays on the ring is [A, B, C, D, E, F]
2999 //
3000 // If we use relays [A, B, C] for replica 1, and hs_index(2) = E, then replica 2 _must_ get
3001 // relays [E, F, D]. We should have a test that checks this.
3002 }
3003
3004 #[test]
3005 fn zero_weights() {
3006 // Here we check the behavior of IndexedRandom::{choose_weighted, choose_multiple_weighted}
3007 // in the presence of items whose weight is 0.
3008 //
3009 // We think that the behavior is:
3010 // - An item with weight 0 is never returned.
3011 // - If all items have weight 0, choose_weighted returns an error.
3012 // - If all items have weight 0, choose_multiple_weighted returns an empty list.
3013 // - If we request n items from choose_multiple_weighted,
3014 // but only m<n items have nonzero weight, we return all m of those items.
3015 // - if the request for n items can't be completely satisfied with n items of weight >= 0,
3016 // we get InsufficientNonZero.
3017 let items = vec![1, 2, 3];
3018 let mut rng = testing_rng();
3019
3020 let a = items.choose_weighted(&mut rng, |_| 0);
3021 assert!(matches!(a, Err(WeightError::InsufficientNonZero)));
3022
3023 let x = items.choose_multiple_weighted(&mut rng, 2, |_| 0);
3024 let xs: Vec<_> = x.unwrap().collect();
3025 assert!(xs.is_empty());
3026
3027 let only_one = |n: &i32| if *n == 1 { 1 } else { 0 };
3028 let x = items.choose_multiple_weighted(&mut rng, 2, only_one);
3029 let xs: Vec<_> = x.unwrap().collect();
3030 assert_eq!(&xs[..], &[&1]);
3031
3032 for _ in 0..100 {
3033 let a = items.choose_weighted(&mut rng, only_one);
3034 assert_eq!(a.unwrap(), &1);
3035
3036 let x = items
3037 .choose_multiple_weighted(&mut rng, 1, only_one)
3038 .unwrap()
3039 .collect::<Vec<_>>();
3040 assert_eq!(x, vec![&1]);
3041 }
3042 }
3043
3044 #[test]
3045 fn insufficient_but_nonzero() {
3046 // Here we check IndexedRandom::choose_multiple_weighted when there no zero values,
3047 // but there are insufficient values.
3048 // (If this behavior changes, we need to change our usage.)
3049
3050 let items = vec![1, 2, 3];
3051 let mut rng = testing_rng();
3052 let mut a = items
3053 .choose_multiple_weighted(&mut rng, 10, |_| 1)
3054 .unwrap()
3055 .copied()
3056 .collect::<Vec<_>>();
3057 a.sort();
3058 assert_eq!(a, items);
3059 }
3060}