uv_resolver/resolver/
mod.rs

1//! Given a set of requirements, find a set of compatible packages.
2
3use std::borrow::Cow;
4use std::cmp::Ordering;
5use std::collections::{BTreeMap, BTreeSet, VecDeque};
6use std::fmt::{Display, Formatter, Write};
7use std::ops::Bound;
8use std::sync::Arc;
9use std::time::Instant;
10use std::{iter, slice, thread};
11
12use dashmap::DashMap;
13use either::Either;
14use futures::{FutureExt, StreamExt};
15use itertools::Itertools;
16use pubgrub::{Id, IncompId, Incompatibility, Kind, Range, Ranges, State};
17use rustc_hash::{FxHashMap, FxHashSet};
18use tokio::sync::mpsc::{self, Receiver, Sender};
19use tokio::sync::oneshot;
20use tokio_stream::wrappers::ReceiverStream;
21use tracing::{Level, debug, info, instrument, trace, warn};
22
23use uv_configuration::{Constraints, Excludes, Overrides};
24use uv_distribution::{ArchiveMetadata, DistributionDatabase};
25use uv_distribution_types::{
26    BuiltDist, CompatibleDist, DerivationChain, Dist, DistErrorKind, DistributionMetadata,
27    IncompatibleDist, IncompatibleSource, IncompatibleWheel, IndexCapabilities, IndexLocations,
28    IndexMetadata, IndexUrl, InstalledDist, Name, PythonRequirementKind, RemoteSource, Requirement,
29    ResolvedDist, ResolvedDistRef, SourceDist, VersionOrUrlRef,
30};
31use uv_git::GitResolver;
32use uv_normalize::{ExtraName, GroupName, PackageName};
33use uv_pep440::{MIN_VERSION, Version, VersionSpecifiers, release_specifiers_to_ranges};
34use uv_pep508::{
35    MarkerEnvironment, MarkerExpression, MarkerOperator, MarkerTree, MarkerValueString,
36};
37use uv_platform_tags::Tags;
38use uv_pypi_types::{ConflictItem, ConflictItemRef, ConflictKindRef, Conflicts, VerbatimParsedUrl};
39use uv_torch::TorchStrategy;
40use uv_types::{BuildContext, HashStrategy, InstalledPackagesProvider};
41use uv_warnings::warn_user_once;
42
43use crate::candidate_selector::{Candidate, CandidateDist, CandidateSelector};
44use crate::dependency_provider::UvDependencyProvider;
45use crate::error::{NoSolutionError, ResolveError};
46use crate::fork_indexes::ForkIndexes;
47use crate::fork_strategy::ForkStrategy;
48use crate::fork_urls::ForkUrls;
49use crate::manifest::Manifest;
50use crate::pins::FilePins;
51use crate::preferences::{PreferenceSource, Preferences};
52use crate::pubgrub::{
53    PubGrubDependency, PubGrubDistribution, PubGrubPackage, PubGrubPackageInner, PubGrubPriorities,
54    PubGrubPython,
55};
56use crate::python_requirement::PythonRequirement;
57use crate::resolution::ResolverOutput;
58use crate::resolution_mode::ResolutionStrategy;
59pub(crate) use crate::resolver::availability::{
60    ResolverVersion, UnavailableErrorChain, UnavailablePackage, UnavailableReason,
61    UnavailableVersion,
62};
63use crate::resolver::batch_prefetch::BatchPrefetcher;
64pub use crate::resolver::derivation::DerivationChainBuilder;
65pub use crate::resolver::environment::ResolverEnvironment;
66use crate::resolver::environment::{
67    ForkingPossibility, fork_version_by_marker, fork_version_by_python_requirement,
68};
69pub(crate) use crate::resolver::fork_map::{ForkMap, ForkSet};
70pub use crate::resolver::index::InMemoryIndex;
71use crate::resolver::indexes::Indexes;
72pub use crate::resolver::provider::{
73    DefaultResolverProvider, MetadataResponse, PackageVersionsResult, ResolverProvider,
74    VersionsResponse, WheelMetadataResult,
75};
76pub use crate::resolver::reporter::{BuildId, Reporter};
77use crate::resolver::system::SystemDependency;
78pub(crate) use crate::resolver::urls::Urls;
79use crate::universal_marker::{ConflictMarker, UniversalMarker};
80use crate::yanks::AllowedYanks;
81use crate::{
82    DependencyMode, ExcludeNewer, Exclusions, FlatIndex, Options, ResolutionMode, VersionMap,
83    marker,
84};
85pub(crate) use provider::MetadataUnavailable;
86
87mod availability;
88mod batch_prefetch;
89mod derivation;
90mod environment;
91mod fork_map;
92mod index;
93mod indexes;
94mod provider;
95mod reporter;
96mod system;
97mod urls;
98
/// The number of conflicts a package may accumulate before we re-prioritize and backtrack.
///
/// NOTE(review): the threshold is not referenced in this visible chunk; presumably consumed by
/// the conflict-tracking/`reprioritize_conflicts` machinery in `solve` — confirm at the use site.
const CONFLICT_THRESHOLD: usize = 5;
101
/// The PubGrub-based dependency resolver, generic over the metadata backend
/// (`Provider`, e.g. [`DefaultResolverProvider`] for real network requests, or a
/// custom I/O backend via [`Resolver::new_custom_io`]) and the set of installed
/// packages consulted during resolution.
pub struct Resolver<Provider: ResolverProvider, InstalledPackages: InstalledPackagesProvider> {
    /// Resolution state shared between the PubGrub solver and the prefetcher.
    state: ResolverState<InstalledPackages>,
    /// The backend used to fetch package versions and distribution metadata.
    provider: Provider,
}
106
/// State that is shared between the prefetcher and the PubGrub solver during
/// resolution, across all forks.
struct ResolverState<InstalledPackages: InstalledPackagesProvider> {
    /// The name of the project being resolved, if any; used as the PubGrub root package.
    project: Option<PackageName>,
    /// The direct requirements from the manifest.
    requirements: Vec<Requirement>,
    /// Version constraints layered on top of the requirements.
    constraints: Constraints,
    /// Requirement overrides from the manifest.
    overrides: Overrides,
    /// Packages excluded from resolution, from the manifest.
    excludes: Excludes,
    /// Preferred versions (e.g., from a lockfile); mutated with solved versions as forks complete.
    preferences: Preferences,
    /// The Git resolver, cloned from the build context.
    git: GitResolver,
    /// Capabilities of the configured indexes.
    capabilities: IndexCapabilities,
    /// The configured index locations.
    locations: IndexLocations,
    /// Packages excluded from installation consideration.
    exclusions: Exclusions,
    /// URL requirements derived from the manifest (see `Urls::from_manifest`).
    urls: Urls,
    /// Explicit per-package index assignments derived from the manifest.
    indexes: Indexes,
    /// Whether to resolve direct dependencies only, or the full transitive closure.
    dependency_mode: DependencyMode,
    /// The strategy for validating distribution hashes.
    hasher: HashStrategy,
    /// The environment to resolve for (a specific marker environment, or "universal" mode).
    env: ResolverEnvironment,
    // The environment of the current Python interpreter.
    current_environment: MarkerEnvironment,
    /// Platform tags, when resolving for a specific environment (`None` in universal mode —
    /// TODO confirm against callers).
    tags: Option<Tags>,
    /// The target (and exact) Python version requirement for the resolution.
    python_requirement: PythonRequirement,
    /// Declared conflicts between extras/groups.
    conflicts: Conflicts,
    /// The members of the current workspace, by package name.
    workspace_members: BTreeSet<PackageName>,
    /// Selects candidate versions according to the resolution strategy and preferences.
    selector: CandidateSelector,
    /// Shared in-memory cache of package and version metadata.
    index: InMemoryIndex,
    /// Provider for packages already installed in the target environment.
    installed_packages: InstalledPackages,
    /// Incompatibilities for packages that are entirely unavailable.
    unavailable_packages: DashMap<PackageName, UnavailablePackage>,
    /// Incompatibilities for packages that are unavailable at specific versions.
    incomplete_packages: DashMap<PackageName, DashMap<Version, MetadataUnavailable>>,
    /// The options that were used to configure this resolver.
    options: Options,
    /// The reporter to use for this resolver.
    reporter: Option<Arc<dyn Reporter>>,
}
143
144impl<'a, Context: BuildContext, InstalledPackages: InstalledPackagesProvider>
145    Resolver<DefaultResolverProvider<'a, Context>, InstalledPackages>
146{
147    /// Initialize a new resolver using the default backend doing real requests.
148    ///
149    /// Reads the flat index entries.
150    ///
151    /// # Marker environment
152    ///
153    /// The marker environment is optional.
154    ///
155    /// When a marker environment is not provided, the resolver is said to be
156    /// in "universal" mode. When in universal mode, the resolution produced
157    /// may contain multiple versions of the same package. And thus, in order
158    /// to use the resulting resolution, there must be a "universal"-aware
159    /// reader of the resolution that knows to exclude distributions that can't
160    /// be used in the current environment.
161    ///
162    /// When a marker environment is provided, the resolver is in
163    /// "non-universal" mode, which corresponds to standard `pip` behavior that
164    /// works only for a specific marker environment.
165    pub fn new(
166        manifest: Manifest,
167        options: Options,
168        python_requirement: &'a PythonRequirement,
169        env: ResolverEnvironment,
170        current_environment: &MarkerEnvironment,
171        conflicts: Conflicts,
172        tags: Option<&'a Tags>,
173        flat_index: &'a FlatIndex,
174        index: &'a InMemoryIndex,
175        hasher: &'a HashStrategy,
176        build_context: &'a Context,
177        installed_packages: InstalledPackages,
178        database: DistributionDatabase<'a, Context>,
179    ) -> Result<Self, ResolveError> {
180        let provider = DefaultResolverProvider::new(
181            database,
182            flat_index,
183            tags,
184            python_requirement.target(),
185            AllowedYanks::from_manifest(&manifest, &env, options.dependency_mode),
186            hasher,
187            options.exclude_newer.clone(),
188            build_context.build_options(),
189            build_context.capabilities(),
190        );
191
192        Self::new_custom_io(
193            manifest,
194            options,
195            hasher,
196            env,
197            current_environment,
198            tags.cloned(),
199            python_requirement,
200            conflicts,
201            index,
202            build_context.git(),
203            build_context.capabilities(),
204            build_context.locations(),
205            provider,
206            installed_packages,
207        )
208    }
209}
210
impl<Provider: ResolverProvider, InstalledPackages: InstalledPackagesProvider>
    Resolver<Provider, InstalledPackages>
{
    /// Initialize a new resolver using a user provided backend.
    ///
    /// Unlike [`Resolver::new`], no default provider is constructed; the caller
    /// supplies the [`ResolverProvider`] implementation directly.
    ///
    /// # Errors
    ///
    /// Currently always returns `Ok`; the `Result` return type is retained for
    /// interface stability.
    pub fn new_custom_io(
        manifest: Manifest,
        options: Options,
        hasher: &HashStrategy,
        env: ResolverEnvironment,
        current_environment: &MarkerEnvironment,
        tags: Option<Tags>,
        python_requirement: &PythonRequirement,
        conflicts: Conflicts,
        index: &InMemoryIndex,
        git: &GitResolver,
        capabilities: &IndexCapabilities,
        locations: &IndexLocations,
        provider: Provider,
        installed_packages: InstalledPackages,
    ) -> Result<Self, ResolveError> {
        // Derive the candidate selector, URL map, and index map from the manifest
        // up front; the remaining manifest fields are moved into the state as-is.
        let state = ResolverState {
            index: index.clone(),
            git: git.clone(),
            capabilities: capabilities.clone(),
            selector: CandidateSelector::for_resolution(&options, &manifest, &env),
            dependency_mode: options.dependency_mode,
            urls: Urls::from_manifest(&manifest, &env, git, options.dependency_mode),
            indexes: Indexes::from_manifest(&manifest, &env, options.dependency_mode),
            project: manifest.project,
            workspace_members: manifest.workspace_members,
            requirements: manifest.requirements,
            constraints: manifest.constraints,
            overrides: manifest.overrides,
            excludes: manifest.excludes,
            preferences: manifest.preferences,
            exclusions: manifest.exclusions,
            hasher: hasher.clone(),
            locations: locations.clone(),
            env,
            current_environment: current_environment.clone(),
            tags,
            python_requirement: python_requirement.clone(),
            conflicts,
            installed_packages,
            unavailable_packages: DashMap::default(),
            incomplete_packages: DashMap::default(),
            options,
            reporter: None,
        };
        Ok(Self { state, provider })
    }

    /// Set the [`Reporter`] to use for this resolver.
    ///
    /// Consumes `self` and returns a copy with the reporter attached to both the
    /// shared state and the provider (via a derived distribution reporter).
    #[must_use]
    pub fn with_reporter(self, reporter: Arc<dyn Reporter>) -> Self {
        Self {
            state: ResolverState {
                reporter: Some(reporter.clone()),
                ..self.state
            },
            provider: self
                .provider
                .with_reporter(reporter.into_distribution_reporter()),
        }
    }

    /// Resolve a set of requirements into a set of pinned versions.
    ///
    /// Runs the (synchronous) PubGrub solver on a dedicated OS thread while the
    /// async metadata fetcher runs on the current task; the two communicate via
    /// an mpsc request channel, and the solver's result is handed back over a
    /// oneshot channel.
    pub async fn resolve(self) -> Result<ResolverOutput, ResolveError> {
        // Share state and provider between the fetcher task and the solver thread.
        let state = Arc::new(self.state);
        let provider = Arc::new(self.provider);

        // A channel to fetch package metadata (e.g., given `flask`, fetch all versions) and version
        // metadata (e.g., given `flask==1.0.0`, fetch the metadata for that version).
        // Channel size is set large to accommodate batch prefetching.
        let (request_sink, request_stream) = mpsc::channel(300);

        // Run the fetcher.
        let requests_fut = state.clone().fetch(provider.clone(), request_stream).fuse();

        // Spawn the PubGrub solver on a dedicated thread. The solver is synchronous
        // and CPU-bound, so it must not run on the async executor.
        let solver = state.clone();
        let (tx, rx) = oneshot::channel();
        thread::Builder::new()
            .name("uv-resolver".into())
            .spawn(move || {
                let result = solver.solve(&request_sink);

                // This may fail if the main thread returned early due to an error.
                let _ = tx.send(result);
            })
            .unwrap();

        // A closed oneshot means the solver thread died without sending a result.
        let resolve_fut = async move { rx.await.map_err(|_| ResolveError::ChannelClosed) };

        // Wait for both to complete; an error in either side aborts the other.
        let ((), resolution) = tokio::try_join!(requests_fut, resolve_fut)?;

        state.on_complete();
        resolution
    }
}
312
313impl<InstalledPackages: InstalledPackagesProvider> ResolverState<InstalledPackages> {
314    #[instrument(skip_all)]
315    fn solve(
316        self: Arc<Self>,
317        request_sink: &Sender<Request>,
318    ) -> Result<ResolverOutput, ResolveError> {
319        debug!(
320            "Solving with installed Python version: {}",
321            self.python_requirement.exact()
322        );
323        debug!(
324            "Solving with target Python version: {}",
325            self.python_requirement.target()
326        );
327
328        let mut visited = FxHashSet::default();
329
330        let root = PubGrubPackage::from(PubGrubPackageInner::Root(self.project.clone()));
331        let pubgrub = State::init(root.clone(), MIN_VERSION.clone());
332        let prefetcher = BatchPrefetcher::new(
333            self.capabilities.clone(),
334            self.index.clone(),
335            request_sink.clone(),
336        );
337        let state = ForkState::new(
338            pubgrub,
339            self.env.clone(),
340            self.python_requirement.clone(),
341            prefetcher,
342        );
343        let mut preferences = self.preferences.clone();
344        let mut forked_states = self.env.initial_forked_states(state)?;
345        let mut resolutions = vec![];
346
347        'FORK: while let Some(mut state) = forked_states.pop() {
348            if let Some(split) = state.env.end_user_fork_display() {
349                let requires_python = state.python_requirement.target();
350                debug!("Solving {split} (requires-python: {requires_python:?})");
351            }
352            let start = Instant::now();
353            loop {
354                let highest_priority_pkg =
355                    if let Some(initial) = state.initial_id.take() {
356                        // If we just forked based on `requires-python`, we can skip unit
357                        // propagation, since we already propagated the package that initiated
358                        // the fork.
359                        initial
360                    } else {
361                        // Run unit propagation.
362                        let result = state.pubgrub.unit_propagation(state.next);
363                        match result {
364                            Err(err) => {
365                                // If unit propagation failed, there is no solution.
366                                return Err(self.convert_no_solution_err(
367                                    err,
368                                    state.fork_urls,
369                                    state.fork_indexes,
370                                    state.env,
371                                    self.current_environment.clone(),
372                                    Some(&self.options.exclude_newer),
373                                    &visited,
374                                ));
375                            }
376                            Ok(conflicts) => {
377                                for (affected, incompatibility) in conflicts {
378                                    // Conflict tracking: If there was a conflict, track affected and
379                                    // culprit for all root cause incompatibilities
380                                    state.record_conflict(affected, None, incompatibility);
381                                }
382                            }
383                        }
384
385                        // Pre-visit all candidate packages, to allow metadata to be fetched in parallel.
386                        if self.dependency_mode.is_transitive() {
387                            Self::pre_visit(
388                                state
389                                    .pubgrub
390                                    .partial_solution
391                                    .prioritized_packages()
392                                    .map(|(id, range)| (&state.pubgrub.package_store[id], range)),
393                                &self.urls,
394                                &self.indexes,
395                                &state.python_requirement,
396                                request_sink,
397                            )?;
398                        }
399
400                        Self::reprioritize_conflicts(&mut state);
401
402                        trace!(
403                            "Assigned packages: {}",
404                            state
405                                .pubgrub
406                                .partial_solution
407                                .extract_solution()
408                                .filter(|(p, _)| !state.pubgrub.package_store[*p].is_proxy())
409                                .map(|(p, v)| format!("{}=={}", state.pubgrub.package_store[p], v))
410                                .join(", ")
411                        );
412                        // Choose a package.
413                        // We aren't allowed to use the term intersection as it would extend the
414                        // mutable borrow of `state`.
415                        let Some((highest_priority_pkg, _)) =
416                            state.pubgrub.partial_solution.pick_highest_priority_pkg(
417                                |id, _range| state.priorities.get(&state.pubgrub.package_store[id]),
418                            )
419                        else {
420                            // All packages have been assigned, the fork has been successfully resolved
421                            if tracing::enabled!(Level::DEBUG) {
422                                state.prefetcher.log_tried_versions();
423                            }
424                            debug!(
425                                "{} resolution took {:.3}s",
426                                state.env,
427                                start.elapsed().as_secs_f32()
428                            );
429
430                            let resolution = state.into_resolution();
431
432                            // Walk over the selected versions, and mark them as preferences. We have to
433                            // add forks back as to not override the preferences from the lockfile for
434                            // the next fork
435                            //
436                            // If we're using a resolution mode that varies based on whether a dependency is
437                            // direct or transitive, skip preferences, as we risk adding a preference from
438                            // one fork (in which it's a transitive dependency) to another fork (in which
439                            // it's direct).
440                            if matches!(
441                                self.options.resolution_mode,
442                                ResolutionMode::Lowest | ResolutionMode::Highest
443                            ) {
444                                for (package, version) in &resolution.nodes {
445                                    preferences.insert(
446                                        package.name.clone(),
447                                        package.index.clone(),
448                                        resolution
449                                            .env
450                                            .try_universal_markers()
451                                            .unwrap_or(UniversalMarker::TRUE),
452                                        version.clone(),
453                                        PreferenceSource::Resolver,
454                                    );
455                                }
456                            }
457
458                            resolutions.push(resolution);
459                            continue 'FORK;
460                        };
461                        trace!(
462                            "Chose package for decision: {}. remaining choices: {}",
463                            state.pubgrub.package_store[highest_priority_pkg],
464                            state
465                                .pubgrub
466                                .partial_solution
467                                .undecided_packages()
468                                .filter(|(p, _)| !state.pubgrub.package_store[**p].is_proxy())
469                                .map(|(p, _)| state.pubgrub.package_store[*p].to_string())
470                                .join(", ")
471                        );
472
473                        highest_priority_pkg
474                    };
475
476                state.next = highest_priority_pkg;
477
478                // TODO(charlie): Remove as many usages of `next_package` as we can.
479                let next_id = state.next;
480                let next_package = &state.pubgrub.package_store[state.next];
481
482                let url = next_package
483                    .name()
484                    .and_then(|name| state.fork_urls.get(name));
485                let index = next_package
486                    .name()
487                    .and_then(|name| state.fork_indexes.get(name));
488
489                // Consider:
490                // ```toml
491                // dependencies = [
492                //   "iniconfig == 1.1.1 ; python_version < '3.12'",
493                //   "iniconfig @ https://files.pythonhosted.org/packages/ef/a6/62565a6e1cf69e10f5727360368e451d4b7f58beeac6173dc9db836a5b46/iniconfig-2.0.0-py3-none-any.whl ; python_version >= '3.12'",
494                // ]
495                // ```
496                // In the `python_version < '3.12'` case, we haven't pre-visited `iniconfig` yet,
497                // since we weren't sure whether it might also be a URL requirement when
498                // transforming the requirements. For that case, we do another request here
499                // (idempotent due to caching).
500                self.request_package(next_package, url, index, request_sink)?;
501
502                let version = if let Some(version) = state.initial_version.take() {
503                    // If we just forked based on platform support, we can skip version selection,
504                    // since the fork operation itself already selected the appropriate version for
505                    // the platform.
506                    version
507                } else {
508                    let term_intersection = state
509                        .pubgrub
510                        .partial_solution
511                        .term_intersection_for_package(next_id)
512                        .expect("a package was chosen but we don't have a term");
513                    let decision = self.choose_version(
514                        next_package,
515                        next_id,
516                        index.map(IndexMetadata::url),
517                        term_intersection.unwrap_positive(),
518                        &mut state.pins,
519                        &preferences,
520                        &state.fork_urls,
521                        &state.env,
522                        &state.python_requirement,
523                        &state.pubgrub,
524                        &mut visited,
525                        request_sink,
526                    )?;
527
528                    // Pick the next compatible version.
529                    let Some(version) = decision else {
530                        debug!("No compatible version found for: {next_package}");
531
532                        let term_intersection = state
533                            .pubgrub
534                            .partial_solution
535                            .term_intersection_for_package(next_id)
536                            .expect("a package was chosen but we don't have a term");
537
538                        if let PubGrubPackageInner::Package { name, .. } = &**next_package {
539                            // Check if the decision was due to the package being unavailable
540                            if let Some(entry) = self.unavailable_packages.get(name) {
541                                state
542                                    .pubgrub
543                                    .add_incompatibility(Incompatibility::custom_term(
544                                        next_id,
545                                        term_intersection.clone(),
546                                        UnavailableReason::Package(entry.clone()),
547                                    ));
548                                continue;
549                            }
550                        }
551
552                        state
553                            .pubgrub
554                            .add_incompatibility(Incompatibility::no_versions(
555                                next_id,
556                                term_intersection.clone(),
557                            ));
558                        continue;
559                    };
560
561                    let version = match version {
562                        ResolverVersion::Unforked(version) => version,
563                        ResolverVersion::Forked(forks) => {
564                            forked_states.extend(self.version_forks_to_fork_states(state, forks));
565                            continue 'FORK;
566                        }
567                        ResolverVersion::Unavailable(version, reason) => {
568                            state.add_unavailable_version(version, reason);
569                            continue;
570                        }
571                    };
572
573                    // Only consider registry packages for prefetch.
574                    if url.is_none() {
575                        state.prefetcher.prefetch_batches(
576                            next_package,
577                            index,
578                            &version,
579                            term_intersection.unwrap_positive(),
580                            state
581                                .pubgrub
582                                .partial_solution
583                                .unchanging_term_for_package(next_id),
584                            &state.python_requirement,
585                            &self.selector,
586                            &state.env,
587                        )?;
588                    }
589
590                    version
591                };
592
593                state.prefetcher.version_tried(next_package, &version);
594
595                self.on_progress(next_package, &version);
596
597                if !state
598                    .added_dependencies
599                    .entry(next_id)
600                    .or_default()
601                    .insert(version.clone())
602                {
603                    // `dep_incompats` are already in `incompatibilities` so we know there are not satisfied
604                    // terms and can add the decision directly.
605                    state
606                        .pubgrub
607                        .partial_solution
608                        .add_decision(next_id, version);
609                    continue;
610                }
611
612                // Retrieve that package dependencies.
613                let forked_deps = self.get_dependencies_forking(
614                    next_id,
615                    next_package,
616                    &version,
617                    &state.pins,
618                    &state.fork_urls,
619                    &state.env,
620                    &state.python_requirement,
621                    &state.pubgrub,
622                )?;
623
624                match forked_deps {
625                    ForkedDependencies::Unavailable(reason) => {
626                        // Then here, if we get a reason that we consider unrecoverable, we should
627                        // show the derivation chain.
628                        state
629                            .pubgrub
630                            .add_incompatibility(Incompatibility::custom_version(
631                                next_id,
632                                version.clone(),
633                                UnavailableReason::Version(reason),
634                            ));
635                    }
636                    ForkedDependencies::Unforked(dependencies) => {
637                        // Enrich the state with any URLs, etc.
638                        state
639                            .visit_package_version_dependencies(
640                                next_id,
641                                &version,
642                                &self.urls,
643                                &self.indexes,
644                                &dependencies,
645                                &self.git,
646                                &self.workspace_members,
647                                self.selector.resolution_strategy(),
648                            )
649                            .map_err(|err| {
650                                enrich_dependency_error(err, next_id, &version, &state.pubgrub)
651                            })?;
652
653                        // Emit a request to fetch the metadata for each registry package.
654                        self.visit_dependencies(&dependencies, &state, request_sink)
655                            .map_err(|err| {
656                                enrich_dependency_error(err, next_id, &version, &state.pubgrub)
657                            })?;
658
659                        // Add the dependencies to the state.
660                        state.add_package_version_dependencies(next_id, &version, dependencies);
661                    }
662                    ForkedDependencies::Forked {
663                        mut forks,
664                        diverging_packages,
665                    } => {
666                        debug!(
667                            "Pre-fork {} took {:.3}s",
668                            state.env,
669                            start.elapsed().as_secs_f32()
670                        );
671
672                        // Prioritize the forks.
673                        match (self.options.fork_strategy, self.options.resolution_mode) {
674                            (ForkStrategy::Fewest, _) | (_, ResolutionMode::Lowest) => {
675                                // Prefer solving forks with lower Python bounds, since they're more
676                                // likely to produce solutions that work for forks with higher
677                                // Python bounds (whereas the inverse is not true).
678                                forks.sort_by(|a, b| {
679                                    a.cmp_requires_python(b)
680                                        .reverse()
681                                        .then_with(|| a.cmp_upper_bounds(b))
682                                });
683                            }
684                            (ForkStrategy::RequiresPython, _) => {
685                                // Otherwise, prefer solving forks with higher Python bounds, since
686                                // we want to prioritize choosing the latest-compatible package
687                                // version for each Python version.
688                                forks.sort_by(|a, b| {
689                                    a.cmp_requires_python(b).then_with(|| a.cmp_upper_bounds(b))
690                                });
691                            }
692                        }
693
694                        for new_fork_state in self.forks_to_fork_states(
695                            state,
696                            &version,
697                            forks,
698                            request_sink,
699                            &diverging_packages,
700                        ) {
701                            forked_states.push(new_fork_state?);
702                        }
703                        continue 'FORK;
704                    }
705                }
706            }
707        }
708        if resolutions.len() > 1 {
709            info!(
710                "Solved your requirements for {} environments",
711                resolutions.len()
712            );
713        }
714        if tracing::enabled!(Level::DEBUG) {
715            for resolution in &resolutions {
716                if let Some(env) = resolution.env.end_user_fork_display() {
717                    let packages: FxHashSet<_> = resolution
718                        .nodes
719                        .keys()
720                        .map(|package| &package.name)
721                        .collect();
722                    debug!(
723                        "Distinct solution for {env} with {} package(s)",
724                        packages.len()
725                    );
726                }
727            }
728        }
729        for resolution in &resolutions {
730            Self::trace_resolution(resolution);
731        }
732        ResolverOutput::from_state(
733            &resolutions,
734            &self.requirements,
735            &self.constraints,
736            &self.overrides,
737            &self.preferences,
738            &self.index,
739            &self.git,
740            &self.python_requirement,
741            &self.conflicts,
742            self.selector.resolution_strategy(),
743            self.options.clone(),
744        )
745    }
746
747    /// Change the priority of often conflicting packages and backtrack.
748    ///
749    /// To be called after unit propagation.
750    fn reprioritize_conflicts(state: &mut ForkState) {
751        for package in state.conflict_tracker.prioritize.drain(..) {
752            let changed = state
753                .priorities
754                .mark_conflict_early(&state.pubgrub.package_store[package]);
755            if changed {
756                debug!(
757                    "Package {} has too many conflicts (affected), prioritizing",
758                    &state.pubgrub.package_store[package]
759                );
760            } else {
761                debug!(
762                    "Package {} has too many conflicts (affected), already {:?}",
763                    state.pubgrub.package_store[package],
764                    state.priorities.get(&state.pubgrub.package_store[package])
765                );
766            }
767        }
768
769        for package in state.conflict_tracker.deprioritize.drain(..) {
770            let changed = state
771                .priorities
772                .mark_conflict_late(&state.pubgrub.package_store[package]);
773            if changed {
774                debug!(
775                    "Package {} has too many conflicts (culprit), deprioritizing and backtracking",
776                    state.pubgrub.package_store[package],
777                );
778                let backtrack_level = state.pubgrub.backtrack_package(package);
779                if let Some(backtrack_level) = backtrack_level {
780                    debug!("Backtracked {backtrack_level} decisions");
781                } else {
782                    debug!(
783                        "Package {} is not decided, cannot backtrack",
784                        state.pubgrub.package_store[package]
785                    );
786                }
787            } else {
788                debug!(
789                    "Package {} has too many conflicts (culprit), already {:?}",
790                    state.pubgrub.package_store[package],
791                    state.priorities.get(&state.pubgrub.package_store[package])
792                );
793            }
794        }
795    }
796
797    /// When trace level logging is enabled, we dump the final
798    /// set of resolutions, including markers, to help with
799    /// debugging. Namely, this tells use precisely the state
800    /// emitted by the resolver before going off to construct a
801    /// resolution graph.
802    fn trace_resolution(combined: &Resolution) {
803        if !tracing::enabled!(Level::TRACE) {
804            return;
805        }
806        trace!("Resolution: {:?}", combined.env);
807        for edge in &combined.edges {
808            trace!(
809                "Resolution edge: {} -> {}",
810                edge.from
811                    .as_ref()
812                    .map(PackageName::as_str)
813                    .unwrap_or("ROOT"),
814                edge.to,
815            );
816            // The unwraps below are OK because `write`ing to
817            // a String can never fail (except for OOM).
818            let mut msg = String::new();
819            write!(msg, "{}", edge.from_version).unwrap();
820            if let Some(ref extra) = edge.from_extra {
821                write!(msg, " (extra: {extra})").unwrap();
822            }
823            if let Some(ref dev) = edge.from_group {
824                write!(msg, " (group: {dev})").unwrap();
825            }
826
827            write!(msg, " -> ").unwrap();
828
829            write!(msg, "{}", edge.to_version).unwrap();
830            if let Some(ref extra) = edge.to_extra {
831                write!(msg, " (extra: {extra})").unwrap();
832            }
833            if let Some(ref dev) = edge.to_group {
834                write!(msg, " (group: {dev})").unwrap();
835            }
836            if let Some(marker) = edge.marker.contents() {
837                write!(msg, " ; {marker}").unwrap();
838            }
839            trace!("Resolution edge:     {msg}");
840        }
841    }
842
    /// Convert the dependency [`Fork`]s into [`ForkState`]s.
    ///
    /// The incoming state is moved into the first fork and cloned once per
    /// additional fork; each resulting state is restricted to its fork's
    /// environment and seeded with that fork's dependencies.
    fn forks_to_fork_states<'a>(
        &'a self,
        current_state: ForkState,
        version: &'a Version,
        forks: Vec<Fork>,
        request_sink: &'a Sender<Request>,
        diverging_packages: &'a [PackageName],
    ) -> impl Iterator<Item = Result<ForkState, ResolveError>> + 'a {
        debug!(
            "Splitting resolution on {}=={} over {} into {} resolution{} with separate markers",
            current_state.pubgrub.package_store[current_state.next],
            version,
            diverging_packages
                .iter()
                .map(ToString::to_string)
                .join(", "),
            forks.len(),
            if forks.len() == 1 { "" } else { "s" }
        );
        // A fork, by construction, always splits into at least two environments.
        assert!(forks.len() >= 2);
        // This is a somewhat tortured technique to ensure
        // that our resolver state is only cloned as much
        // as it needs to be. We basically move the state
        // into `forked_states`, and then only clone it if
        // there is at least one more fork to visit.
        let package = current_state.next;
        let mut cur_state = Some(current_state);
        let forks_len = forks.len();
        forks
            .into_iter()
            .enumerate()
            .map(move |(i, fork)| {
                let is_last = i == forks_len - 1;
                // The last fork reuses the moved state; earlier forks clone it.
                let forked_state = cur_state.take().unwrap();
                if !is_last {
                    cur_state = Some(forked_state.clone());
                }

                let env = fork.env.clone();
                (fork, forked_state.with_env(env))
            })
            .map(move |(fork, mut forked_state)| {
                // Enrich the state with any URLs, etc.
                forked_state
                    .visit_package_version_dependencies(
                        package,
                        version,
                        &self.urls,
                        &self.indexes,
                        &fork.dependencies,
                        &self.git,
                        &self.workspace_members,
                        self.selector.resolution_strategy(),
                    )
                    .map_err(|err| {
                        enrich_dependency_error(err, package, version, &forked_state.pubgrub)
                    })?;

                // Emit a request to fetch the metadata for each registry package.
                self.visit_dependencies(&fork.dependencies, &forked_state, request_sink)
                    .map_err(|err| {
                        enrich_dependency_error(err, package, version, &forked_state.pubgrub)
                    })?;

                // Add the dependencies to the state.
                forked_state.add_package_version_dependencies(package, version, fork.dependencies);

                Ok(forked_state)
            })
    }
914
915    /// Convert the dependency [`Fork`]s into [`ForkState`]s.
916    #[allow(clippy::unused_self)]
917    fn version_forks_to_fork_states(
918        &self,
919        current_state: ForkState,
920        forks: Vec<VersionFork>,
921    ) -> impl Iterator<Item = ForkState> + '_ {
922        // This is a somewhat tortured technique to ensure
923        // that our resolver state is only cloned as much
924        // as it needs to be. We basically move the state
925        // into `forked_states`, and then only clone it if
926        // there is at least one more fork to visit.
927        let mut cur_state = Some(current_state);
928        let forks_len = forks.len();
929        forks.into_iter().enumerate().map(move |(i, fork)| {
930            let is_last = i == forks_len - 1;
931            let mut forked_state = cur_state.take().unwrap();
932            if !is_last {
933                cur_state = Some(forked_state.clone());
934            }
935            forked_state.initial_id = Some(fork.id);
936            forked_state.initial_version = fork.version;
937            forked_state.with_env(fork.env)
938        })
939    }
940
941    /// Visit a set of [`PubGrubDependency`] entities prior to selection.
942    fn visit_dependencies(
943        &self,
944        dependencies: &[PubGrubDependency],
945        state: &ForkState,
946        request_sink: &Sender<Request>,
947    ) -> Result<(), ResolveError> {
948        for dependency in dependencies {
949            let PubGrubDependency {
950                package,
951                version: _,
952                parent: _,
953                url: _,
954            } = dependency;
955            let url = package.name().and_then(|name| state.fork_urls.get(name));
956            let index = package.name().and_then(|name| state.fork_indexes.get(name));
957            self.visit_package(package, url, index, request_sink)?;
958        }
959        Ok(())
960    }
961
962    /// Visit a [`PubGrubPackage`] prior to selection. This should be called on a [`PubGrubPackage`]
963    /// before it is selected, to allow metadata to be fetched in parallel.
964    fn visit_package(
965        &self,
966        package: &PubGrubPackage,
967        url: Option<&VerbatimParsedUrl>,
968        index: Option<&IndexMetadata>,
969        request_sink: &Sender<Request>,
970    ) -> Result<(), ResolveError> {
971        // Ignore unresolved URL packages, i.e., packages that use a direct URL in some forks.
972        if url.is_none()
973            && package
974                .name()
975                .map(|name| self.urls.any_url(name))
976                .unwrap_or(true)
977        {
978            return Ok(());
979        }
980
981        self.request_package(package, url, index, request_sink)
982    }
983
984    fn request_package(
985        &self,
986        package: &PubGrubPackage,
987        url: Option<&VerbatimParsedUrl>,
988        index: Option<&IndexMetadata>,
989        request_sink: &Sender<Request>,
990    ) -> Result<(), ResolveError> {
991        // Only request real packages.
992        let Some(name) = package.name_no_root() else {
993            return Ok(());
994        };
995
996        if let Some(url) = url {
997            // Verify that the package is allowed under the hash-checking policy.
998            if !self.hasher.allows_url(&url.verbatim) {
999                return Err(ResolveError::UnhashedPackage(name.clone()));
1000            }
1001
1002            // Emit a request to fetch the metadata for this distribution.
1003            let dist = Dist::from_url(name.clone(), url.clone())?;
1004            if self.index.distributions().register(dist.version_id()) {
1005                request_sink.blocking_send(Request::Dist(dist))?;
1006            }
1007        } else if let Some(index) = index {
1008            // Emit a request to fetch the metadata for this package on the index.
1009            if self
1010                .index
1011                .explicit()
1012                .register((name.clone(), index.url().clone()))
1013            {
1014                request_sink.blocking_send(Request::Package(name.clone(), Some(index.clone())))?;
1015            }
1016        } else {
1017            // Emit a request to fetch the metadata for this package.
1018            if self.index.implicit().register(name.clone()) {
1019                request_sink.blocking_send(Request::Package(name.clone(), None))?;
1020            }
1021        }
1022        Ok(())
1023    }
1024
1025    /// Visit the set of [`PubGrubPackage`] candidates prior to selection. This allows us to fetch
1026    /// metadata for all packages in parallel.
1027    fn pre_visit<'data>(
1028        packages: impl Iterator<Item = (&'data PubGrubPackage, &'data Range<Version>)>,
1029        urls: &Urls,
1030        indexes: &Indexes,
1031        python_requirement: &PythonRequirement,
1032        request_sink: &Sender<Request>,
1033    ) -> Result<(), ResolveError> {
1034        // Iterate over the potential packages, and fetch file metadata for any of them. These
1035        // represent our current best guesses for the versions that we _might_ select.
1036        for (package, range) in packages {
1037            let PubGrubPackageInner::Package {
1038                name,
1039                extra: None,
1040                group: None,
1041                marker: MarkerTree::TRUE,
1042            } = &**package
1043            else {
1044                continue;
1045            };
1046            // Avoid pre-visiting packages that have any URLs in any fork. At this point we can't
1047            // tell whether they are registry distributions or which url they use.
1048            if urls.any_url(name) {
1049                continue;
1050            }
1051            // Avoid visiting packages that may use an explicit index.
1052            if indexes.contains_key(name) {
1053                continue;
1054            }
1055            request_sink.blocking_send(Request::Prefetch(
1056                name.clone(),
1057                range.clone(),
1058                python_requirement.clone(),
1059            ))?;
1060        }
1061        Ok(())
1062    }
1063
1064    /// Given a candidate package, choose the next version in range to try.
1065    ///
1066    /// Returns `None` when there are no versions in the given range, rejecting the current partial
1067    /// solution.
1068    // TODO(konsti): re-enable tracing. This trace is crucial to understanding the
1069    // tracing-durations-export diagrams, but it took ~5% resolver thread runtime for apache-airflow
1070    // when I last measured.
1071    #[cfg_attr(feature = "tracing-durations-export", instrument(skip_all, fields(%package)))]
1072    fn choose_version(
1073        &self,
1074        package: &PubGrubPackage,
1075        id: Id<PubGrubPackage>,
1076        index: Option<&IndexUrl>,
1077        range: &Range<Version>,
1078        pins: &mut FilePins,
1079        preferences: &Preferences,
1080        fork_urls: &ForkUrls,
1081        env: &ResolverEnvironment,
1082        python_requirement: &PythonRequirement,
1083        pubgrub: &State<UvDependencyProvider>,
1084        visited: &mut FxHashSet<PackageName>,
1085        request_sink: &Sender<Request>,
1086    ) -> Result<Option<ResolverVersion>, ResolveError> {
1087        match &**package {
1088            PubGrubPackageInner::Root(_) => {
1089                Ok(Some(ResolverVersion::Unforked(MIN_VERSION.clone())))
1090            }
1091
1092            PubGrubPackageInner::Python(_) => {
1093                // Dependencies on Python are only added when a package is incompatible; as such,
1094                // we don't need to do anything here.
1095                Ok(None)
1096            }
1097
1098            PubGrubPackageInner::System(_) => {
1099                // We don't care what the actual version is here, just that it's consistent across
1100                // the dependency graph.
1101                let Some(version) = range.as_singleton() else {
1102                    return Ok(None);
1103                };
1104                Ok(Some(ResolverVersion::Unforked(version.clone())))
1105            }
1106
1107            PubGrubPackageInner::Marker { name, .. }
1108            | PubGrubPackageInner::Extra { name, .. }
1109            | PubGrubPackageInner::Group { name, .. }
1110            | PubGrubPackageInner::Package { name, .. } => {
1111                if let Some(url) = package.name().and_then(|name| fork_urls.get(name)) {
1112                    self.choose_version_url(name, range, url, python_requirement)
1113                } else {
1114                    self.choose_version_registry(
1115                        package,
1116                        id,
1117                        name,
1118                        index,
1119                        range,
1120                        preferences,
1121                        env,
1122                        python_requirement,
1123                        pubgrub,
1124                        pins,
1125                        visited,
1126                        request_sink,
1127                    )
1128                }
1129            }
1130        }
1131    }
1132
1133    /// Select a version for a URL requirement. Since there is only one version per URL, we return
1134    /// that version if it is in range and `None` otherwise.
1135    fn choose_version_url(
1136        &self,
1137        name: &PackageName,
1138        range: &Range<Version>,
1139        url: &VerbatimParsedUrl,
1140        python_requirement: &PythonRequirement,
1141    ) -> Result<Option<ResolverVersion>, ResolveError> {
1142        debug!(
1143            "Searching for a compatible version of {name} @ {} ({range})",
1144            url.verbatim
1145        );
1146
1147        let dist = PubGrubDistribution::from_url(name, url);
1148        let response = self
1149            .index
1150            .distributions()
1151            .wait_blocking(&dist.version_id())
1152            .ok_or_else(|| ResolveError::UnregisteredTask(dist.version_id().to_string()))?;
1153
1154        // If we failed to fetch the metadata for a URL, we can't proceed.
1155        let metadata = match &*response {
1156            MetadataResponse::Found(archive) => &archive.metadata,
1157            MetadataResponse::Unavailable(reason) => {
1158                self.unavailable_packages
1159                    .insert(name.clone(), reason.into());
1160                return Ok(None);
1161            }
1162            // TODO(charlie): Add derivation chain for URL dependencies. In practice, this isn't
1163            // critical since we fetch URL dependencies _prior_ to invoking the resolver.
1164            MetadataResponse::Error(dist, err) => {
1165                return Err(ResolveError::Dist(
1166                    DistErrorKind::from_requested_dist(dist, &**err),
1167                    dist.clone(),
1168                    DerivationChain::default(),
1169                    err.clone(),
1170                ));
1171            }
1172        };
1173
1174        let version = &metadata.version;
1175
1176        // The version is incompatible with the requirement.
1177        if !range.contains(version) {
1178            return Ok(None);
1179        }
1180
1181        // The version is incompatible due to its Python requirement.
1182        if let Some(requires_python) = metadata.requires_python.as_ref() {
1183            if !python_requirement
1184                .installed()
1185                .is_contained_by(requires_python)
1186            {
1187                return Ok(Some(ResolverVersion::Unavailable(
1188                    version.clone(),
1189                    UnavailableVersion::IncompatibleDist(IncompatibleDist::Source(
1190                        IncompatibleSource::RequiresPython(
1191                            requires_python.clone(),
1192                            PythonRequirementKind::Installed,
1193                        ),
1194                    )),
1195                )));
1196            }
1197            if !python_requirement.target().is_contained_by(requires_python) {
1198                return Ok(Some(ResolverVersion::Unavailable(
1199                    version.clone(),
1200                    UnavailableVersion::IncompatibleDist(IncompatibleDist::Source(
1201                        IncompatibleSource::RequiresPython(
1202                            requires_python.clone(),
1203                            PythonRequirementKind::Target,
1204                        ),
1205                    )),
1206                )));
1207            }
1208        }
1209
1210        Ok(Some(ResolverVersion::Unforked(version.clone())))
1211    }
1212
    /// Given a candidate registry requirement, choose the next version in range to try, or `None`
    /// if there is no version in this range.
    ///
    /// May also return a forked result when the chosen version's Python requirement or platform
    /// coverage splits the current environment, or an "unavailable" result when the best candidate
    /// is known to be incompatible.
    fn choose_version_registry(
        &self,
        package: &PubGrubPackage,
        id: Id<PubGrubPackage>,
        name: &PackageName,
        index: Option<&IndexUrl>,
        range: &Range<Version>,
        preferences: &Preferences,
        env: &ResolverEnvironment,
        python_requirement: &PythonRequirement,
        pubgrub: &State<UvDependencyProvider>,
        pins: &mut FilePins,
        visited: &mut FxHashSet<PackageName>,
        request_sink: &Sender<Request>,
    ) -> Result<Option<ResolverVersion>, ResolveError> {
        // Wait for the metadata to be available, from the explicit index if one is pinned for
        // this package, and from the default indexes otherwise.
        let versions_response = if let Some(index) = index {
            self.index
                .explicit()
                .wait_blocking(&(name.clone(), index.clone()))
                .ok_or_else(|| ResolveError::UnregisteredTask(name.to_string()))?
        } else {
            self.index
                .implicit()
                .wait_blocking(name)
                .ok_or_else(|| ResolveError::UnregisteredTask(name.to_string()))?
        };
        // Mark the package as visited, regardless of whether any versions were found.
        visited.insert(name.clone());

        // An unavailable response is recorded (for error reporting) and treated as an empty
        // set of version maps rather than a hard error.
        let version_maps = match *versions_response {
            VersionsResponse::Found(ref version_maps) => version_maps.as_slice(),
            VersionsResponse::NoIndex => {
                self.unavailable_packages
                    .insert(name.clone(), UnavailablePackage::NoIndex);
                &[]
            }
            VersionsResponse::Offline => {
                self.unavailable_packages
                    .insert(name.clone(), UnavailablePackage::Offline);
                &[]
            }
            VersionsResponse::NotFound => {
                self.unavailable_packages
                    .insert(name.clone(), UnavailablePackage::NotFound);
                &[]
            }
        };

        debug!("Searching for a compatible version of {package} ({range})");

        // Find a version.
        let Some(candidate) = self.selector.select(
            name,
            range,
            version_maps,
            preferences,
            &self.installed_packages,
            &self.exclusions,
            index,
            env,
            self.tags.as_ref(),
        ) else {
            // Short circuit: we couldn't find _any_ versions for a package.
            return Ok(None);
        };

        let dist = match candidate.dist() {
            CandidateDist::Compatible(dist) => dist,
            CandidateDist::Incompatible {
                incompatible_dist: incompatibility,
                prioritized_dist: _,
            } => {
                // If the version is incompatible because no distributions are compatible, exit early.
                return Ok(Some(ResolverVersion::Unavailable(
                    candidate.version().clone(),
                    // TODO(charlie): We can avoid this clone; the candidate is dropped here and
                    // owns the incompatibility.
                    UnavailableVersion::IncompatibleDist(incompatibility.clone()),
                )));
            }
        };

        // Check whether the version is incompatible due to its Python requirement.
        if let Some((requires_python, incompatibility)) =
            Self::check_requires_python(dist, python_requirement)
        {
            // Under the `requires-python` fork strategy, in a universal resolution (no marker
            // environment), try to fork on the Python requirement instead of rejecting outright.
            if matches!(self.options.fork_strategy, ForkStrategy::RequiresPython) {
                if env.marker_environment().is_none() {
                    let forks = fork_version_by_python_requirement(
                        requires_python,
                        python_requirement,
                        env,
                    );
                    if !forks.is_empty() {
                        debug!(
                            "Forking Python requirement `{}` on `{}` for {}=={} ({})",
                            python_requirement.target(),
                            requires_python,
                            name,
                            candidate.version(),
                            forks
                                .iter()
                                .map(ToString::to_string)
                                .collect::<Vec<_>>()
                                .join(", ")
                        );
                        let forks = forks
                            .into_iter()
                            .map(|env| VersionFork {
                                env,
                                id,
                                version: None,
                            })
                            .collect();
                        return Ok(Some(ResolverVersion::Forked(forks)));
                    }
                }
            }

            // No fork applies: report the version as unavailable for this Python requirement.
            return Ok(Some(ResolverVersion::Unavailable(
                candidate.version().clone(),
                UnavailableVersion::IncompatibleDist(incompatibility),
            )));
        }

        // Check whether this version covers all supported platforms; and, if not, generate a fork.
        if let Some(forked) = self.fork_version_registry(
            &candidate,
            dist,
            version_maps,
            package,
            id,
            name,
            index,
            range,
            preferences,
            env,
            pubgrub,
            pins,
            request_sink,
        )? {
            return Ok(Some(forked));
        }

        // Derive a human-readable filename for the log message below.
        let filename = match dist.for_installation() {
            ResolvedDistRef::InstallableRegistrySourceDist { sdist, .. } => sdist
                .filename()
                .unwrap_or(Cow::Borrowed("unknown filename")),
            ResolvedDistRef::InstallableRegistryBuiltDist { wheel, .. } => wheel
                .filename()
                .unwrap_or(Cow::Borrowed("unknown filename")),
            ResolvedDistRef::Installed { .. } => Cow::Borrowed("installed"),
        };

        debug!(
            "Selecting: {}=={} [{}] ({})",
            name,
            candidate.version(),
            candidate.choice_kind(),
            filename,
        );
        self.visit_candidate(&candidate, dist, package, name, pins, request_sink)?;

        let version = candidate.version().clone();
        Ok(Some(ResolverVersion::Unforked(version)))
    }
1381
1382    /// Determine whether a candidate covers all supported platforms; and, if not, generate a fork.
1383    ///
1384    /// This only ever applies to versions that lack source distributions And, for now, we only
1385    /// apply it in two cases:
1386    ///
1387    /// 1. Local versions, where the non-local version has greater platform coverage. The intent is
1388    ///    such that, if we're resolving PyTorch, and we choose `torch==2.5.2+cpu`, we want to
1389    ///    fork so that we can select `torch==2.5.2` on macOS (since the `+cpu` variant doesn't
1390    ///    include any macOS wheels).
1391    /// 2. Platforms that the user explicitly marks as "required" (opt-in). For example, the user
1392    ///    might require that the generated resolution always includes wheels for x86 macOS, and
1393    ///    fails entirely if the platform is unsupported.
    fn fork_version_registry(
        &self,
        candidate: &Candidate,
        dist: &CompatibleDist,
        version_maps: &[VersionMap],
        package: &PubGrubPackage,
        id: Id<PubGrubPackage>,
        name: &PackageName,
        index: Option<&IndexUrl>,
        range: &Range<Version>,
        preferences: &Preferences,
        env: &ResolverEnvironment,
        pubgrub: &State<UvDependencyProvider>,
        pins: &mut FilePins,
        request_sink: &Sender<Request>,
    ) -> Result<Option<ResolverVersion>, ResolveError> {
        // `Ok(None)` throughout means: no fork is needed here; the caller proceeds
        // with `candidate` as-is.

        // This only applies to universal resolutions.
        if env.marker_environment().is_some() {
            return Ok(None);
        }

        // If the package is already compatible with all environments (as is the case for
        // packages that include a source distribution), we don't need to fork.
        if dist.implied_markers().is_true() {
            return Ok(None);
        }

        // If the user explicitly marked a platform as required, ensure it has coverage.
        for marker in self.options.required_environments.iter().copied() {
            // If the platform is part of the current environment...
            if env.included_by_marker(marker) {
                // But isn't supported by the distribution...
                if dist.implied_markers().is_disjoint(marker)
                    && !find_environments(id, pubgrub).is_disjoint(marker)
                {
                    // Then we need to fork.
                    let Some((left, right)) = fork_version_by_marker(env, marker) else {
                        // NOTE(review): `fork_version_by_marker` declined to split the
                        // environment on this marker; report the version as unavailable
                        // for the required platform instead of forking.
                        return Ok(Some(ResolverVersion::Unavailable(
                            candidate.version().clone(),
                            UnavailableVersion::IncompatibleDist(IncompatibleDist::Wheel(
                                IncompatibleWheel::MissingPlatform(marker),
                            )),
                        )));
                    };

                    debug!(
                        "Forking on required platform `{}` for {}=={} ({})",
                        marker.try_to_string().unwrap_or_else(|| "true".to_string()),
                        name,
                        candidate.version(),
                        [&left, &right]
                            .iter()
                            .map(ToString::to_string)
                            .collect::<Vec<_>>()
                            .join(", ")
                    );
                    // `version: None` leaves each fork free to re-select a version.
                    let forks = vec![
                        VersionFork {
                            env: left,
                            id,
                            version: None,
                        },
                        VersionFork {
                            env: right,
                            id,
                            version: None,
                        },
                    ];
                    return Ok(Some(ResolverVersion::Forked(forks)));
                }
            }
        }

        // For now, we only apply this to local versions.
        if !candidate.version().is_local() {
            return Ok(None);
        }

        debug!(
            "Looking at local version: {}=={}",
            name,
            candidate.version()
        );

        // If there's a non-local version...
        // Narrow the range to exactly the candidate's version with the local segment
        // stripped, then re-run selection to find the corresponding base candidate.
        let range = range.clone().intersection(&Range::singleton(
            candidate.version().clone().without_local(),
        ));

        let Some(base_candidate) = self.selector.select(
            name,
            &range,
            version_maps,
            preferences,
            &self.installed_packages,
            &self.exclusions,
            index,
            env,
            self.tags.as_ref(),
        ) else {
            return Ok(None);
        };
        let CandidateDist::Compatible(base_dist) = base_candidate.dist() else {
            return Ok(None);
        };

        // ...and the non-local version has greater platform support...
        // `remainder` = markers implied by the base (non-local) dist but NOT by the
        // local dist, i.e. the environments only the base version can serve.
        let mut remainder = {
            let mut remainder = base_dist.implied_markers();
            remainder.and(dist.implied_markers().negate());
            remainder
        };
        if remainder.is_false() {
            return Ok(None);
        }

        // If the remainder isn't relevant to the current environment, there's no need to fork.
        // For example, if we're solving for `sys_platform == 'darwin'` but the remainder is
        // `sys_platform == 'linux'`, we don't need to fork.
        if !env.included_by_marker(remainder) {
            return Ok(None);
        }

        // Similarly, if the local distribution is incompatible with the current environment, then
        // use the base distribution instead (but don't fork).
        if !env.included_by_marker(dist.implied_markers()) {
            // Derive a human-readable filename purely for the debug log below.
            let filename = match dist.for_installation() {
                ResolvedDistRef::InstallableRegistrySourceDist { sdist, .. } => sdist
                    .filename()
                    .unwrap_or(Cow::Borrowed("unknown filename")),
                ResolvedDistRef::InstallableRegistryBuiltDist { wheel, .. } => wheel
                    .filename()
                    .unwrap_or(Cow::Borrowed("unknown filename")),
                ResolvedDistRef::Installed { .. } => Cow::Borrowed("installed"),
            };

            debug!(
                "Preferring non-local candidate: {}=={} [{}] ({})",
                name,
                base_candidate.version(),
                base_candidate.choice_kind(),
                filename,
            );
            self.visit_candidate(
                &base_candidate,
                base_dist,
                package,
                name,
                pins,
                request_sink,
            )?;

            return Ok(Some(ResolverVersion::Unforked(
                base_candidate.version().clone(),
            )));
        }

        // If the implied markers includes _some_ macOS environments, but the remainder doesn't,
        // then we can extend the implied markers to include _all_ macOS environments. Same goes for
        // Linux and Windows.
        //
        // The idea here is that the base version could support (e.g.) ARM macOS, but not Intel
        // macOS. But if _neither_ version supports Intel macOS, we'd rather use `sys_platform == 'darwin'`
        // instead of `sys_platform == 'darwin' and platform_machine == 'arm64'`, since it's much
        // simpler, and _neither_ version will succeed with Intel macOS anyway.
        for value in [
            arcstr::literal!("darwin"),
            arcstr::literal!("linux"),
            arcstr::literal!("win32"),
        ] {
            let sys_platform = MarkerTree::expression(MarkerExpression::String {
                key: MarkerValueString::SysPlatform,
                operator: MarkerOperator::Equal,
                value,
            });
            if dist.implied_markers().is_disjoint(sys_platform)
                && !remainder.is_disjoint(sys_platform)
            {
                remainder.or(sys_platform);
            }
        }

        // Otherwise, we need to fork.
        let Some((base_env, local_env)) = fork_version_by_marker(env, remainder) else {
            return Ok(None);
        };

        debug!(
            "Forking platform for {}=={} ({})",
            name,
            candidate.version(),
            [&base_env, &local_env]
                .iter()
                .map(ToString::to_string)
                .collect::<Vec<_>>()
                .join(", ")
        );
        // Pin and request metadata for both candidates, since both forks proceed.
        self.visit_candidate(candidate, dist, package, name, pins, request_sink)?;
        self.visit_candidate(
            &base_candidate,
            base_dist,
            package,
            name,
            pins,
            request_sink,
        )?;

        // Unlike the required-platform fork above, each fork here is pinned to a
        // concrete version: base version for `base_env`, local version for `local_env`.
        let forks = vec![
            VersionFork {
                env: base_env.clone(),
                id,
                version: Some(base_candidate.version().clone()),
            },
            VersionFork {
                env: local_env.clone(),
                id,
                version: Some(candidate.version().clone()),
            },
        ];
        Ok(Some(ResolverVersion::Forked(forks)))
    }
1615
1616    /// Visit a selected candidate.
1617    fn visit_candidate(
1618        &self,
1619        candidate: &Candidate,
1620        dist: &CompatibleDist,
1621        package: &PubGrubPackage,
1622        name: &PackageName,
1623        pins: &mut FilePins,
1624        request_sink: &Sender<Request>,
1625    ) -> Result<(), ResolveError> {
1626        // We want to return a package pinned to a specific version; but we _also_ want to
1627        // store the exact file that we selected to satisfy that version.
1628        pins.insert(candidate, dist);
1629
1630        // Emit a request to fetch the metadata for this version.
1631        if matches!(&**package, PubGrubPackageInner::Package { .. }) {
1632            if self.dependency_mode.is_transitive() {
1633                if self.index.distributions().register(candidate.version_id()) {
1634                    if name != dist.name() {
1635                        return Err(ResolveError::MismatchedPackageName {
1636                            request: "distribution",
1637                            expected: name.clone(),
1638                            actual: dist.name().clone(),
1639                        });
1640                    }
1641                    // Verify that the package is allowed under the hash-checking policy.
1642                    if !self
1643                        .hasher
1644                        .allows_package(candidate.name(), candidate.version())
1645                    {
1646                        return Err(ResolveError::UnhashedPackage(candidate.name().clone()));
1647                    }
1648
1649                    let request = Request::from(dist.for_resolution());
1650                    request_sink.blocking_send(request)?;
1651                }
1652            }
1653        }
1654
1655        Ok(())
1656    }
1657
1658    /// Check if the distribution is incompatible with the Python requirement, and if so, return
1659    /// the incompatibility.
1660    fn check_requires_python<'dist>(
1661        dist: &'dist CompatibleDist,
1662        python_requirement: &PythonRequirement,
1663    ) -> Option<(&'dist VersionSpecifiers, IncompatibleDist)> {
1664        let requires_python = dist.requires_python()?;
1665        if python_requirement.target().is_contained_by(requires_python) {
1666            None
1667        } else {
1668            let incompatibility = if matches!(dist, CompatibleDist::CompatibleWheel { .. }) {
1669                IncompatibleDist::Wheel(IncompatibleWheel::RequiresPython(
1670                    requires_python.clone(),
1671                    if python_requirement.installed() == python_requirement.target() {
1672                        PythonRequirementKind::Installed
1673                    } else {
1674                        PythonRequirementKind::Target
1675                    },
1676                ))
1677            } else {
1678                IncompatibleDist::Source(IncompatibleSource::RequiresPython(
1679                    requires_python.clone(),
1680                    if python_requirement.installed() == python_requirement.target() {
1681                        PythonRequirementKind::Installed
1682                    } else {
1683                        PythonRequirementKind::Target
1684                    },
1685                ))
1686            };
1687            Some((requires_python, incompatibility))
1688        }
1689    }
1690
1691    /// Given a candidate package and version, return its dependencies.
1692    #[instrument(skip_all, fields(%package, %version))]
1693    fn get_dependencies_forking(
1694        &self,
1695        id: Id<PubGrubPackage>,
1696        package: &PubGrubPackage,
1697        version: &Version,
1698        pins: &FilePins,
1699        fork_urls: &ForkUrls,
1700        env: &ResolverEnvironment,
1701        python_requirement: &PythonRequirement,
1702        pubgrub: &State<UvDependencyProvider>,
1703    ) -> Result<ForkedDependencies, ResolveError> {
1704        let result = self.get_dependencies(
1705            id,
1706            package,
1707            version,
1708            pins,
1709            fork_urls,
1710            env,
1711            python_requirement,
1712            pubgrub,
1713        );
1714        if env.marker_environment().is_some() {
1715            result.map(|deps| match deps {
1716                Dependencies::Available(deps) | Dependencies::Unforkable(deps) => {
1717                    ForkedDependencies::Unforked(deps)
1718                }
1719                Dependencies::Unavailable(err) => ForkedDependencies::Unavailable(err),
1720            })
1721        } else {
1722            Ok(result?.fork(env, python_requirement, &self.conflicts))
1723        }
1724    }
1725
    /// Given a candidate package and version, return its dependencies.
    ///
    /// Dispatches on the kind of PubGrub package: the virtual root yields the user's
    /// direct requirements; a concrete package yields its (possibly extra-/group-
    /// scoped) metadata requirements; marker/extra/group proxy packages expand into
    /// dependencies on their base package.
    #[instrument(skip_all, fields(%package, %version))]
    fn get_dependencies(
        &self,
        id: Id<PubGrubPackage>,
        package: &PubGrubPackage,
        version: &Version,
        pins: &FilePins,
        fork_urls: &ForkUrls,
        env: &ResolverEnvironment,
        python_requirement: &PythonRequirement,
        pubgrub: &State<UvDependencyProvider>,
    ) -> Result<Dependencies, ResolveError> {
        // A URL pinned for this package in the current fork overrides registry lookup.
        let url = package.name().and_then(|name| fork_urls.get(name));
        let dependencies = match &**package {
            PubGrubPackageInner::Root(_) => {
                // The root has no dependency groups; pass an empty map.
                let no_dev_deps = BTreeMap::default();
                let requirements = self.flatten_requirements(
                    &self.requirements,
                    &no_dev_deps,
                    None,
                    None,
                    None,
                    env,
                    python_requirement,
                );

                requirements
                    .flat_map(move |requirement| {
                        PubGrubDependency::from_requirement(
                            &self.conflicts,
                            requirement,
                            None,
                            Some(package),
                        )
                    })
                    .collect()
            }

            PubGrubPackageInner::Package {
                name,
                extra,
                group,
                marker: _,
            } => {
                // If we're excluding transitive dependencies, short-circuit.
                if self.dependency_mode.is_direct() {
                    return Ok(Dependencies::Unforkable(Vec::default()));
                }

                // Determine the distribution to lookup.
                if self.dependency_mode.is_transitive()
                    && self.unavailable_packages.get(name).is_some()
                    && self.installed_packages.get_packages(name).is_empty()
                {
                    debug_assert!(
                        false,
                        "Dependencies were requested for a package that is not available"
                    );
                    return Err(ResolveError::PackageUnavailable(name.clone()));
                }

                // Wait for the metadata to be available.
                let response = self
                    .index
                    .distributions()
                    .wait_blocking(&version_id)
                    .ok_or_else(|| ResolveError::UnregisteredTask(version_id.to_string()))?;

                let metadata = match &*response {
                    MetadataResponse::Found(archive) => &archive.metadata,
                    MetadataResponse::Unavailable(reason) => {
                        // Record the failure and report the version as unavailable,
                        // rather than failing the whole resolution.
                        let unavailable_version = UnavailableVersion::from(reason);
                        let message = unavailable_version.singular_message();
                        if let Some(err) = reason.source() {
                            // Show the detailed error for metadata parse errors.
                            warn!("{name} {message}: {err}");
                        } else {
                            warn!("{name} {message}");
                        }
                        self.incomplete_packages
                            .entry(name.clone())
                            .or_default()
                            .insert(version.clone(), reason.clone());
                        return Ok(Dependencies::Unavailable(unavailable_version));
                    }
                    MetadataResponse::Error(dist, err) => {
                        // Attach a derivation chain so the error can explain how the
                        // failing distribution was reached.
                        let chain = DerivationChainBuilder::from_state(id, version, pubgrub)
                            .unwrap_or_default();
                        return Err(ResolveError::Dist(
                            DistErrorKind::from_requested_dist(dist, &**err),
                            dist.clone(),
                            chain,
                            err.clone(),
                        ));
                    }
                };

                // If there was no requires-python on the index page, we may have an incompatible
                // distribution.
                if let Some(requires_python) = &metadata.requires_python {
                    if !python_requirement.target().is_contained_by(requires_python) {
                        return Ok(Dependencies::Unavailable(
                            UnavailableVersion::RequiresPython(requires_python.clone()),
                        ));
                    }
                }

                // Identify any system dependencies based on the index URL.
                // Only applies when a CUDA torch backend is configured and this
                // package is one with a known system dependency.
                let system_dependencies = self
                    .options
                    .torch_backend
                    .as_ref()
                    .filter(|torch_backend| matches!(torch_backend, TorchStrategy::Cuda { .. }))
                    .filter(|torch_backend| torch_backend.has_system_dependency(name))
                    .and_then(|_| pins.get(name, version).and_then(ResolvedDist::index))
                    .map(IndexUrl::url)
                    .and_then(SystemDependency::from_index)
                    .into_iter()
                    .inspect(|system_dependency| {
                        debug!(
                            "Adding system dependency `{}` for `{package}@{version}`",
                            system_dependency
                        );
                    })
                    .map(PubGrubDependency::from);

                let requirements = self.flatten_requirements(
                    &metadata.requires_dist,
                    &metadata.dependency_groups,
                    extra.as_ref(),
                    group.as_ref(),
                    Some(name),
                    env,
                    python_requirement,
                );

                requirements
                    // Drop any packages the user explicitly excluded.
                    .filter(|requirement| !self.excludes.contains(&requirement.name))
                    .flat_map(|requirement| {
                        PubGrubDependency::from_requirement(
                            &self.conflicts,
                            requirement,
                            group.as_ref(),
                            Some(package),
                        )
                    })
                    .chain(system_dependencies)
                    .collect()
            }

            PubGrubPackageInner::Python(_) => return Ok(Dependencies::Unforkable(Vec::default())),

            PubGrubPackageInner::System(_) => return Ok(Dependencies::Unforkable(Vec::default())),

            // Add a dependency on both the marker and base package.
            PubGrubPackageInner::Marker { name, marker } => {
                return Ok(Dependencies::Unforkable(
                    [MarkerTree::TRUE, *marker]
                        .into_iter()
                        .map(move |marker| PubGrubDependency {
                            package: PubGrubPackage::from(PubGrubPackageInner::Package {
                                name: name.clone(),
                                extra: None,
                                group: None,
                                marker,
                            }),
                            version: Range::singleton(version.clone()),
                            parent: None,
                            url: None,
                        })
                        .collect(),
                ));
            }

            // Add a dependency on both the extra and base package, with and without the marker.
            PubGrubPackageInner::Extra {
                name,
                extra,
                marker,
            } => {
                return Ok(Dependencies::Unforkable(
                    [MarkerTree::TRUE, *marker]
                        .into_iter()
                        // If the marker is already `TRUE`, avoid emitting duplicates.
                        .dedup()
                        .flat_map(move |marker| {
                            [None, Some(extra)]
                                .into_iter()
                                .map(move |extra| PubGrubDependency {
                                    package: PubGrubPackage::from(PubGrubPackageInner::Package {
                                        name: name.clone(),
                                        extra: extra.cloned(),
                                        group: None,
                                        marker,
                                    }),
                                    version: Range::singleton(version.clone()),
                                    parent: None,
                                    url: None,
                                })
                        })
                        .collect(),
                ));
            }

            // Add a dependency on the dependency group, with and without the marker.
            PubGrubPackageInner::Group {
                name,
                group,
                marker,
            } => {
                return Ok(Dependencies::Unforkable(
                    [MarkerTree::TRUE, *marker]
                        .into_iter()
                        // If the marker is already `TRUE`, avoid emitting duplicates.
                        .dedup()
                        .map(|marker| PubGrubDependency {
                            package: PubGrubPackage::from(PubGrubPackageInner::Package {
                                name: name.clone(),
                                extra: None,
                                group: Some(group.clone()),
                                marker,
                            }),
                            version: Range::singleton(version.clone()),
                            parent: None,
                            url: None,
                        })
                        .collect(),
                ));
            }
        };
        Ok(Dependencies::Available(dependencies))
    }
1964
    /// The regular and dev dependencies filtered by Python version and the markers of this fork,
    /// plus the extras dependencies of the current package (e.g., `black` depending on
    /// `black[colorama]`).
    fn flatten_requirements<'a>(
        &'a self,
        dependencies: &'a [Requirement],
        dev_dependencies: &'a BTreeMap<GroupName, Box<[Requirement]>>,
        extra: Option<&'a ExtraName>,
        dev: Option<&'a GroupName>,
        name: Option<&PackageName>,
        env: &'a ResolverEnvironment,
        python_requirement: &'a PythonRequirement,
    ) -> impl Iterator<Item = Cow<'a, Requirement>> {
        let python_marker = python_requirement.to_marker_tree();

        if let Some(dev) = dev {
            // Dependency groups can include the project itself, so no need to flatten recursive
            // dependencies.
            Either::Left(Either::Left(self.requirements_for_extra(
                dev_dependencies.get(dev).into_iter().flatten(),
                extra,
                env,
                python_marker,
                python_requirement,
            )))
        } else if !dependencies
            .iter()
            .any(|req| name == Some(&req.name) && !req.extras.is_empty())
        {
            // If the project doesn't define any recursive dependencies, take the fast path.
            Either::Left(Either::Right(self.requirements_for_extra(
                dependencies.iter(),
                extra,
                env,
                python_marker,
                python_requirement,
            )))
        } else {
            let mut requirements = self
                .requirements_for_extra(
                    dependencies.iter(),
                    extra,
                    env,
                    python_marker,
                    python_requirement,
                )
                .collect::<Vec<_>>();

            // Transitively process all extras that are recursively included, starting with the current
            // extra.
            //
            // Breadth-first walk over self-referential extras. The `seen` key includes
            // the marker, so the same extra reached under different markers is
            // processed once per distinct marker.
            let mut seen = FxHashSet::<(ExtraName, MarkerTree)>::default();
            let mut queue: VecDeque<_> = requirements
                .iter()
                .filter(|req| name == Some(&req.name))
                .flat_map(|req| req.extras.iter().cloned().map(|extra| (extra, req.marker)))
                .collect();
            while let Some((extra, marker)) = queue.pop_front() {
                if !seen.insert((extra.clone(), marker)) {
                    continue;
                }
                for requirement in self.requirements_for_extra(
                    dependencies,
                    Some(&extra),
                    env,
                    python_marker,
                    python_requirement,
                ) {
                    // Conjoin the parent's marker onto the requirement. The `Borrowed`
                    // arm must materialize an owned copy to mutate the marker.
                    let requirement = match requirement {
                        Cow::Owned(mut requirement) => {
                            requirement.marker.and(marker);
                            requirement
                        }
                        Cow::Borrowed(requirement) => {
                            let mut marker = marker;
                            marker.and(requirement.marker);
                            Requirement {
                                name: requirement.name.clone(),
                                extras: requirement.extras.clone(),
                                groups: requirement.groups.clone(),
                                source: requirement.source.clone(),
                                origin: requirement.origin.clone(),
                                // NOTE(review): `simplify_extras` appears to discharge
                                // the `extra == "..."` term now that the extra is being
                                // applied — confirm against the marker algebra.
                                marker: marker.simplify_extras(slice::from_ref(&extra)),
                            }
                        }
                    };
                    if name == Some(&requirement.name) {
                        // Add each transitively included extra.
                        queue.extend(
                            requirement
                                .extras
                                .iter()
                                .cloned()
                                .map(|extra| (extra, requirement.marker)),
                        );
                    } else {
                        // Add the requirements for that extra.
                        requirements.push(Cow::Owned(requirement));
                    }
                }
            }

            // Retain any self-constraints for that extra, e.g., if `project[foo]` includes
            // `project[bar]>1.0`, as a dependency, we need to propagate `project>1.0`, in addition to
            // transitively expanding `project[bar]`.
            let mut self_constraints = vec![];
            for req in &requirements {
                if name == Some(&req.name) && !req.source.is_empty() {
                    self_constraints.push(Requirement {
                        name: req.name.clone(),
                        extras: Box::new([]),
                        groups: req.groups.clone(),
                        source: req.source.clone(),
                        origin: req.origin.clone(),
                        marker: req.marker,
                    });
                }
            }

            // Drop all the self-requirements now that we flattened them out.
            requirements.retain(|req| name != Some(&req.name) || req.extras.is_empty());
            requirements.extend(self_constraints.into_iter().map(Cow::Owned));

            Either::Right(requirements.into_iter())
        }
    }
2090
2091    /// The set of the regular and dev dependencies, filtered by Python version,
2092    /// the markers of this fork and the requested extra.
2093    fn requirements_for_extra<'data, 'parameters>(
2094        &'data self,
2095        dependencies: impl IntoIterator<Item = &'data Requirement> + 'parameters,
2096        extra: Option<&'parameters ExtraName>,
2097        env: &'parameters ResolverEnvironment,
2098        python_marker: MarkerTree,
2099        python_requirement: &'parameters PythonRequirement,
2100    ) -> impl Iterator<Item = Cow<'data, Requirement>> + 'parameters
2101    where
2102        'data: 'parameters,
2103    {
2104        self.overrides
2105            .apply(dependencies)
2106            .filter(move |requirement| {
2107                Self::is_requirement_applicable(
2108                    requirement,
2109                    extra,
2110                    env,
2111                    python_marker,
2112                    python_requirement,
2113                )
2114            })
2115            .flat_map(move |requirement| {
2116                iter::once(requirement.clone()).chain(self.constraints_for_requirement(
2117                    requirement,
2118                    extra,
2119                    env,
2120                    python_marker,
2121                    python_requirement,
2122                ))
2123            })
2124    }
2125
2126    /// Whether a requirement is applicable for the Python version, the markers of this fork and the
2127    /// requested extra.
2128    fn is_requirement_applicable(
2129        requirement: &Requirement,
2130        extra: Option<&ExtraName>,
2131        env: &ResolverEnvironment,
2132        python_marker: MarkerTree,
2133        python_requirement: &PythonRequirement,
2134    ) -> bool {
2135        // If the requirement isn't relevant for the current platform, skip it.
2136        match extra {
2137            Some(source_extra) => {
2138                // Only include requirements that are relevant for the current extra.
2139                if requirement.evaluate_markers(env.marker_environment(), &[]) {
2140                    return false;
2141                }
2142                if !requirement
2143                    .evaluate_markers(env.marker_environment(), slice::from_ref(source_extra))
2144                {
2145                    return false;
2146                }
2147                if !env.included_by_group(ConflictItemRef::from((&requirement.name, source_extra)))
2148                {
2149                    return false;
2150                }
2151            }
2152            None => {
2153                if !requirement.evaluate_markers(env.marker_environment(), &[]) {
2154                    return false;
2155                }
2156            }
2157        }
2158
2159        // If the requirement would not be selected with any Python version
2160        // supported by the root, skip it.
2161        if python_marker.is_disjoint(requirement.marker) {
2162            trace!(
2163                "Skipping {requirement} because of Requires-Python: {requires_python}",
2164                requires_python = python_requirement.target(),
2165            );
2166            return false;
2167        }
2168
2169        // If we're in a fork in universal mode, ignore any dependency that isn't part of
2170        // this fork (but will be part of another fork).
2171        if !env.included_by_marker(requirement.marker) {
2172            trace!("Skipping {requirement} because of {env}");
2173            return false;
2174        }
2175
2176        true
2177    }
2178
    /// The constraints applicable to the requirement, filtered by Python version, the markers of
    /// this fork and the requested extra.
    ///
    /// Each constraint's marker is intersected with the requirement's marker, so that a
    /// constraint only applies where the requirement itself applies; constraints whose
    /// combined marker can never be true are dropped.
    fn constraints_for_requirement<'data, 'parameters>(
        &'data self,
        requirement: Cow<'data, Requirement>,
        extra: Option<&'parameters ExtraName>,
        env: &'parameters ResolverEnvironment,
        python_marker: MarkerTree,
        python_requirement: &'parameters PythonRequirement,
    ) -> impl Iterator<Item = Cow<'data, Requirement>> + 'parameters
    where
        'data: 'parameters,
    {
        self.constraints
            .get(&requirement.name)
            .into_iter()
            .flatten()
            .filter_map(move |constraint| {
                // Combine the constraint's marker with the requirement's marker, borrowing
                // the constraint unchanged where possible and cloning it with the merged
                // marker otherwise.
                let constraint = if constraint.marker.is_true() {
                    // Additionally, if the requirement is `requests ; sys_platform == 'darwin'`
                    // and the constraint is `requests ; python_version == '3.6'`, the
                    // constraint should only apply when _both_ markers are true.
                    if requirement.marker.is_true() {
                        Cow::Borrowed(constraint)
                    } else {
                        let mut marker = constraint.marker;
                        marker.and(requirement.marker);

                        if marker.is_false() {
                            trace!(
                                "Skipping {constraint} because of disjoint markers: `{}` vs. `{}`",
                                constraint.marker.try_to_string().unwrap(),
                                requirement.marker.try_to_string().unwrap(),
                            );
                            return None;
                        }

                        // Clone the constraint, but with the combined marker.
                        Cow::Owned(Requirement {
                            name: constraint.name.clone(),
                            extras: constraint.extras.clone(),
                            groups: constraint.groups.clone(),
                            source: constraint.source.clone(),
                            origin: constraint.origin.clone(),
                            marker,
                        })
                    }
                } else {
                    let requires_python = python_requirement.target();

                    let mut marker = constraint.marker;
                    marker.and(requirement.marker);

                    if marker.is_false() {
                        trace!(
                            "Skipping {constraint} because of disjoint markers: `{}` vs. `{}`",
                            constraint.marker.try_to_string().unwrap(),
                            requirement.marker.try_to_string().unwrap(),
                        );
                        return None;
                    }

                    // Additionally, if the requirement is `requests ; sys_platform == 'darwin'`
                    // and the constraint is `requests ; python_version == '3.6'`, the
                    // constraint should only apply when _both_ markers are true.
                    //
                    // If the combined marker would not be selected with any Python version
                    // supported by the root, skip it.
                    if python_marker.is_disjoint(marker) {
                        trace!(
                            "Skipping constraint {requirement} because of Requires-Python: {requires_python}"
                        );
                        return None;
                    }

                    if marker == constraint.marker {
                        Cow::Borrowed(constraint)
                    } else {
                        // Clone the constraint, but with the combined marker.
                        Cow::Owned(Requirement {
                            name: constraint.name.clone(),
                            extras: constraint.extras.clone(),
                            groups: constraint.groups.clone(),
                            source: constraint.source.clone(),
                            origin: constraint.origin.clone(),
                            marker,
                        })
                    }
                };

                // If we're in a fork in universal mode, ignore any dependency that isn't part of
                // this fork (but will be part of another fork).
                if !env.included_by_marker(constraint.marker) {
                    trace!("Skipping {constraint} because of {env}");
                    return None;
                }

                // If the constraint isn't relevant for the current platform, skip it.
                match extra {
                    Some(source_extra) => {
                        if !constraint
                            .evaluate_markers(env.marker_environment(), slice::from_ref(source_extra))
                        {
                            return None;
                        }
                        // Respect declared conflicts between extras/groups for this fork.
                        if !env.included_by_group(ConflictItemRef::from((&requirement.name, source_extra)))
                        {
                            return None;
                        }
                    }
                    None => {
                        if !constraint.evaluate_markers(env.marker_environment(), &[]) {
                            return None;
                        }
                    }
                }

                Some(constraint)
            })
    }
2296
2297    /// Fetch the metadata for a stream of packages and versions.
2298    async fn fetch<Provider: ResolverProvider>(
2299        self: Arc<Self>,
2300        provider: Arc<Provider>,
2301        request_stream: Receiver<Request>,
2302    ) -> Result<(), ResolveError> {
2303        let mut response_stream = ReceiverStream::new(request_stream)
2304            .map(|request| self.process_request(request, &*provider).boxed_local())
2305            // Allow as many futures as possible to start in the background.
2306            // Backpressure is provided by at a more granular level by `DistributionDatabase`
2307            // and `SourceDispatch`, as well as the bounded request channel.
2308            .buffer_unordered(usize::MAX);
2309
2310        while let Some(response) = response_stream.next().await {
2311            match response? {
2312                Some(Response::Package(name, index, version_map)) => {
2313                    trace!("Received package metadata for: {name}");
2314                    if let Some(index) = index {
2315                        self.index
2316                            .explicit()
2317                            .done((name, index), Arc::new(version_map));
2318                    } else {
2319                        self.index.implicit().done(name, Arc::new(version_map));
2320                    }
2321                }
2322                Some(Response::Installed { dist, metadata }) => {
2323                    trace!("Received installed distribution metadata for: {dist}");
2324                    self.index
2325                        .distributions()
2326                        .done(dist.version_id(), Arc::new(metadata));
2327                }
2328                Some(Response::Dist { dist, metadata }) => {
2329                    let dist_kind = match dist {
2330                        Dist::Built(_) => "built",
2331                        Dist::Source(_) => "source",
2332                    };
2333                    trace!("Received {dist_kind} distribution metadata for: {dist}");
2334                    if let MetadataResponse::Unavailable(reason) = &metadata {
2335                        let message = UnavailableVersion::from(reason).singular_message();
2336                        if let Some(err) = reason.source() {
2337                            // Show the detailed error for metadata parse errors.
2338                            warn!("{dist} {message}: {err}");
2339                        } else {
2340                            warn!("{dist} {message}");
2341                        }
2342                    }
2343                    self.index
2344                        .distributions()
2345                        .done(dist.version_id(), Arc::new(metadata));
2346                }
2347                None => {}
2348            }
2349        }
2350
2351        Ok::<(), ResolveError>(())
2352    }
2353
    /// Process a single [`Request`], returning the fetched metadata as a [`Response`].
    ///
    /// Returns `Ok(None)` when the request is short-circuited (e.g., a prefetch with no
    /// compatible candidate), and an error when the fetched metadata is inconsistent with the
    /// request (e.g., a package-name mismatch).
    #[instrument(skip_all, fields(%request))]
    async fn process_request<Provider: ResolverProvider>(
        &self,
        request: Request,
        provider: &Provider,
    ) -> Result<Option<Response>, ResolveError> {
        match request {
            // Fetch package metadata from the registry.
            Request::Package(package_name, index) => {
                let package_versions = provider
                    .get_package_versions(&package_name, index.as_ref())
                    .boxed_local()
                    .await
                    .map_err(ResolveError::Client)?;

                Ok(Some(Response::Package(
                    package_name,
                    index.map(IndexMetadata::into_url),
                    package_versions,
                )))
            }

            // Fetch distribution metadata from the distribution database.
            Request::Dist(dist) => {
                // If the registry already provided metadata for this exact version, reuse it
                // and skip the fetch/build below.
                if let Some(version) = dist.version() {
                    if let Some(index) = dist.index() {
                        // Check the implicit indexes for pre-provided metadata.
                        let versions_response = self.index.implicit().get(dist.name());
                        if let Some(VersionsResponse::Found(version_maps)) =
                            versions_response.as_deref()
                        {
                            for version_map in version_maps {
                                if version_map.index() == Some(index) {
                                    let Some(metadata) = version_map.get_metadata(version) else {
                                        continue;
                                    };
                                    debug!("Found registry-provided metadata for: {dist}");
                                    return Ok(Some(Response::Dist {
                                        dist,
                                        metadata: MetadataResponse::Found(
                                            ArchiveMetadata::from_metadata23(metadata.clone()),
                                        ),
                                    }));
                                }
                            }
                        }

                        // Check the explicit indexes for pre-provided metadata.
                        let versions_response = self
                            .index
                            .explicit()
                            .get(&(dist.name().clone(), index.clone()));
                        if let Some(VersionsResponse::Found(version_maps)) =
                            versions_response.as_deref()
                        {
                            for version_map in version_maps {
                                let Some(metadata) = version_map.get_metadata(version) else {
                                    continue;
                                };
                                debug!("Found registry-provided metadata for: {dist}");
                                return Ok(Some(Response::Dist {
                                    dist,
                                    metadata: MetadataResponse::Found(
                                        ArchiveMetadata::from_metadata23(metadata.clone()),
                                    ),
                                }));
                            }
                        }
                    }
                }

                let metadata = provider
                    .get_or_build_wheel_metadata(&dist)
                    .boxed_local()
                    .await?;

                // Guard against metadata that names a different package than requested.
                if let MetadataResponse::Found(metadata) = &metadata {
                    if &metadata.metadata.name != dist.name() {
                        return Err(ResolveError::MismatchedPackageName {
                            request: "distribution metadata",
                            expected: dist.name().clone(),
                            actual: metadata.metadata.name.clone(),
                        });
                    }
                }

                Ok(Some(Response::Dist { dist, metadata }))
            }

            Request::Installed(dist) => {
                let metadata = provider.get_installed_metadata(&dist).boxed_local().await?;

                // Guard against metadata that names a different package than requested.
                if let MetadataResponse::Found(metadata) = &metadata {
                    if &metadata.metadata.name != dist.name() {
                        return Err(ResolveError::MismatchedPackageName {
                            request: "installed metadata",
                            expected: dist.name().clone(),
                            actual: metadata.metadata.name.clone(),
                        });
                    }
                }

                Ok(Some(Response::Installed { dist, metadata }))
            }

            // Pre-fetch the package and distribution metadata.
            Request::Prefetch(package_name, range, python_requirement) => {
                // Wait for the package metadata to become available.
                let versions_response = self
                    .index
                    .implicit()
                    .wait(&package_name)
                    .await
                    .ok_or_else(|| ResolveError::UnregisteredTask(package_name.to_string()))?;

                let version_map = match *versions_response {
                    VersionsResponse::Found(ref version_map) => version_map,
                    // Short-circuit if we did not find any versions for the package, and
                    // record why the package was unavailable for later error reporting.
                    VersionsResponse::NoIndex => {
                        self.unavailable_packages
                            .insert(package_name.clone(), UnavailablePackage::NoIndex);

                        return Ok(None);
                    }
                    VersionsResponse::Offline => {
                        self.unavailable_packages
                            .insert(package_name.clone(), UnavailablePackage::Offline);

                        return Ok(None);
                    }
                    VersionsResponse::NotFound => {
                        self.unavailable_packages
                            .insert(package_name.clone(), UnavailablePackage::NotFound);

                        return Ok(None);
                    }
                };

                // We don't have access to the fork state when prefetching, so assume that
                // pre-release versions are allowed.
                let env = ResolverEnvironment::universal(vec![]);

                // Try to find a compatible version. If there aren't any compatible versions,
                // short-circuit.
                let Some(candidate) = self.selector.select(
                    &package_name,
                    &range,
                    version_map,
                    &self.preferences,
                    &self.installed_packages,
                    &self.exclusions,
                    None,
                    &env,
                    self.tags.as_ref(),
                ) else {
                    return Ok(None);
                };

                // If there is not a compatible distribution, short-circuit.
                let Some(dist) = candidate.compatible() else {
                    return Ok(None);
                };

                // If the registry provided metadata for this distribution, use it.
                for version_map in version_map {
                    if let Some(metadata) = version_map.get_metadata(candidate.version()) {
                        let dist = dist.for_resolution();
                        if version_map.index() == dist.index() {
                            debug!("Found registry-provided metadata for: {dist}");

                            let metadata = MetadataResponse::Found(
                                ArchiveMetadata::from_metadata23(metadata.clone()),
                            );

                            let dist = dist.to_owned();
                            // Guard against a candidate that names a different package.
                            if &package_name != dist.name() {
                                return Err(ResolveError::MismatchedPackageName {
                                    request: "distribution",
                                    expected: package_name,
                                    actual: dist.name().clone(),
                                });
                            }

                            let response = match dist {
                                ResolvedDist::Installable { dist, .. } => Response::Dist {
                                    dist: (*dist).clone(),
                                    metadata,
                                },
                                ResolvedDist::Installed { dist } => Response::Installed {
                                    dist: (*dist).clone(),
                                    metadata,
                                },
                            };

                            return Ok(Some(response));
                        }
                    }
                }

                // Avoid prefetching source distributions with unbounded lower-bound ranges. This
                // often leads to failed attempts to build legacy versions of packages that are
                // incompatible with modern build tools.
                if dist.wheel().is_none() {
                    if !self.selector.use_highest_version(&package_name, &env) {
                        if let Some((lower, _)) = range.iter().next() {
                            if lower == &Bound::Unbounded {
                                debug!(
                                    "Skipping prefetch for unbounded minimum-version range: {package_name} ({range})"
                                );
                                return Ok(None);
                            }
                        }
                    }
                }

                // Validate the Python requirement: skip distributions whose declared
                // Requires-Python doesn't cover the target Python range.
                let requires_python = match dist {
                    CompatibleDist::InstalledDist(_) => None,
                    CompatibleDist::SourceDist { sdist, .. }
                    | CompatibleDist::IncompatibleWheel { sdist, .. } => {
                        sdist.file.requires_python.as_ref()
                    }
                    CompatibleDist::CompatibleWheel { wheel, .. } => {
                        wheel.file.requires_python.as_ref()
                    }
                };
                if let Some(requires_python) = requires_python.as_ref() {
                    if !python_requirement.target().is_contained_by(requires_python) {
                        return Ok(None);
                    }
                }

                // Verify that the package is allowed under the hash-checking policy.
                if !self
                    .hasher
                    .allows_package(candidate.name(), candidate.version())
                {
                    return Ok(None);
                }

                // Emit a request to fetch the metadata for this version. `register` is the
                // de-duplication point: only the first caller for a given version proceeds.
                if self.index.distributions().register(candidate.version_id()) {
                    let dist = dist.for_resolution().to_owned();
                    // Guard against a candidate that names a different package.
                    if &package_name != dist.name() {
                        return Err(ResolveError::MismatchedPackageName {
                            request: "distribution",
                            expected: package_name,
                            actual: dist.name().clone(),
                        });
                    }

                    let response = match dist {
                        ResolvedDist::Installable { dist, .. } => {
                            let metadata = provider
                                .get_or_build_wheel_metadata(&dist)
                                .boxed_local()
                                .await?;

                            Response::Dist {
                                dist: (*dist).clone(),
                                metadata,
                            }
                        }
                        ResolvedDist::Installed { dist } => {
                            let metadata =
                                provider.get_installed_metadata(&dist).boxed_local().await?;

                            Response::Installed {
                                dist: (*dist).clone(),
                                metadata,
                            }
                        }
                    };

                    Ok(Some(response))
                } else {
                    Ok(None)
                }
            }
        }
    }
2635
2636    fn convert_no_solution_err(
2637        &self,
2638        mut err: pubgrub::NoSolutionError<UvDependencyProvider>,
2639        fork_urls: ForkUrls,
2640        fork_indexes: ForkIndexes,
2641        env: ResolverEnvironment,
2642        current_environment: MarkerEnvironment,
2643        exclude_newer: Option<&ExcludeNewer>,
2644        visited: &FxHashSet<PackageName>,
2645    ) -> ResolveError {
2646        err = NoSolutionError::collapse_local_version_segments(NoSolutionError::collapse_proxies(
2647            err,
2648        ));
2649
2650        let mut unavailable_packages = FxHashMap::default();
2651        for package in err.packages() {
2652            if let PubGrubPackageInner::Package { name, .. } = &**package {
2653                if let Some(reason) = self.unavailable_packages.get(name) {
2654                    unavailable_packages.insert(name.clone(), reason.clone());
2655                }
2656            }
2657        }
2658
2659        let mut incomplete_packages = FxHashMap::default();
2660        for package in err.packages() {
2661            if let PubGrubPackageInner::Package { name, .. } = &**package {
2662                if let Some(versions) = self.incomplete_packages.get(name) {
2663                    for entry in versions.iter() {
2664                        let (version, reason) = entry.pair();
2665                        incomplete_packages
2666                            .entry(name.clone())
2667                            .or_insert_with(BTreeMap::default)
2668                            .insert(version.clone(), reason.clone());
2669                    }
2670                }
2671            }
2672        }
2673
2674        let mut available_indexes = FxHashMap::default();
2675        let mut available_versions = FxHashMap::default();
2676        for package in err.packages() {
2677            let Some(name) = package.name() else { continue };
2678            if !visited.contains(name) {
2679                // Avoid including available versions for packages that exist in the derivation
2680                // tree, but were never visited during resolution. We _may_ have metadata for
2681                // these packages, but it's non-deterministic, and omitting them ensures that
2682                // we represent the self of the resolver at the time of failure.
2683                continue;
2684            }
2685            let versions_response = if let Some(index) = fork_indexes.get(name) {
2686                self.index
2687                    .explicit()
2688                    .get(&(name.clone(), index.url().clone()))
2689            } else {
2690                self.index.implicit().get(name)
2691            };
2692            if let Some(response) = versions_response {
2693                if let VersionsResponse::Found(ref version_maps) = *response {
2694                    // Track the available versions, across all indexes.
2695                    for version_map in version_maps {
2696                        let package_versions = available_versions
2697                            .entry(name.clone())
2698                            .or_insert_with(BTreeSet::new);
2699
2700                        for (version, dists) in version_map.iter(&Ranges::full()) {
2701                            // Don't show versions removed by excluded-newer in hints.
2702                            if let Some(exclude_newer) =
2703                                exclude_newer.and_then(|en| en.exclude_newer_package(name))
2704                            {
2705                                let Some(prioritized_dist) = dists.prioritized_dist() else {
2706                                    continue;
2707                                };
2708                                if prioritized_dist.files().all(|file| {
2709                                    file.upload_time_utc_ms.is_none_or(|upload_time| {
2710                                        upload_time >= exclude_newer.timestamp_millis()
2711                                    })
2712                                }) {
2713                                    continue;
2714                                }
2715                            }
2716
2717                            package_versions.insert(version.clone());
2718                        }
2719                    }
2720
2721                    // Track the indexes in which the package is available.
2722                    available_indexes
2723                        .entry(name.clone())
2724                        .or_insert(BTreeSet::new())
2725                        .extend(
2726                            version_maps
2727                                .iter()
2728                                .filter_map(|version_map| version_map.index().cloned()),
2729                        );
2730                }
2731            }
2732        }
2733
2734        ResolveError::NoSolution(Box::new(NoSolutionError::new(
2735            err,
2736            self.index.clone(),
2737            available_versions,
2738            available_indexes,
2739            self.selector.clone(),
2740            self.python_requirement.clone(),
2741            self.locations.clone(),
2742            self.capabilities.clone(),
2743            unavailable_packages,
2744            incomplete_packages,
2745            fork_urls,
2746            fork_indexes,
2747            env,
2748            current_environment,
2749            self.tags.clone(),
2750            self.workspace_members.clone(),
2751            self.options.clone(),
2752        )))
2753    }
2754
2755    fn on_progress(&self, package: &PubGrubPackage, version: &Version) {
2756        if let Some(reporter) = self.reporter.as_ref() {
2757            match &**package {
2758                PubGrubPackageInner::Root(_) => {}
2759                PubGrubPackageInner::Python(_) => {}
2760                PubGrubPackageInner::System(_) => {}
2761                PubGrubPackageInner::Marker { .. } => {}
2762                PubGrubPackageInner::Extra { .. } => {}
2763                PubGrubPackageInner::Group { .. } => {}
2764                PubGrubPackageInner::Package { name, .. } => {
2765                    reporter.on_progress(name, &VersionOrUrlRef::Version(version));
2766                }
2767            }
2768        }
2769    }
2770
2771    fn on_complete(&self) {
2772        if let Some(reporter) = self.reporter.as_ref() {
2773            reporter.on_complete();
2774        }
2775    }
2776}
2777
2778/// State that is used during unit propagation in the resolver, one instance per fork.
2779#[derive(Clone)]
2780pub(crate) struct ForkState {
2781    /// The internal state used by the resolver.
2782    ///
2783    /// Note that not all parts of this state are strictly internal. For
2784    /// example, the edges in the dependency graph generated as part of the
2785    /// output of resolution are derived from the "incompatibilities" tracked
2786    /// in this state. We also ultimately retrieve the final set of version
2787    /// assignments (to packages) from this state's "partial solution."
2788    pubgrub: State<UvDependencyProvider>,
2789    /// The initial package to select. If set, the first iteration over this state will avoid
2790    /// asking PubGrub for the highest-priority package, and will instead use the provided package.
2791    initial_id: Option<Id<PubGrubPackage>>,
2792    /// The initial version to select. If set, the first iteration over this state will avoid
2793    /// asking PubGrub for the highest-priority version, and will instead use the provided version.
2794    initial_version: Option<Version>,
2795    /// The next package on which to run unit propagation.
2796    next: Id<PubGrubPackage>,
2797    /// The set of pinned versions we accrue throughout resolution.
2798    ///
2799    /// The key of this map is a package name, and each package name maps to
2800    /// a set of versions for that package. Each version in turn is mapped
2801    /// to a single [`ResolvedDist`]. That [`ResolvedDist`] represents, at time
2802    /// of writing (2024/05/09), at most one wheel. The idea here is that
2803    /// [`FilePins`] tracks precisely which wheel was selected during resolution.
2804    /// After resolution is finished, this maps is consulted in order to select
2805    /// the wheel chosen during resolution.
2806    pins: FilePins,
2807    /// Ensure we don't have duplicate URLs in any branch.
2808    ///
2809    /// Unlike [`Urls`], we add only the URLs we have seen in this branch, and there can be only
2810    /// one URL per package. By prioritizing direct URL dependencies over registry dependencies,
2811    /// this map is populated for all direct URL packages before we look at any registry packages.
2812    fork_urls: ForkUrls,
2813    /// Ensure we don't have duplicate indexes in any branch.
2814    ///
2815    /// Unlike [`Indexes`], we add only the indexes we have seen in this branch, and there can be
2816    /// only one index per package.
2817    fork_indexes: ForkIndexes,
2818    /// When dependencies for a package are retrieved, this map of priorities
2819    /// is updated based on how each dependency was specified. Certain types
2820    /// of dependencies have more "priority" than others (like direct URL
2821    /// dependencies). These priorities help determine which package to
2822    /// consider next during resolution.
2823    priorities: PubGrubPriorities,
2824    /// This keeps track of the set of versions for each package that we've
2825    /// already visited during resolution. This avoids doing redundant work.
2826    added_dependencies: FxHashMap<Id<PubGrubPackage>, FxHashSet<Version>>,
2827    /// The marker expression that created this state.
2828    ///
2829    /// The root state always corresponds to a marker expression that is always
2830    /// `true` for every `MarkerEnvironment`.
2831    ///
2832    /// In non-universal mode, forking never occurs and so this marker
2833    /// expression is always `true`.
2834    ///
2835    /// Whenever dependencies are fetched, all requirement specifications
2836    /// are checked for disjointness with the marker expression of the fork
2837    /// in which those dependencies were fetched. If a requirement has a
2838    /// completely disjoint marker expression (i.e., it can never be true given
2839    /// that the marker expression that provoked the fork is true), then that
2840    /// dependency is completely ignored.
2841    env: ResolverEnvironment,
2842    /// The Python requirement for this fork. Defaults to the Python requirement for
2843    /// the resolution, but may be narrowed if a `python_version` marker is present
2844    /// in a given fork.
2845    ///
2846    /// For example, in:
2847    /// ```text
2848    /// numpy >=1.26 ; python_version >= "3.9"
2849    /// numpy <1.26 ; python_version < "3.9"
2850    /// ```
2851    ///
2852    /// The top fork has a narrower Python compatibility range, and thus can find a
2853    /// solution that omits Python 3.8 support.
2854    python_requirement: PythonRequirement,
2855    conflict_tracker: ConflictTracker,
2856    /// Prefetch package versions for packages with many rejected versions.
2857    ///
    /// Tracked on the fork state to avoid counting each identical version between forks as a new try.
2859    prefetcher: BatchPrefetcher,
2860}
2861
2862impl ForkState {
2863    fn new(
2864        pubgrub: State<UvDependencyProvider>,
2865        env: ResolverEnvironment,
2866        python_requirement: PythonRequirement,
2867        prefetcher: BatchPrefetcher,
2868    ) -> Self {
2869        Self {
2870            initial_id: None,
2871            initial_version: None,
2872            next: pubgrub.root_package,
2873            pubgrub,
2874            pins: FilePins::default(),
2875            fork_urls: ForkUrls::default(),
2876            fork_indexes: ForkIndexes::default(),
2877            priorities: PubGrubPriorities::default(),
2878            added_dependencies: FxHashMap::default(),
2879            env,
2880            python_requirement,
2881            conflict_tracker: ConflictTracker::default(),
2882            prefetcher,
2883        }
2884    }
2885
    /// Visit the dependencies for the selected version of the current package, incorporating any
    /// relevant URLs and pinned indexes into the [`ForkState`].
    ///
    /// Also updates the package priorities for each dependency (and, for proxy
    /// packages, their base package), and warns when a direct dependency lacks
    /// a lower bound under `--resolution lowest`/`lowest-direct`.
    ///
    /// # Errors
    ///
    /// Returns an error if a dependency's URL or index conflicts with one
    /// already recorded for this fork.
    fn visit_package_version_dependencies(
        &mut self,
        for_package: Id<PubGrubPackage>,
        for_version: &Version,
        urls: &Urls,
        indexes: &Indexes,
        dependencies: &[PubGrubDependency],
        git: &GitResolver,
        workspace_members: &BTreeSet<PackageName>,
        resolution_strategy: &ResolutionStrategy,
    ) -> Result<(), ResolveError> {
        for dependency in dependencies {
            let PubGrubDependency {
                package,
                version,
                parent: _,
                url,
            } = dependency;

            // Whether this dependency resolved to a direct URL (in which case
            // the missing-lower-bound warning below does not apply).
            let mut has_url = false;
            if let Some(name) = package.name() {
                // From the [`Requirement`] to [`PubGrubDependency`] conversion, we get a URL if the
                // requirement was a URL requirement. `Urls` applies canonicalization to this and
                // override URLs to both URL and registry requirements, which we then check for
                // conflicts using [`ForkUrl`].
                for url in urls.get_url(&self.env, name, url.as_ref(), git)? {
                    self.fork_urls.insert(name, url, &self.env)?;
                    has_url = true;
                }

                // If the package is pinned to an exact index, add it to the fork.
                for index in indexes.get(name, &self.env) {
                    self.fork_indexes.insert(name, index, &self.env)?;
                }
            }

            // Workspace members are treated like the root: their dependencies
            // are logged (and warned about) as direct dependencies.
            if let Some(name) = self.pubgrub.package_store[for_package]
                .name_no_root()
                .filter(|name| !workspace_members.contains(name))
            {
                debug!(
                    "Adding transitive dependency for {name}=={for_version}: {package}{version}"
                );
            } else {
                // A dependency from the root package or `requirements.txt`.
                debug!("Adding direct dependency: {package}{version}");

                // Warn the user if a direct dependency lacks a lower bound in `--lowest` resolution.
                let missing_lower_bound = version
                    .bounding_range()
                    .map(|(lowest, _highest)| lowest == Bound::Unbounded)
                    .unwrap_or(true);
                let strategy_lowest = matches!(
                    resolution_strategy,
                    ResolutionStrategy::Lowest | ResolutionStrategy::LowestDirect(..)
                );

                if !has_url && missing_lower_bound && strategy_lowest {
                    let name = package.name_no_root().unwrap();
                    // Handle cases where a package is listed both without and with a lower bound.
                    // Example:
                    // ```
                    // "coverage[toml] ; python_version < '3.11'",
                    // "coverage >= 7.10.0",
                    // ```
                    let bound_on_other_package = dependencies.iter().any(|other| {
                        Some(name) == other.package.name()
                            && !other
                                .version
                                .bounding_range()
                                .map(|(lowest, _highest)| lowest == Bound::Unbounded)
                                .unwrap_or(true)
                    });

                    if !bound_on_other_package {
                        warn_user_once!(
                            "The direct dependency `{name}` is unpinned. \
                            Consider setting a lower bound when using `--resolution lowest` \
                            or `--resolution lowest-direct` to avoid using outdated versions.",
                        );
                    }
                }
            }

            // Update the package priorities.
            self.priorities.insert(package, version, &self.fork_urls);
            // As we're adding an incompatibility from the proxy package to the base package,
            // we need to register the base package.
            if let Some(base_package) = package.base_package() {
                self.priorities
                    .insert(&base_package, version, &self.fork_urls);
            }
        }

        Ok(())
    }
2984
    /// Add the dependencies for the selected version of the current package.
    ///
    /// First registers a proxy-to-base link for every dependency that has a
    /// distinct base package, then hands the `(package, version-range)` pairs
    /// to PubGrub. If PubGrub reports that the version conflicts with its
    /// dependencies, the conflict is recorded for the prioritization
    /// heuristics.
    fn add_package_version_dependencies(
        &mut self,
        for_package: Id<PubGrubPackage>,
        for_version: &Version,
        dependencies: Vec<PubGrubDependency>,
    ) {
        for dependency in &dependencies {
            let PubGrubDependency {
                package,
                version,
                parent: _,
                url: _,
            } = dependency;

            // Only proxy packages (those with a distinct base package) need
            // the additional proxy-to-base incompatibility.
            let Some(base_package) = package.base_package() else {
                continue;
            };

            let proxy_package = self.pubgrub.package_store.alloc(package.clone());
            let base_package_id = self.pubgrub.package_store.alloc(base_package.clone());
            self.pubgrub
                .add_proxy_package(proxy_package, base_package_id, version.clone());
        }

        let conflict = self.pubgrub.add_package_version_dependencies(
            self.next,
            for_version.clone(),
            dependencies.into_iter().map(|dependency| {
                let PubGrubDependency {
                    package,
                    version,
                    parent: _,
                    url: _,
                } = dependency;
                (package, version)
            }),
        );

        // Conflict tracking: If the version was rejected due to its dependencies, record culprit
        // and affected.
        if let Some(incompatibility) = conflict {
            self.record_conflict(for_package, Some(for_version), incompatibility);
        }
    }
3030
    /// Record a conflict involving `affected` for the prioritization heuristics.
    ///
    /// Every other package participating in the incompatibility is counted as
    /// a "culprit"; once a culprit reaches `CONFLICT_THRESHOLD` it is queued
    /// for deprioritization. Likewise, the currently-selected package
    /// (`self.next`) is counted as "affected" and queued for prioritization
    /// once it reaches the threshold. Same-named packages (marker proxies of
    /// the main package) are excluded so bookkeeping-only incompatibilities
    /// don't skew the counters.
    fn record_conflict(
        &mut self,
        affected: Id<PubGrubPackage>,
        version: Option<&Version>,
        incompatibility: IncompId<PubGrubPackage, Ranges<Version>, UnavailableReason>,
    ) {
        let mut culprit_is_real = false;
        for (incompatible, _term) in self.pubgrub.incompatibility_store[incompatibility].iter() {
            if incompatible == affected {
                continue;
            }
            if self.pubgrub.package_store[affected].name()
                == self.pubgrub.package_store[incompatible].name()
            {
                // Don't track conflicts between a marker package and the main package, when the
                // marker is "copying" the obligations from the main package through conflicts.
                continue;
            }
            culprit_is_real = true;
            let culprit_count = self
                .conflict_tracker
                .culprit
                .entry(incompatible)
                .or_default();
            *culprit_count += 1;
            if *culprit_count == CONFLICT_THRESHOLD {
                self.conflict_tracker.deprioritize.push(incompatible);
            }
        }
        // Don't track conflicts between a marker package and the main package, when the
        // marker is "copying" the obligations from the main package through conflicts.
        if culprit_is_real {
            // Gate the expensive join/format behind the debug-level check.
            if tracing::enabled!(Level::DEBUG) {
                let incompatibility = self.pubgrub.incompatibility_store[incompatibility]
                    .iter()
                    .map(|(package, _term)| &self.pubgrub.package_store[package])
                    .join(", ");
                if let Some(version) = version {
                    debug!(
                        "Recording dependency conflict of {}=={} from incompatibility of ({})",
                        self.pubgrub.package_store[affected], version, incompatibility
                    );
                } else {
                    debug!(
                        "Recording unit propagation conflict of {} from incompatibility of ({})",
                        self.pubgrub.package_store[affected], incompatibility
                    );
                }
            }

            let affected_count = self.conflict_tracker.affected.entry(self.next).or_default();
            *affected_count += 1;
            if *affected_count == CONFLICT_THRESHOLD {
                self.conflict_tracker.prioritize.push(self.next);
            }
        }
    }
3088
    /// Record that `version` of the current package (`self.next`) is unavailable.
    ///
    /// `requires-python` incompatibilities are modeled as a dependency on a
    /// virtual Python package; all other reasons become a custom version
    /// incompatibility carrying the [`UnavailableReason`].
    fn add_unavailable_version(&mut self, version: Version, reason: UnavailableVersion) {
        // Incompatible requires-python versions are special in that we track
        // them as incompatible dependencies instead of marking the package version
        // as unavailable directly.
        if let UnavailableVersion::IncompatibleDist(
            IncompatibleDist::Source(IncompatibleSource::RequiresPython(requires_python, kind))
            | IncompatibleDist::Wheel(IncompatibleWheel::RequiresPython(requires_python, kind)),
        ) = reason
        {
            let package = &self.next;
            // Allocate a virtual package for the installed or targeted Python.
            let python = self.pubgrub.package_store.alloc(PubGrubPackage::from(
                PubGrubPackageInner::Python(match kind {
                    PythonRequirementKind::Installed => PubGrubPython::Installed,
                    PythonRequirementKind::Target => PubGrubPython::Target,
                }),
            ));
            // Encode `package==version` depending on `python in requires_python`.
            self.pubgrub
                .add_incompatibility(Incompatibility::from_dependency(
                    *package,
                    Range::singleton(version.clone()),
                    (python, release_specifiers_to_ranges(requires_python)),
                ));
            // NOTE(review): deciding the version here appears to let unit
            // propagation surface the Python incompatibility — confirm.
            self.pubgrub
                .partial_solution
                .add_decision(self.next, version);
            return;
        }
        self.pubgrub
            .add_incompatibility(Incompatibility::custom_version(
                self.next,
                version.clone(),
                UnavailableReason::Version(reason),
            ));
    }
3123
    /// Subset the current markers with the new markers and update the python requirements fields
    /// accordingly.
    ///
    /// If the new environment implies a narrower Python requirement (e.g., via
    /// a `python_version` marker on this fork), the narrowed requirement is
    /// applied to the fork.
    fn with_env(mut self, env: ResolverEnvironment) -> Self {
        self.env = env;
        // If the fork contains a narrowed Python requirement, apply it.
        if let Some(req) = self.env.narrow_python_requirement(&self.python_requirement) {
            debug!("Narrowed `requires-python` bound to: {}", req.target());
            self.python_requirement = req;
        }
        self
    }
3138
3139    /// Returns the URL or index for a package and version.
3140    ///
3141    /// In practice, exactly one of the returned values will be `Some`.
3142    fn source(
3143        &self,
3144        name: &PackageName,
3145        version: &Version,
3146    ) -> (Option<&VerbatimParsedUrl>, Option<&IndexUrl>) {
3147        let url = self.fork_urls.get(name);
3148        let index = url
3149            .is_none()
3150            .then(|| {
3151                self.pins
3152                    .get(name, version)
3153                    .expect("Every package should be pinned")
3154                    .index()
3155            })
3156            .flatten();
3157        (url, index)
3158    }
3159
    /// Consume this fork's state and convert the PubGrub solution into a
    /// [`Resolution`] of packages, dependency edges, and pinned files.
    ///
    /// Edges are recovered from the `FromDependencyOf` incompatibilities in
    /// the PubGrub state: an incompatibility contributes an edge when both
    /// endpoints are in the solution and both selected versions fall within
    /// the ranges recorded on the incompatibility.
    fn into_resolution(self) -> Resolution {
        let solution: FxHashMap<_, _> = self.pubgrub.partial_solution.extract_solution().collect();
        // Upper bound on the edge count: each incompatibility of each solved
        // package contributes at most one edge.
        let edge_count: usize = solution
            .keys()
            .map(|package| self.pubgrub.incompatibilities[package].len())
            .sum();
        let mut edges: Vec<ResolutionDependencyEdge> = Vec::with_capacity(edge_count);
        for (package, self_version) in &solution {
            for id in &self.pubgrub.incompatibilities[package] {
                // Only dependency-derived incompatibilities produce edges.
                let pubgrub::Kind::FromDependencyOf(
                    self_package,
                    ref self_range,
                    dependency_package,
                    ref dependency_range,
                ) = self.pubgrub.incompatibility_store[*id].kind
                else {
                    continue;
                };
                // The incompatibility must describe a dependency *of* this
                // package, and both selected versions must satisfy the
                // recorded ranges; otherwise it belongs to a different branch.
                if *package != self_package {
                    continue;
                }
                if !self_range.contains(self_version) {
                    continue;
                }
                let Some(dependency_version) = solution.get(&dependency_package) else {
                    continue;
                };
                if !dependency_range.contains(dependency_version) {
                    continue;
                }

                let self_package = &self.pubgrub.package_store[self_package];
                let dependency_package = &self.pubgrub.package_store[dependency_package];

                // Only regular packages and the root originate edges.
                let (self_name, self_extra, self_group) = match &**self_package {
                    PubGrubPackageInner::Package {
                        name: self_name,
                        extra: self_extra,
                        group: self_group,
                        marker: _,
                    } => (Some(self_name), self_extra.as_ref(), self_group.as_ref()),

                    PubGrubPackageInner::Root(_) => (None, None, None),

                    _ => continue,
                };

                // The root has neither a URL nor an index.
                let (self_url, self_index) = self_name
                    .map(|self_name| self.source(self_name, self_version))
                    .unwrap_or((None, None));

                match **dependency_package {
                    PubGrubPackageInner::Package {
                        name: ref dependency_name,
                        extra: ref dependency_extra,
                        group: ref dependency_dev,
                        marker: ref dependency_marker,
                    } => {
                        debug_assert!(
                            dependency_extra.is_none(),
                            "Packages should depend on an extra proxy"
                        );
                        debug_assert!(
                            dependency_dev.is_none(),
                            "Packages should depend on a group proxy"
                        );

                        // Ignore self-dependencies (e.g., `tensorflow-macos` depends on `tensorflow-macos`),
                        // but allow groups to depend on other groups, or on the package itself.
                        if self_group.is_none() {
                            if self_name == Some(dependency_name) {
                                continue;
                            }
                        }

                        let (to_url, to_index) = self.source(dependency_name, dependency_version);

                        let edge = ResolutionDependencyEdge {
                            from: self_name.cloned(),
                            from_version: self_version.clone(),
                            from_url: self_url.cloned(),
                            from_index: self_index.cloned(),
                            from_extra: self_extra.cloned(),
                            from_group: self_group.cloned(),
                            to: dependency_name.clone(),
                            to_version: dependency_version.clone(),
                            to_url: to_url.cloned(),
                            to_index: to_index.cloned(),
                            to_extra: dependency_extra.clone(),
                            to_group: dependency_dev.clone(),
                            marker: *dependency_marker,
                        };
                        edges.push(edge);
                    }

                    PubGrubPackageInner::Marker {
                        name: ref dependency_name,
                        marker: ref dependency_marker,
                    } => {
                        // Ignore self-dependencies (e.g., `tensorflow-macos` depends on `tensorflow-macos`),
                        // but allow groups to depend on other groups, or on the package itself.
                        if self_group.is_none() {
                            if self_name == Some(dependency_name) {
                                continue;
                            }
                        }

                        let (to_url, to_index) = self.source(dependency_name, dependency_version);

                        let edge = ResolutionDependencyEdge {
                            from: self_name.cloned(),
                            from_version: self_version.clone(),
                            from_url: self_url.cloned(),
                            from_index: self_index.cloned(),
                            from_extra: self_extra.cloned(),
                            from_group: self_group.cloned(),
                            to: dependency_name.clone(),
                            to_version: dependency_version.clone(),
                            to_url: to_url.cloned(),
                            to_index: to_index.cloned(),
                            to_extra: None,
                            to_group: None,
                            marker: *dependency_marker,
                        };
                        edges.push(edge);
                    }

                    PubGrubPackageInner::Extra {
                        name: ref dependency_name,
                        extra: ref dependency_extra,
                        marker: ref dependency_marker,
                    } => {
                        if self_group.is_none() {
                            debug_assert!(
                                self_name != Some(dependency_name),
                                "Extras should be flattened"
                            );
                        }
                        let (to_url, to_index) = self.source(dependency_name, dependency_version);

                        // Insert an edge from the dependent package to the extra package.
                        let edge = ResolutionDependencyEdge {
                            from: self_name.cloned(),
                            from_version: self_version.clone(),
                            from_url: self_url.cloned(),
                            from_index: self_index.cloned(),
                            from_extra: self_extra.cloned(),
                            from_group: self_group.cloned(),
                            to: dependency_name.clone(),
                            to_version: dependency_version.clone(),
                            to_url: to_url.cloned(),
                            to_index: to_index.cloned(),
                            to_extra: Some(dependency_extra.clone()),
                            to_group: None,
                            marker: *dependency_marker,
                        };
                        edges.push(edge);

                        // Insert an edge from the dependent package to the base package.
                        let edge = ResolutionDependencyEdge {
                            from: self_name.cloned(),
                            from_version: self_version.clone(),
                            from_url: self_url.cloned(),
                            from_index: self_index.cloned(),
                            from_extra: self_extra.cloned(),
                            from_group: self_group.cloned(),
                            to: dependency_name.clone(),
                            to_version: dependency_version.clone(),
                            to_url: to_url.cloned(),
                            to_index: to_index.cloned(),
                            to_extra: None,
                            to_group: None,
                            marker: *dependency_marker,
                        };
                        edges.push(edge);
                    }

                    PubGrubPackageInner::Group {
                        name: ref dependency_name,
                        group: ref dependency_group,
                        marker: ref dependency_marker,
                    } => {
                        debug_assert!(
                            self_name != Some(dependency_name),
                            "Groups should be flattened"
                        );

                        let (to_url, to_index) = self.source(dependency_name, dependency_version);

                        // Add an edge from the dependent package to the dev package, but _not_ the
                        // base package.
                        let edge = ResolutionDependencyEdge {
                            from: self_name.cloned(),
                            from_version: self_version.clone(),
                            from_url: self_url.cloned(),
                            from_index: self_index.cloned(),
                            from_extra: self_extra.cloned(),
                            from_group: self_group.cloned(),
                            to: dependency_name.clone(),
                            to_version: dependency_version.clone(),
                            to_url: to_url.cloned(),
                            to_index: to_index.cloned(),
                            to_extra: None,
                            to_group: Some(dependency_group.clone()),
                            marker: *dependency_marker,
                        };
                        edges.push(edge);
                    }

                    _ => {}
                }
            }
        }

        // Only `Package` entries with an always-true marker become nodes;
        // marker/extra/group proxy packages are excluded (their information is
        // captured by the edges above).
        let nodes = solution
            .into_iter()
            .filter_map(|(package, version)| {
                if let PubGrubPackageInner::Package {
                    name,
                    extra,
                    group,
                    marker: MarkerTree::TRUE,
                } = &*self.pubgrub.package_store[package]
                {
                    let (url, index) = self.source(name, &version);
                    Some((
                        ResolutionPackage {
                            name: name.clone(),
                            extra: extra.clone(),
                            dev: group.clone(),
                            url: url.cloned(),
                            index: index.cloned(),
                        },
                        version,
                    ))
                } else {
                    None
                }
            })
            .collect();

        Resolution {
            nodes,
            edges,
            pins: self.pins,
            env: self.env,
        }
    }
3408}
3409
/// The resolution from a single fork including the virtual packages and the edges between them.
#[derive(Debug)]
pub(crate) struct Resolution {
    /// The virtual packages in this fork's solution, mapped to the version selected for each.
    pub(crate) nodes: FxHashMap<ResolutionPackage, Version>,
    /// The directed connections between the nodes, where the marker is the edge weight. We don't
    /// store the requirement itself, but it can be retrieved from the package metadata.
    pub(crate) edges: Vec<ResolutionDependencyEdge>,
    /// Map each package name, version tuple from `packages` to a distribution.
    pub(crate) pins: FilePins,
    /// The environment setting this resolution was found under.
    pub(crate) env: ResolverEnvironment,
}
3422
/// Package representation we used during resolution where each extra and also the dev-dependencies
/// group are their own package.
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub(crate) struct ResolutionPackage {
    /// The name of the package.
    pub(crate) name: PackageName,
    /// The extra this virtual package represents, if any.
    pub(crate) extra: Option<ExtraName>,
    /// The dependency group this virtual package represents, if any.
    pub(crate) dev: Option<GroupName>,
    /// For registry packages, this is `None`; otherwise, the direct URL of the distribution.
    pub(crate) url: Option<VerbatimParsedUrl>,
    /// For URL packages, this is `None`; otherwise, the index URL of the distribution.
    pub(crate) index: Option<IndexUrl>,
}
3435
/// The `from_` fields and the `to_` fields allow mapping to the originating and target
///  [`ResolutionPackage`] respectively. The `marker` is the edge weight.
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub(crate) struct ResolutionDependencyEdge {
    /// This value is `None` if the dependency comes from the root package.
    pub(crate) from: Option<PackageName>,
    pub(crate) from_version: Version,
    pub(crate) from_url: Option<VerbatimParsedUrl>,
    pub(crate) from_index: Option<IndexUrl>,
    pub(crate) from_extra: Option<ExtraName>,
    pub(crate) from_group: Option<GroupName>,
    /// The name of the package the edge points to.
    pub(crate) to: PackageName,
    pub(crate) to_version: Version,
    pub(crate) to_url: Option<VerbatimParsedUrl>,
    pub(crate) to_index: Option<IndexUrl>,
    pub(crate) to_extra: Option<ExtraName>,
    pub(crate) to_group: Option<GroupName>,
    /// The PEP 508 marker under which this dependency applies.
    pub(crate) marker: MarkerTree,
}
3455
3456impl ResolutionDependencyEdge {
3457    pub(crate) fn universal_marker(&self) -> UniversalMarker {
3458        // We specifically do not account for conflict
3459        // markers here. Instead, those are computed via
3460        // a traversal on the resolution graph.
3461        UniversalMarker::new(self.marker, ConflictMarker::TRUE)
3462    }
3463}
3464
/// Fetch the metadata for an item
#[derive(Debug)]
#[allow(clippy::large_enum_variant)]
pub(crate) enum Request {
    /// A request to fetch the metadata for a package, optionally scoped to an
    /// index (presumably the index to query — handler is outside this view).
    Package(PackageName, Option<IndexMetadata>),
    /// A request to fetch the metadata for a built or source distribution.
    Dist(Dist),
    /// A request to fetch the metadata from an already-installed distribution.
    Installed(InstalledDist),
    /// A request to pre-fetch the metadata for a package and the best-guess distribution,
    /// given a candidate version range and Python requirement.
    Prefetch(PackageName, Range<Version>, PythonRequirement),
}
3478
impl<'a> From<ResolvedDistRef<'a>> for Request {
    /// Convert a resolved distribution reference into a metadata [`Request`].
    ///
    /// Registry source/built dists become [`Request::Dist`]; installed dists
    /// become [`Request::Installed`].
    fn from(dist: ResolvedDistRef<'a>) -> Self {
        // N.B. This is almost identical to `ResolvedDistRef::to_owned`, but
        // creates a `Request` instead of a `ResolvedDist`. There's probably
        // some room for DRYing this up a bit. The obvious way would be to
        // add a method to create a `Dist`, but a `Dist` cannot be represented
        // as an installed dist.
        match dist {
            ResolvedDistRef::InstallableRegistrySourceDist { sdist, prioritized } => {
                // This is okay because we're only here if the prioritized dist
                // has an sdist, so this always succeeds.
                let source = prioritized.source_dist().expect("a source distribution");
                // Invariant: the chosen sdist must be the one the prioritized
                // dist reports; a mismatch is a resolver bug.
                assert_eq!(
                    (&sdist.name, &sdist.version),
                    (&source.name, &source.version),
                    "expected chosen sdist to match prioritized sdist"
                );
                Self::Dist(Dist::Source(SourceDist::Registry(source)))
            }
            ResolvedDistRef::InstallableRegistryBuiltDist {
                wheel, prioritized, ..
            } => {
                // Invariant: the chosen wheel must be the prioritized dist's
                // best wheel; a mismatch is a resolver bug.
                assert_eq!(
                    Some(&wheel.filename),
                    prioritized.best_wheel().map(|(wheel, _)| &wheel.filename),
                    "expected chosen wheel to match best wheel"
                );
                // This is okay because we're only here if the prioritized dist
                // has at least one wheel, so this always succeeds.
                let built = prioritized.built_dist().expect("at least one wheel");
                Self::Dist(Dist::Built(BuiltDist::Registry(built)))
            }
            ResolvedDistRef::Installed { dist } => Self::Installed(dist.clone()),
        }
    }
}
3515
3516impl Display for Request {
3517    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
3518        match self {
3519            Self::Package(package_name, _) => {
3520                write!(f, "Versions {package_name}")
3521            }
3522            Self::Dist(dist) => {
3523                write!(f, "Metadata {dist}")
3524            }
3525            Self::Installed(dist) => {
3526                write!(f, "Installed metadata {dist}")
3527            }
3528            Self::Prefetch(package_name, range, _) => {
3529                write!(f, "Prefetch {package_name} {range}")
3530            }
3531        }
3532    }
3533}
3534
/// A response to a [`Request`], carrying the fetched metadata.
#[derive(Debug)]
#[allow(clippy::large_enum_variant)]
enum Response {
    /// The returned metadata for a package hosted on a registry.
    ///
    /// The `Option<IndexUrl>` is presumably the index that served the
    /// versions — confirm against the producer of this response.
    Package(PackageName, Option<IndexUrl>, VersionsResponse),
    /// The returned metadata for a distribution.
    Dist {
        dist: Dist,
        metadata: MetadataResponse,
    },
    /// The returned metadata for an already-installed distribution.
    Installed {
        dist: InstalledDist,
        metadata: MetadataResponse,
    },
}
3551
/// Information about the dependencies for a particular package.
///
/// This effectively distills the dependency metadata of a package down into
/// its pubgrub specific constituent parts: each dependency package has a range
/// of possible versions.
enum Dependencies {
    /// Package dependencies are not available, with the reason captured in
    /// the [`UnavailableVersion`].
    Unavailable(UnavailableVersion),
    /// Container for all available package versions.
    ///
    /// Note that in universal mode, it is possible and allowed for multiple
    /// `PubGrubPackage` values in this list to have the same package name.
    /// These conflicts are resolved via `Dependencies::fork`.
    Available(Vec<PubGrubDependency>),
    /// Dependencies that should never result in a fork.
    ///
    /// For example, the dependencies of a `Marker` package will have the
    /// same name and version, but differ according to marker expressions.
    /// But we never want this to result in a fork.
    Unforkable(Vec<PubGrubDependency>),
}
3573
3574impl Dependencies {
3575    /// Turn this flat list of dependencies into a potential set of forked
3576    /// groups of dependencies.
3577    ///
3578    /// A fork *only* occurs when there are multiple dependencies with the same
3579    /// name *and* those dependency specifications have corresponding marker
3580    /// expressions that are completely disjoint with one another.
3581    fn fork(
3582        self,
3583        env: &ResolverEnvironment,
3584        python_requirement: &PythonRequirement,
3585        conflicts: &Conflicts,
3586    ) -> ForkedDependencies {
3587        let deps = match self {
3588            Self::Available(deps) => deps,
3589            Self::Unforkable(deps) => return ForkedDependencies::Unforked(deps),
3590            Self::Unavailable(err) => return ForkedDependencies::Unavailable(err),
3591        };
3592        let mut name_to_deps: BTreeMap<PackageName, Vec<PubGrubDependency>> = BTreeMap::new();
3593        for dep in deps {
3594            let name = dep
3595                .package
3596                .name()
3597                .expect("dependency always has a name")
3598                .clone();
3599            name_to_deps.entry(name).or_default().push(dep);
3600        }
3601        let Forks {
3602            mut forks,
3603            diverging_packages,
3604        } = Forks::new(name_to_deps, env, python_requirement, conflicts);
3605        if forks.is_empty() {
3606            ForkedDependencies::Unforked(vec![])
3607        } else if forks.len() == 1 {
3608            ForkedDependencies::Unforked(forks.pop().unwrap().dependencies)
3609        } else {
3610            ForkedDependencies::Forked {
3611                forks,
3612                diverging_packages: diverging_packages.into_iter().collect(),
3613            }
3614        }
3615    }
3616}
3617
/// Information about the (possibly forked) dependencies for a particular
/// package.
///
/// This is like `Dependencies` but with an extra variant that only occurs when
/// a `Dependencies` list has multiple dependency specifications with the same
/// name and non-overlapping marker expressions (i.e., a fork occurs).
#[derive(Debug)]
enum ForkedDependencies {
    /// Package dependencies are not available.
    Unavailable(UnavailableVersion),
    /// No forking occurred.
    ///
    /// This is the same as `Dependencies::Available`.
    Unforked(Vec<PubGrubDependency>),
    /// Forked containers for all available package versions.
    ///
    /// Note that there are always at least two forks. If there would
    /// be fewer than 2 forks, then there is no fork at all and the
    /// `Unforked` variant is used instead.
    Forked {
        forks: Vec<Fork>,
        /// The package(s) with different requirements for disjoint markers.
        diverging_packages: Vec<PackageName>,
    },
}
3643
/// A list of forks determined from the dependencies of a single package.
///
/// Any time a marker expression is seen that is not true for all possible
/// marker environments, it is possible for it to introduce a new fork.
#[derive(Debug, Default)]
struct Forks {
    /// The forks discovered among the dependencies.
    forks: Vec<Fork>,
    /// The package(s) that provoked at least one additional fork.
    ///
    /// Reported back to callers (e.g., for lock-file annotations) via
    /// `ForkedDependencies::Forked::diverging_packages`.
    diverging_packages: BTreeSet<PackageName>,
}
3655
impl Forks {
    /// Build the set of forks implied by a package's grouped dependencies.
    ///
    /// Starting from a single fork that covers the entire parent resolver
    /// environment, each dependency whose markers are neither always-true nor
    /// always-false for that environment may split the existing forks into
    /// disjoint marker environments. Afterwards, each conflicting group set
    /// in `conflicts` may multiply the forks further, so that each fork
    /// excludes all but (at most) one member of each conflict set.
    fn new(
        name_to_deps: BTreeMap<PackageName, Vec<PubGrubDependency>>,
        env: &ResolverEnvironment,
        python_requirement: &PythonRequirement,
        conflicts: &Conflicts,
    ) -> Self {
        // Used below to discard forks that fall entirely outside the
        // supported Python range.
        let python_marker = python_requirement.to_marker_tree();

        // Start with a single fork spanning the parent environment.
        let mut forks = vec![Fork::new(env.clone())];
        let mut diverging_packages = BTreeSet::new();
        for (name, mut deps) in name_to_deps {
            assert!(!deps.is_empty(), "every name has at least one dependency");
            // We never fork if there's only one dependency
            // specification for a given package name. This particular
            // strategy results in a "conservative" approach to forking
            // that gives up correctness in some cases in exchange for
            // more limited forking. More limited forking results in
            // simpler-and-easier-to-understand lock files and faster
            // resolving. The correctness we give up manifests when
            // two transitive non-sibling dependencies conflict. In
            // that case, we don't detect the fork ahead of time (at
            // present).
            if let [dep] = deps.as_slice() {
                // There's one exception: if the requirement increases the minimum-supported Python
                // version, we also fork in order to respect that minimum in the subsequent
                // resolution.
                //
                // For example, given `requires-python = ">=3.7"` and `uv ; python_version >= "3.8"`,
                // where uv itself only supports Python 3.8 and later, we need to fork to ensure
                // that the resolution can find a solution.
                if marker::requires_python(dep.package.marker())
                    .is_none_or(|bound| !python_requirement.raises(&bound))
                {
                    let dep = deps.pop().unwrap();
                    let marker = dep.package.marker();
                    // Add the dependency to every fork whose environment the
                    // marker can apply to.
                    for fork in &mut forks {
                        if fork.env.included_by_marker(marker) {
                            fork.add_dependency(dep.clone());
                        }
                    }
                    continue;
                }
            } else {
                // If all dependencies have the same markers, we should also avoid forking.
                if let Some(dep) = deps.first() {
                    let marker = dep.package.marker();
                    if deps.iter().all(|dep| marker == dep.package.marker()) {
                        // Unless that "same marker" is a Python requirement that is stricter than
                        // the current Python requirement. In that case, we need to fork to respect
                        // the stricter requirement.
                        if marker::requires_python(marker)
                            .is_none_or(|bound| !python_requirement.raises(&bound))
                        {
                            for dep in deps {
                                for fork in &mut forks {
                                    if fork.env.included_by_marker(marker) {
                                        fork.add_dependency(dep.clone());
                                    }
                                }
                            }
                            continue;
                        }
                    }
                }
            }
            // At this point, this package name has multiple dependency
            // specifications (or a Python-raising marker), so each one may
            // split the existing forks.
            for dep in deps {
                let mut forker = match ForkingPossibility::new(env, &dep) {
                    ForkingPossibility::Possible(forker) => forker,
                    ForkingPossibility::DependencyAlwaysExcluded => {
                        // If the markers can never be satisfied by the parent
                        // fork, then we can drop this dependency unceremoniously.
                        continue;
                    }
                    ForkingPossibility::NoForkingPossible => {
                        // Or, if the markers are always true, then we just
                        // add the dependency to every fork unconditionally.
                        for fork in &mut forks {
                            fork.add_dependency(dep.clone());
                        }
                        continue;
                    }
                };
                // Otherwise, we *should* need to add a new fork...
                diverging_packages.insert(name.clone());

                let mut new = vec![];
                for fork in std::mem::take(&mut forks) {
                    // If the forker cannot split this fork's environment,
                    // carry the fork over unchanged.
                    let Some((remaining_forker, envs)) = forker.fork(&fork.env) else {
                        new.push(fork);
                        continue;
                    };
                    forker = remaining_forker;

                    for fork_env in envs {
                        let mut new_fork = fork.clone();
                        new_fork.set_env(fork_env);
                        // We only add the dependency to this fork if it
                        // satisfies the fork's markers. Some forks are
                        // specifically created to exclude this dependency,
                        // so this isn't always true!
                        if forker.included(&new_fork.env) {
                            new_fork.add_dependency(dep.clone());
                        }
                        // Filter out any forks we created that are disjoint with our
                        // Python requirement.
                        if new_fork.env.included_by_marker(python_marker) {
                            new.push(new_fork);
                        }
                    }
                }
                forks = new;
            }
        }
        // When there is a conflicting group configuration, we need
        // to potentially add more forks. Each fork added contains an
        // exclusion list of conflicting groups where dependencies with
        // the corresponding package and extra name are forcefully
        // excluded from that group.
        //
        // We specifically iterate on conflicting groups and
        // potentially re-generate all forks for each one. We do it
        // this way in case there are multiple sets of conflicting
        // groups that impact the forks here.
        //
        // For example, if we have conflicting groups {x1, x2} and {x3,
        // x4}, we need to make sure the forks generated from one set
        // also account for the other set.
        for set in conflicts.iter() {
            let mut new = vec![];
            for fork in std::mem::take(&mut forks) {
                let mut has_conflicting_dependency = false;
                for item in set.iter() {
                    if fork.contains_conflicting_item(item.as_ref()) {
                        has_conflicting_dependency = true;
                        diverging_packages.insert(item.package().clone());
                        break;
                    }
                }
                if !has_conflicting_dependency {
                    new.push(fork);
                    continue;
                }

                // Create a fork that excludes ALL conflicts.
                if let Some(fork_none) = fork.clone().filter(set.iter().cloned().map(Err)) {
                    new.push(fork_none);
                }

                // Now create a fork for each conflicting group, where
                // that fork excludes every *other* conflicting group.
                //
                // So if we have conflicting extras foo, bar and baz,
                // then this creates three forks: one that excludes
                // {foo, bar}, one that excludes {foo, baz} and one
                // that excludes {bar, baz}.
                for (i, _) in set.iter().enumerate() {
                    let fork_allows_group = fork.clone().filter(
                        set.iter()
                            .cloned()
                            .enumerate()
                            .map(|(j, group)| if i == j { Ok(group) } else { Err(group) }),
                    );
                    if let Some(fork_allows_group) = fork_allows_group {
                        new.push(fork_allows_group);
                    }
                }
            }
            forks = new;
        }
        Self {
            forks,
            diverging_packages,
        }
    }
}
3832
/// A single fork in a list of dependencies.
///
/// A fork corresponds to the full list of dependencies for a package,
/// but with any conflicting dependency specifications omitted. For
/// example, if we have `a<2 ; sys_platform == 'foo'` and `a>=2 ;
/// sys_platform == 'bar'`, then because the dependency specifications
/// have the same name and because the marker expressions are disjoint,
/// a fork occurs. One fork will contain `a<2` but not `a>=2`, while
/// the other fork will contain `a>=2` but not `a<2`.
#[derive(Clone, Debug)]
struct Fork {
    /// The list of dependencies for this fork, guaranteed to be conflict
    /// free. (i.e., There are no two packages with the same name with
    /// non-overlapping marker expressions.)
    ///
    /// Note that callers shouldn't mutate this sequence directly. Instead,
    /// they should use `add_forked_package` or `add_nonfork_package`. Namely,
    /// it should be impossible for a package with a marker expression that is
    /// disjoint from the marker expression on this fork to be added.
    dependencies: Vec<PubGrubDependency>,
    /// The conflicting groups in this fork.
    ///
    /// This exists to make some access patterns more efficient. Namely,
    /// it makes it easy to check whether there's a dependency with a
    /// particular conflicting group in this fork.
    conflicts: crate::FxHashbrownSet<ConflictItem>,
    /// The resolver environment for this fork.
    ///
    /// Principally, this corresponds to the markers in this fork. So in the
    /// example above, the `a<2` fork would have `sys_platform == 'foo'`, while
    /// the `a>=2` fork would have `sys_platform == 'bar'`.
    ///
    /// If this fork was generated from another fork, then this *includes*
    /// the criteria from its parent. i.e., Its marker expression represents
    /// the intersection of the marker expression from its parent and any
    /// additional marker expression generated by additional forking based on
    /// conflicting dependency specifications.
    env: ResolverEnvironment,
}
3872
3873impl Fork {
3874    /// Create a new fork with no dependencies with the given resolver
3875    /// environment.
3876    fn new(env: ResolverEnvironment) -> Self {
3877        Self {
3878            dependencies: vec![],
3879            conflicts: crate::FxHashbrownSet::default(),
3880            env,
3881        }
3882    }
3883
3884    /// Add a dependency to this fork.
3885    fn add_dependency(&mut self, dep: PubGrubDependency) {
3886        if let Some(conflicting_item) = dep.conflicting_item() {
3887            self.conflicts.insert(conflicting_item.to_owned());
3888        }
3889        self.dependencies.push(dep);
3890    }
3891
3892    /// Sets the resolver environment to the one given.
3893    ///
3894    /// Any dependency in this fork that does not satisfy the given environment
3895    /// is removed.
3896    fn set_env(&mut self, env: ResolverEnvironment) {
3897        self.env = env;
3898        self.dependencies.retain(|dep| {
3899            let marker = dep.package.marker();
3900            if self.env.included_by_marker(marker) {
3901                return true;
3902            }
3903            if let Some(conflicting_item) = dep.conflicting_item() {
3904                self.conflicts.remove(&conflicting_item);
3905            }
3906            false
3907        });
3908    }
3909
3910    /// Returns true if any of the dependencies in this fork contain a
3911    /// dependency with the given package and extra values.
3912    fn contains_conflicting_item(&self, item: ConflictItemRef<'_>) -> bool {
3913        self.conflicts.contains(&item)
3914    }
3915
3916    /// Include or Exclude the given groups from this fork.
3917    ///
3918    /// This removes all dependencies matching the given conflicting groups.
3919    ///
3920    /// If the exclusion rules would result in a fork with an unsatisfiable
3921    /// resolver environment, then this returns `None`.
3922    fn filter(
3923        mut self,
3924        rules: impl IntoIterator<Item = Result<ConflictItem, ConflictItem>>,
3925    ) -> Option<Self> {
3926        self.env = self.env.filter_by_group(rules)?;
3927        self.dependencies.retain(|dep| {
3928            let Some(conflicting_item) = dep.conflicting_item() else {
3929                return true;
3930            };
3931            if self.env.included_by_group(conflicting_item) {
3932                return true;
3933            }
3934            match conflicting_item.kind() {
3935                // We should not filter entire projects unless they're a top-level dependency
3936                // Otherwise, we'll fail to solve for children of the project, like extras
3937                ConflictKindRef::Project => {
3938                    if dep.parent.is_some() {
3939                        return true;
3940                    }
3941                }
3942                ConflictKindRef::Group(_) => {}
3943                ConflictKindRef::Extra(_) => {}
3944            }
3945            self.conflicts.remove(&conflicting_item);
3946            false
3947        });
3948        Some(self)
3949    }
3950
3951    /// Compare forks, preferring forks with g `requires-python` requirements.
3952    fn cmp_requires_python(&self, other: &Self) -> Ordering {
3953        // A higher `requires-python` requirement indicates a _higher-priority_ fork.
3954        //
3955        // This ordering ensures that we prefer choosing the highest version for each fork based on
3956        // its `requires-python` requirement.
3957        //
3958        // The reverse would prefer choosing fewer versions, at the cost of using older package
3959        // versions on newer Python versions. For example, if reversed, we'd prefer to solve `<3.7
3960        // before solving `>=3.7`, since the resolution produced by the former might work for the
3961        // latter, but the inverse is unlikely to be true.
3962        let self_bound = self.env.requires_python().unwrap_or_default();
3963        let other_bound = other.env.requires_python().unwrap_or_default();
3964        self_bound.lower().cmp(other_bound.lower())
3965    }
3966
3967    /// Compare forks, preferring forks with upper bounds.
3968    fn cmp_upper_bounds(&self, other: &Self) -> Ordering {
3969        // We'd prefer to solve `numpy <= 2` before solving `numpy >= 1`, since the resolution
3970        // produced by the former might work for the latter, but the inverse is unlikely to be true
3971        // due to maximum version selection. (Selecting `numpy==2.0.0` would satisfy both forks, but
3972        // selecting the latest `numpy` would not.)
3973        let self_upper_bounds = self
3974            .dependencies
3975            .iter()
3976            .filter(|dep| {
3977                dep.version
3978                    .bounding_range()
3979                    .is_some_and(|(_, upper)| !matches!(upper, Bound::Unbounded))
3980            })
3981            .count();
3982        let other_upper_bounds = other
3983            .dependencies
3984            .iter()
3985            .filter(|dep| {
3986                dep.version
3987                    .bounding_range()
3988                    .is_some_and(|(_, upper)| !matches!(upper, Bound::Unbounded))
3989            })
3990            .count();
3991
3992        self_upper_bounds.cmp(&other_upper_bounds)
3993    }
3994}
3995
impl Eq for Fork {}

/// Equality for `Fork` considers only the dependencies and the environment:
/// the `conflicts` set is derived from `dependencies` (it is maintained by
/// `add_dependency`, `set_env`, and `filter`), so it is redundant for
/// comparison.
impl PartialEq for Fork {
    fn eq(&self, other: &Self) -> bool {
        self.dependencies == other.dependencies && self.env == other.env
    }
}
4003
/// A fork of the resolver state, seeded with the package (and, optionally,
/// the version) to try first within the forked environment.
#[derive(Debug, Clone)]
pub(crate) struct VersionFork {
    /// The environment to use in the fork.
    env: ResolverEnvironment,
    /// The initial package to select in the fork.
    id: Id<PubGrubPackage>,
    /// The initial version to set for the selected package in the fork.
    version: Option<Version>,
}
4013
4014/// Enrich a [`ResolveError`] with additional information about why a given package was included.
4015fn enrich_dependency_error(
4016    error: ResolveError,
4017    id: Id<PubGrubPackage>,
4018    version: &Version,
4019    pubgrub: &State<UvDependencyProvider>,
4020) -> ResolveError {
4021    let Some(name) = pubgrub.package_store[id].name_no_root() else {
4022        return error;
4023    };
4024    let chain = DerivationChainBuilder::from_state(id, version, pubgrub).unwrap_or_default();
4025    ResolveError::Dependencies(Box::new(error), name.clone(), version.clone(), chain)
4026}
4027
4028/// Compute the set of markers for which a package is known to be relevant.
4029fn find_environments(id: Id<PubGrubPackage>, state: &State<UvDependencyProvider>) -> MarkerTree {
4030    let package = &state.package_store[id];
4031    if package.is_root() {
4032        return MarkerTree::TRUE;
4033    }
4034
4035    // Retrieve the incompatibilities for the current package.
4036    let Some(incompatibilities) = state.incompatibilities.get(&id) else {
4037        return MarkerTree::FALSE;
4038    };
4039
4040    // Find all dependencies on the current package.
4041    let mut marker = MarkerTree::FALSE;
4042    for index in incompatibilities {
4043        let incompat = &state.incompatibility_store[*index];
4044        if let Kind::FromDependencyOf(id1, _, id2, _) = &incompat.kind {
4045            if id == *id2 {
4046                marker.or({
4047                    let mut marker = package.marker();
4048                    marker.and(find_environments(*id1, state));
4049                    marker
4050                });
4051            }
4052        }
4053    }
4054    marker
4055}
4056
4057#[derive(Debug, Default, Clone)]
4058struct ConflictTracker {
4059    /// How often a decision on the package was discarded due to another package decided earlier.
4060    affected: FxHashMap<Id<PubGrubPackage>, usize>,
4061    /// Package(s) to be prioritized after the next unit propagation
4062    ///
4063    /// Distilled from `affected` for fast checking in the hot loop.
4064    prioritize: Vec<Id<PubGrubPackage>>,
4065    /// How often a package was decided earlier and caused another package to be discarded.
4066    culprit: FxHashMap<Id<PubGrubPackage>, usize>,
4067    /// Package(s) to be de-prioritized after the next unit propagation
4068    ///
4069    /// Distilled from `culprit` for fast checking in the hot loop.
4070    deprioritize: Vec<Id<PubGrubPackage>>,
4071}