// uv_resolver/resolver/mod.rs
1//! Given a set of requirements, find a set of compatible packages.
2
3use std::borrow::Cow;
4use std::cmp::Ordering;
5use std::collections::{BTreeMap, BTreeSet, VecDeque};
6use std::fmt::{Display, Formatter, Write};
7use std::ops::Bound;
8use std::sync::Arc;
9use std::time::Instant;
10use std::{iter, slice, thread};
11
12use dashmap::DashMap;
13use either::Either;
14use futures::{FutureExt, StreamExt};
15use itertools::Itertools;
16use pubgrub::{Id, IncompId, Incompatibility, Kind, Range, Ranges, State};
17use rustc_hash::{FxHashMap, FxHashSet};
18use tokio::sync::mpsc::{self, Receiver, Sender};
19use tokio::sync::oneshot;
20use tokio_stream::wrappers::ReceiverStream;
21use tracing::{Level, debug, info, instrument, trace, warn};
22
23use uv_configuration::{Constraints, Excludes, Overrides};
24use uv_distribution::{ArchiveMetadata, DistributionDatabase};
25use uv_distribution_types::{
26    BuiltDist, CompatibleDist, DerivationChain, Dist, DistErrorKind, Identifier, IncompatibleDist,
27    IncompatibleSource, IncompatibleWheel, IndexCapabilities, IndexLocations, IndexMetadata,
28    IndexUrl, InstalledDist, Name, PythonRequirementKind, RemoteSource, Requirement, ResolvedDist,
29    ResolvedDistRef, SourceDist, VersionOrUrlRef, implied_markers,
30};
31use uv_git::GitResolver;
32use uv_normalize::{ExtraName, GroupName, PackageName};
33use uv_pep440::{MIN_VERSION, Version, VersionSpecifiers, release_specifiers_to_ranges};
34use uv_pep508::{
35    MarkerEnvironment, MarkerExpression, MarkerOperator, MarkerTree, MarkerValueString,
36};
37use uv_platform_tags::{IncompatibleTag, Tags};
38use uv_pypi_types::{ConflictItem, ConflictItemRef, ConflictKindRef, Conflicts, VerbatimParsedUrl};
39use uv_torch::TorchStrategy;
40use uv_types::{BuildContext, HashStrategy, InstalledPackagesProvider};
41use uv_warnings::warn_user_once;
42
43use crate::candidate_selector::{Candidate, CandidateDist, CandidateSelector};
44use crate::dependency_provider::UvDependencyProvider;
45use crate::error::{NoSolutionError, ResolveError};
46use crate::fork_indexes::ForkIndexes;
47use crate::fork_strategy::ForkStrategy;
48use crate::fork_urls::ForkUrls;
49use crate::manifest::Manifest;
50use crate::pins::FilePins;
51use crate::preferences::{PreferenceSource, Preferences};
52use crate::pubgrub::{
53    PubGrubDependency, PubGrubPackage, PubGrubPackageInner, PubGrubPriorities, PubGrubPython,
54};
55use crate::python_requirement::PythonRequirement;
56use crate::resolution::ResolverOutput;
57use crate::resolution_mode::ResolutionStrategy;
58pub(crate) use crate::resolver::availability::{
59    ResolverVersion, UnavailableErrorChain, UnavailablePackage, UnavailableReason,
60    UnavailableVersion,
61};
62use crate::resolver::batch_prefetch::BatchPrefetcher;
63pub use crate::resolver::derivation::DerivationChainBuilder;
64pub use crate::resolver::environment::ResolverEnvironment;
65use crate::resolver::environment::{
66    ForkingPossibility, fork_version_by_marker, fork_version_by_python_requirement,
67};
68pub(crate) use crate::resolver::fork_map::{ForkMap, ForkSet};
69pub use crate::resolver::index::InMemoryIndex;
70use crate::resolver::indexes::Indexes;
71pub use crate::resolver::provider::{
72    DefaultResolverProvider, MetadataResponse, PackageVersionsResult, ResolverProvider,
73    VersionsResponse, WheelMetadataResult,
74};
75pub use crate::resolver::reporter::{BuildId, Reporter};
76use crate::resolver::system::SystemDependency;
77pub(crate) use crate::resolver::urls::Urls;
78use crate::universal_marker::{ConflictMarker, UniversalMarker};
79use crate::yanks::AllowedYanks;
80use crate::{
81    DependencyMode, ExcludeNewer, Exclusions, FlatIndex, Options, ResolutionMode, VersionMap,
82    marker,
83};
84pub(crate) use provider::MetadataUnavailable;
85
86mod availability;
87mod batch_prefetch;
88mod derivation;
89mod environment;
90mod fork_map;
91mod index;
92mod indexes;
93mod provider;
94mod reporter;
95mod system;
96mod urls;
97
/// The number of conflicts a package may accumulate before we re-prioritize and backtrack.
///
/// NOTE(review): the consumer of this threshold is outside this chunk — presumably the
/// conflict-tracking/reprioritization path (see `reprioritize_conflicts`); confirm.
const CONFLICT_THRESHOLD: usize = 5;
100
/// A package resolver: pairs the shared resolution state with a metadata
/// provider that fetches package versions and distribution metadata.
pub struct Resolver<Provider: ResolverProvider, InstalledPackages: InstalledPackagesProvider> {
    /// State shared between the prefetcher and the PubGrub solver, across all forks.
    state: ResolverState<InstalledPackages>,
    /// The backend used to fetch package and version metadata.
    provider: Provider,
}
105
/// State that is shared between the prefetcher and the PubGrub solver during
/// resolution, across all forks.
struct ResolverState<InstalledPackages: InstalledPackagesProvider> {
    /// The name of the top-level project being resolved, if any.
    project: Option<PackageName>,
    /// The direct requirements, taken from the manifest.
    requirements: Vec<Requirement>,
    /// Constraints from the manifest, applied on top of the requirements.
    constraints: Constraints,
    overrides: Overrides,
    excludes: Excludes,
    /// Version preferences from the manifest (extended with resolved versions
    /// as forks complete).
    preferences: Preferences,
    /// Shared Git resolver handle.
    git: GitResolver,
    capabilities: IndexCapabilities,
    locations: IndexLocations,
    exclusions: Exclusions,
    /// Per-package URL requirements, derived from the manifest.
    urls: Urls,
    /// Per-package index requirements, derived from the manifest.
    indexes: Indexes,
    /// Whether to resolve direct dependencies only, or transitively.
    dependency_mode: DependencyMode,
    hasher: HashStrategy,
    /// The environment to resolve for (marker environment and/or universal mode).
    env: ResolverEnvironment,
    // The environment of the current Python interpreter.
    current_environment: MarkerEnvironment,
    tags: Option<Tags>,
    python_requirement: PythonRequirement,
    conflicts: Conflicts,
    workspace_members: BTreeSet<PackageName>,
    /// Candidate-version selection strategy, derived from the options, manifest,
    /// and environment.
    selector: CandidateSelector,
    /// Shared in-memory cache of fetched package and version metadata.
    index: InMemoryIndex,
    installed_packages: InstalledPackages,
    /// Incompatibilities for packages that are entirely unavailable.
    unavailable_packages: DashMap<PackageName, UnavailablePackage>,
    /// Incompatibilities for packages that are unavailable at specific versions.
    incomplete_packages: DashMap<PackageName, DashMap<Version, MetadataUnavailable>>,
    /// The options that were used to configure this resolver.
    options: Options,
    /// The reporter to use for this resolver.
    reporter: Option<Arc<dyn Reporter>>,
}
142
143impl<'a, Context: BuildContext, InstalledPackages: InstalledPackagesProvider>
144    Resolver<DefaultResolverProvider<'a, Context>, InstalledPackages>
145{
146    /// Initialize a new resolver using the default backend doing real requests.
147    ///
148    /// Reads the flat index entries.
149    ///
150    /// # Marker environment
151    ///
152    /// The marker environment is optional.
153    ///
154    /// When a marker environment is not provided, the resolver is said to be
155    /// in "universal" mode. When in universal mode, the resolution produced
156    /// may contain multiple versions of the same package. And thus, in order
157    /// to use the resulting resolution, there must be a "universal"-aware
158    /// reader of the resolution that knows to exclude distributions that can't
159    /// be used in the current environment.
160    ///
161    /// When a marker environment is provided, the resolver is in
162    /// "non-universal" mode, which corresponds to standard `pip` behavior that
163    /// works only for a specific marker environment.
164    pub fn new(
165        manifest: Manifest,
166        options: Options,
167        python_requirement: &'a PythonRequirement,
168        env: ResolverEnvironment,
169        current_environment: &MarkerEnvironment,
170        conflicts: Conflicts,
171        tags: Option<&'a Tags>,
172        flat_index: &'a FlatIndex,
173        index: &'a InMemoryIndex,
174        hasher: &'a HashStrategy,
175        build_context: &'a Context,
176        installed_packages: InstalledPackages,
177        database: DistributionDatabase<'a, Context>,
178    ) -> Result<Self, ResolveError> {
179        let provider = DefaultResolverProvider::new(
180            database,
181            flat_index,
182            tags,
183            python_requirement.target(),
184            AllowedYanks::from_manifest(&manifest, &env, options.dependency_mode),
185            hasher,
186            options.exclude_newer.clone(),
187            build_context.build_options(),
188            build_context.capabilities(),
189        );
190
191        Self::new_custom_io(
192            manifest,
193            options,
194            hasher,
195            env,
196            current_environment,
197            tags.cloned(),
198            python_requirement,
199            conflicts,
200            index,
201            build_context.git(),
202            build_context.capabilities(),
203            build_context.locations(),
204            provider,
205            installed_packages,
206        )
207    }
208}
209
impl<Provider: ResolverProvider, InstalledPackages: InstalledPackagesProvider>
    Resolver<Provider, InstalledPackages>
{
    /// Initialize a new resolver using a user provided backend.
    ///
    /// Builds the shared [`ResolverState`] from the manifest and the provided
    /// handles; cheap-to-clone handles (index, Git resolver, capabilities,
    /// locations, hasher) are cloned into the state, while the manifest's
    /// fields are moved in.
    pub fn new_custom_io(
        manifest: Manifest,
        options: Options,
        hasher: &HashStrategy,
        env: ResolverEnvironment,
        current_environment: &MarkerEnvironment,
        tags: Option<Tags>,
        python_requirement: &PythonRequirement,
        conflicts: Conflicts,
        index: &InMemoryIndex,
        git: &GitResolver,
        capabilities: &IndexCapabilities,
        locations: &IndexLocations,
        provider: Provider,
        installed_packages: InstalledPackages,
    ) -> Result<Self, ResolveError> {
        let state = ResolverState {
            index: index.clone(),
            git: git.clone(),
            capabilities: capabilities.clone(),
            // Derive the selection strategy and per-package URL/index maps
            // from the manifest before its fields are moved out below.
            selector: CandidateSelector::for_resolution(&options, &manifest, &env),
            dependency_mode: options.dependency_mode,
            urls: Urls::from_manifest(&manifest, &env, git, options.dependency_mode),
            indexes: Indexes::from_manifest(&manifest, &env, options.dependency_mode),
            project: manifest.project,
            workspace_members: manifest.workspace_members,
            requirements: manifest.requirements,
            constraints: manifest.constraints,
            overrides: manifest.overrides,
            excludes: manifest.excludes,
            preferences: manifest.preferences,
            exclusions: manifest.exclusions,
            hasher: hasher.clone(),
            locations: locations.clone(),
            env,
            current_environment: current_environment.clone(),
            tags,
            python_requirement: python_requirement.clone(),
            conflicts,
            installed_packages,
            unavailable_packages: DashMap::default(),
            incomplete_packages: DashMap::default(),
            options,
            // No reporter until `with_reporter` is called.
            reporter: None,
        };
        Ok(Self { state, provider })
    }

    /// Set the [`Reporter`] to use for this installer.
    ///
    /// The reporter is attached to both the resolver state and the provider
    /// (the latter via its distribution reporter).
    #[must_use]
    pub fn with_reporter(self, reporter: Arc<dyn Reporter>) -> Self {
        Self {
            state: ResolverState {
                reporter: Some(reporter.clone()),
                ..self.state
            },
            provider: self
                .provider
                .with_reporter(reporter.into_distribution_reporter()),
        }
    }

    /// Resolve a set of requirements into a set of pinned versions.
    ///
    /// Runs the metadata fetcher (async, on the current runtime) and the
    /// PubGrub solver (synchronous, on a dedicated OS thread) concurrently,
    /// connected by a request channel; returns once both have completed.
    pub async fn resolve(self) -> Result<ResolverOutput, ResolveError> {
        // Both halves need shared access to the state and provider.
        let state = Arc::new(self.state);
        let provider = Arc::new(self.provider);

        // A channel to fetch package metadata (e.g., given `flask`, fetch all versions) and version
        // metadata (e.g., given `flask==1.0.0`, fetch the metadata for that version).
        // Channel size is set large to accommodate batch prefetching.
        let (request_sink, request_stream) = mpsc::channel(300);

        // Run the fetcher.
        let requests_fut = state.clone().fetch(provider.clone(), request_stream).fuse();

        // Spawn the PubGrub solver on a dedicated thread.
        let solver = state.clone();
        let (tx, rx) = oneshot::channel();
        thread::Builder::new()
            .name("uv-resolver".into())
            .spawn(move || {
                let result = solver.solve(&request_sink);

                // This may fail if the main thread returned early due to an error.
                let _ = tx.send(result);
            })
            .unwrap();

        // If the solver thread's sender is dropped without delivering a result,
        // surface it as a closed-channel error.
        let resolve_fut = async move { rx.await.map_err(|_| ResolveError::ChannelClosed) };

        // Wait for both to complete; either side failing aborts the join.
        let ((), resolution) = tokio::try_join!(requests_fut, resolve_fut)?;

        state.on_complete();
        resolution
    }
}
311
312impl<InstalledPackages: InstalledPackagesProvider> ResolverState<InstalledPackages> {
313    #[instrument(skip_all)]
314    fn solve(
315        self: Arc<Self>,
316        request_sink: &Sender<Request>,
317    ) -> Result<ResolverOutput, ResolveError> {
318        debug!(
319            "Solving with installed Python version: {}",
320            self.python_requirement.exact()
321        );
322        debug!(
323            "Solving with target Python version: {}",
324            self.python_requirement.target()
325        );
326        if !self.options.exclude_newer.is_empty() {
327            debug!("Solving with exclude-newer: {}", self.options.exclude_newer);
328        }
329
330        let mut visited = FxHashSet::default();
331
332        let root = PubGrubPackage::from(PubGrubPackageInner::Root(self.project.clone()));
333        let pubgrub = State::init(root.clone(), MIN_VERSION.clone());
334        let prefetcher = BatchPrefetcher::new(
335            self.capabilities.clone(),
336            self.index.clone(),
337            request_sink.clone(),
338        );
339        let state = ForkState::new(
340            pubgrub,
341            self.env.clone(),
342            self.python_requirement.clone(),
343            prefetcher,
344        );
345        let mut preferences = self.preferences.clone();
346        let mut forked_states = self.env.initial_forked_states(state)?;
347        let mut resolutions = vec![];
348
349        'FORK: while let Some(mut state) = forked_states.pop() {
350            if let Some(split) = state.env.end_user_fork_display() {
351                let requires_python = state.python_requirement.target();
352                debug!("Solving {split} (requires-python: {requires_python:?})");
353            }
354            let start = Instant::now();
355            loop {
356                let highest_priority_pkg =
357                    if let Some(initial) = state.initial_id.take() {
358                        // If we just forked based on `requires-python`, we can skip unit
359                        // propagation, since we already propagated the package that initiated
360                        // the fork.
361                        initial
362                    } else {
363                        // Run unit propagation.
364                        let result = state.pubgrub.unit_propagation(state.next);
365                        match result {
366                            Err(err) => {
367                                // If unit propagation failed, there is no solution.
368                                return Err(self.convert_no_solution_err(
369                                    err,
370                                    state.fork_urls,
371                                    state.fork_indexes,
372                                    state.env,
373                                    self.current_environment.clone(),
374                                    Some(&self.options.exclude_newer),
375                                    &visited,
376                                ));
377                            }
378                            Ok(conflicts) => {
379                                for (affected, incompatibility) in conflicts {
380                                    // Conflict tracking: If there was a conflict, track affected and
381                                    // culprit for all root cause incompatibilities
382                                    state.record_conflict(affected, None, incompatibility);
383                                }
384                            }
385                        }
386
387                        // Pre-visit all candidate packages, to allow metadata to be fetched in parallel.
388                        if self.dependency_mode.is_transitive() {
389                            Self::pre_visit(
390                                state
391                                    .pubgrub
392                                    .partial_solution
393                                    .prioritized_packages()
394                                    .map(|(id, range)| (&state.pubgrub.package_store[id], range)),
395                                &self.urls,
396                                &self.indexes,
397                                &state.python_requirement,
398                                request_sink,
399                            )?;
400                        }
401
402                        Self::reprioritize_conflicts(&mut state);
403
404                        trace!(
405                            "Assigned packages: {}",
406                            state
407                                .pubgrub
408                                .partial_solution
409                                .extract_solution()
410                                .filter(|(p, _)| !state.pubgrub.package_store[*p].is_proxy())
411                                .map(|(p, v)| format!("{}=={}", state.pubgrub.package_store[p], v))
412                                .join(", ")
413                        );
414                        // Choose a package.
415                        // We aren't allowed to use the term intersection as it would extend the
416                        // mutable borrow of `state`.
417                        let Some((highest_priority_pkg, _)) =
418                            state.pubgrub.partial_solution.pick_highest_priority_pkg(
419                                |id, _range| state.priorities.get(&state.pubgrub.package_store[id]),
420                            )
421                        else {
422                            // All packages have been assigned, the fork has been successfully resolved
423                            if tracing::enabled!(Level::DEBUG) {
424                                state.prefetcher.log_tried_versions();
425                            }
426                            debug!(
427                                "{} resolution took {:.3}s",
428                                state.env,
429                                start.elapsed().as_secs_f32()
430                            );
431
432                            let resolution = state.into_resolution();
433
434                            // Walk over the selected versions, and mark them as preferences. We have to
435                            // add forks back as to not override the preferences from the lockfile for
436                            // the next fork
437                            //
438                            // If we're using a resolution mode that varies based on whether a dependency is
439                            // direct or transitive, skip preferences, as we risk adding a preference from
440                            // one fork (in which it's a transitive dependency) to another fork (in which
441                            // it's direct).
442                            if matches!(
443                                self.options.resolution_mode,
444                                ResolutionMode::Lowest | ResolutionMode::Highest
445                            ) {
446                                for (package, version) in &resolution.nodes {
447                                    preferences.insert(
448                                        package.name.clone(),
449                                        package.index.clone(),
450                                        resolution
451                                            .env
452                                            .try_universal_markers()
453                                            .unwrap_or(UniversalMarker::TRUE),
454                                        version.clone(),
455                                        PreferenceSource::Resolver,
456                                    );
457                                }
458                            }
459
460                            resolutions.push(resolution);
461                            continue 'FORK;
462                        };
463                        trace!(
464                            "Chose package for decision: {}. remaining choices: {}",
465                            state.pubgrub.package_store[highest_priority_pkg],
466                            state
467                                .pubgrub
468                                .partial_solution
469                                .undecided_packages()
470                                .filter(|(p, _)| !state.pubgrub.package_store[**p].is_proxy())
471                                .map(|(p, _)| state.pubgrub.package_store[*p].to_string())
472                                .join(", ")
473                        );
474
475                        highest_priority_pkg
476                    };
477
478                state.next = highest_priority_pkg;
479
480                // TODO(charlie): Remove as many usages of `next_package` as we can.
481                let next_id = state.next;
482                let next_package = &state.pubgrub.package_store[state.next];
483
484                let url = next_package
485                    .name()
486                    .and_then(|name| state.fork_urls.get(name));
487                let index = next_package
488                    .name()
489                    .and_then(|name| state.fork_indexes.get(name));
490
491                // Consider:
492                // ```toml
493                // dependencies = [
494                //   "iniconfig == 1.1.1 ; python_version < '3.12'",
495                //   "iniconfig @ https://files.pythonhosted.org/packages/ef/a6/62565a6e1cf69e10f5727360368e451d4b7f58beeac6173dc9db836a5b46/iniconfig-2.0.0-py3-none-any.whl ; python_version >= '3.12'",
496                // ]
497                // ```
498                // In the `python_version < '3.12'` case, we haven't pre-visited `iniconfig` yet,
499                // since we weren't sure whether it might also be a URL requirement when
500                // transforming the requirements. For that case, we do another request here
501                // (idempotent due to caching).
502                self.request_package(next_package, url, index, request_sink)?;
503
504                let version = if let Some(version) = state.initial_version.take() {
505                    // If we just forked based on platform support, we can skip version selection,
506                    // since the fork operation itself already selected the appropriate version for
507                    // the platform.
508                    version
509                } else {
510                    let term_intersection = state
511                        .pubgrub
512                        .partial_solution
513                        .term_intersection_for_package(next_id)
514                        .expect("a package was chosen but we don't have a term");
515                    let decision = self.choose_version(
516                        next_package,
517                        next_id,
518                        index.map(IndexMetadata::url),
519                        term_intersection.unwrap_positive(),
520                        &mut state.pins,
521                        &preferences,
522                        &state.fork_urls,
523                        &state.env,
524                        &state.python_requirement,
525                        &state.pubgrub,
526                        &mut visited,
527                        request_sink,
528                    )?;
529
530                    // Pick the next compatible version.
531                    let Some(version) = decision else {
532                        debug!("No compatible version found for: {next_package}");
533
534                        let term_intersection = state
535                            .pubgrub
536                            .partial_solution
537                            .term_intersection_for_package(next_id)
538                            .expect("a package was chosen but we don't have a term");
539
540                        if let PubGrubPackageInner::Package { name, .. } = &**next_package {
541                            // Check if the decision was due to the package being unavailable
542                            if let Some(entry) = self.unavailable_packages.get(name) {
543                                state
544                                    .pubgrub
545                                    .add_incompatibility(Incompatibility::custom_term(
546                                        next_id,
547                                        term_intersection.clone(),
548                                        UnavailableReason::Package(entry.clone()),
549                                    ));
550                                continue;
551                            }
552                        }
553
554                        state
555                            .pubgrub
556                            .add_incompatibility(Incompatibility::no_versions(
557                                next_id,
558                                term_intersection.clone(),
559                            ));
560                        continue;
561                    };
562
563                    let version = match version {
564                        ResolverVersion::Unforked(version) => version,
565                        ResolverVersion::Forked(forks) => {
566                            forked_states.extend(self.version_forks_to_fork_states(state, forks));
567                            continue 'FORK;
568                        }
569                        ResolverVersion::Unavailable(version, reason) => {
570                            state.add_unavailable_version(version, reason);
571                            continue;
572                        }
573                    };
574
575                    // Only consider registry packages for prefetch.
576                    if url.is_none() {
577                        state.prefetcher.prefetch_batches(
578                            next_package,
579                            index,
580                            &version,
581                            term_intersection.unwrap_positive(),
582                            state
583                                .pubgrub
584                                .partial_solution
585                                .unchanging_term_for_package(next_id),
586                            &state.python_requirement,
587                            &self.selector,
588                            &state.env,
589                        )?;
590                    }
591
592                    version
593                };
594
595                state.prefetcher.version_tried(next_package, &version);
596
597                self.on_progress(next_package, &version);
598
599                if !state
600                    .added_dependencies
601                    .entry(next_id)
602                    .or_default()
603                    .insert(version.clone())
604                {
605                    // `dep_incompats` are already in `incompatibilities` so we know there are not satisfied
606                    // terms and can add the decision directly.
607                    state
608                        .pubgrub
609                        .partial_solution
610                        .add_decision(next_id, version);
611                    continue;
612                }
613
614                // Retrieve that package dependencies.
615                let forked_deps = self.get_dependencies_forking(
616                    next_id,
617                    next_package,
618                    &version,
619                    &state.pins,
620                    &state.fork_urls,
621                    &state.env,
622                    &state.python_requirement,
623                    &state.pubgrub,
624                )?;
625
626                match forked_deps {
627                    ForkedDependencies::Unavailable(reason) => {
628                        // Then here, if we get a reason that we consider unrecoverable, we should
629                        // show the derivation chain.
630                        state
631                            .pubgrub
632                            .add_incompatibility(Incompatibility::custom_version(
633                                next_id,
634                                version.clone(),
635                                UnavailableReason::Version(reason),
636                            ));
637                    }
638                    ForkedDependencies::Unforked(dependencies) => {
639                        // Enrich the state with any URLs, etc.
640                        state
641                            .visit_package_version_dependencies(
642                                next_id,
643                                &version,
644                                &self.urls,
645                                &self.indexes,
646                                &dependencies,
647                                &self.git,
648                                &self.workspace_members,
649                                self.selector.resolution_strategy(),
650                            )
651                            .map_err(|err| {
652                                enrich_dependency_error(err, next_id, &version, &state.pubgrub)
653                            })?;
654
655                        // Emit a request to fetch the metadata for each registry package.
656                        self.visit_dependencies(&dependencies, &state, request_sink)
657                            .map_err(|err| {
658                                enrich_dependency_error(err, next_id, &version, &state.pubgrub)
659                            })?;
660
661                        // Add the dependencies to the state.
662                        state.add_package_version_dependencies(next_id, &version, dependencies);
663                    }
664                    ForkedDependencies::Forked {
665                        mut forks,
666                        diverging_packages,
667                    } => {
668                        debug!(
669                            "Pre-fork {} took {:.3}s",
670                            state.env,
671                            start.elapsed().as_secs_f32()
672                        );
673
674                        // Prioritize the forks.
675                        match (self.options.fork_strategy, self.options.resolution_mode) {
676                            (ForkStrategy::Fewest, _) | (_, ResolutionMode::Lowest) => {
677                                // Prefer solving forks with lower Python bounds, since they're more
678                                // likely to produce solutions that work for forks with higher
679                                // Python bounds (whereas the inverse is not true).
680                                forks.sort_by(|a, b| {
681                                    a.cmp_requires_python(b)
682                                        .reverse()
683                                        .then_with(|| a.cmp_upper_bounds(b))
684                                });
685                            }
686                            (ForkStrategy::RequiresPython, _) => {
687                                // Otherwise, prefer solving forks with higher Python bounds, since
688                                // we want to prioritize choosing the latest-compatible package
689                                // version for each Python version.
690                                forks.sort_by(|a, b| {
691                                    a.cmp_requires_python(b).then_with(|| a.cmp_upper_bounds(b))
692                                });
693                            }
694                        }
695
696                        for new_fork_state in self.forks_to_fork_states(
697                            state,
698                            &version,
699                            forks,
700                            request_sink,
701                            &diverging_packages,
702                        ) {
703                            forked_states.push(new_fork_state?);
704                        }
705                        continue 'FORK;
706                    }
707                }
708            }
709        }
710        if resolutions.len() > 1 {
711            info!(
712                "Solved your requirements for {} environments",
713                resolutions.len()
714            );
715        }
716        if tracing::enabled!(Level::DEBUG) {
717            for resolution in &resolutions {
718                if let Some(env) = resolution.env.end_user_fork_display() {
719                    let packages: FxHashSet<_> = resolution
720                        .nodes
721                        .keys()
722                        .map(|package| &package.name)
723                        .collect();
724                    debug!(
725                        "Distinct solution for {env} with {} package(s)",
726                        packages.len()
727                    );
728                }
729            }
730        }
731        for resolution in &resolutions {
732            Self::trace_resolution(resolution);
733        }
734        ResolverOutput::from_state(
735            &resolutions,
736            &self.requirements,
737            &self.constraints,
738            &self.overrides,
739            &self.preferences,
740            &self.index,
741            &self.git,
742            &self.python_requirement,
743            &self.conflicts,
744            self.selector.resolution_strategy(),
745            self.options.clone(),
746        )
747    }
748
749    /// Change the priority of often conflicting packages and backtrack.
750    ///
751    /// To be called after unit propagation.
752    fn reprioritize_conflicts(state: &mut ForkState) {
753        for package in state.conflict_tracker.prioritize.drain(..) {
754            let changed = state
755                .priorities
756                .mark_conflict_early(&state.pubgrub.package_store[package]);
757            if changed {
758                debug!(
759                    "Package {} has too many conflicts (affected), prioritizing",
760                    &state.pubgrub.package_store[package]
761                );
762            } else {
763                debug!(
764                    "Package {} has too many conflicts (affected), already {:?}",
765                    state.pubgrub.package_store[package],
766                    state.priorities.get(&state.pubgrub.package_store[package])
767                );
768            }
769        }
770
771        for package in state.conflict_tracker.deprioritize.drain(..) {
772            let changed = state
773                .priorities
774                .mark_conflict_late(&state.pubgrub.package_store[package]);
775            if changed {
776                debug!(
777                    "Package {} has too many conflicts (culprit), deprioritizing and backtracking",
778                    state.pubgrub.package_store[package],
779                );
780                let backtrack_level = state.pubgrub.backtrack_package(package);
781                if let Some(backtrack_level) = backtrack_level {
782                    debug!("Backtracked {backtrack_level} decisions");
783                } else {
784                    debug!(
785                        "Package {} is not decided, cannot backtrack",
786                        state.pubgrub.package_store[package]
787                    );
788                }
789            } else {
790                debug!(
791                    "Package {} has too many conflicts (culprit), already {:?}",
792                    state.pubgrub.package_store[package],
793                    state.priorities.get(&state.pubgrub.package_store[package])
794                );
795            }
796        }
797    }
798
799    /// When trace level logging is enabled, we dump the final
800    /// set of resolutions, including markers, to help with
801    /// debugging. Namely, this tells use precisely the state
802    /// emitted by the resolver before going off to construct a
803    /// resolution graph.
804    fn trace_resolution(combined: &Resolution) {
805        if !tracing::enabled!(Level::TRACE) {
806            return;
807        }
808        trace!("Resolution: {:?}", combined.env);
809        for edge in &combined.edges {
810            trace!(
811                "Resolution edge: {} -> {}",
812                edge.from
813                    .as_ref()
814                    .map(PackageName::as_str)
815                    .unwrap_or("ROOT"),
816                edge.to,
817            );
818            // The unwraps below are OK because `write`ing to
819            // a String can never fail (except for OOM).
820            let mut msg = String::new();
821            write!(msg, "{}", edge.from_version).unwrap();
822            if let Some(ref extra) = edge.from_extra {
823                write!(msg, " (extra: {extra})").unwrap();
824            }
825            if let Some(ref dev) = edge.from_group {
826                write!(msg, " (group: {dev})").unwrap();
827            }
828
829            write!(msg, " -> ").unwrap();
830
831            write!(msg, "{}", edge.to_version).unwrap();
832            if let Some(ref extra) = edge.to_extra {
833                write!(msg, " (extra: {extra})").unwrap();
834            }
835            if let Some(ref dev) = edge.to_group {
836                write!(msg, " (group: {dev})").unwrap();
837            }
838            if let Some(marker) = edge.marker.contents() {
839                write!(msg, " ; {marker}").unwrap();
840            }
841            trace!("Resolution edge:     {msg}");
842        }
843    }
844
845    /// Convert the dependency [`Fork`]s into [`ForkState`]s.
846    fn forks_to_fork_states<'a>(
847        &'a self,
848        current_state: ForkState,
849        version: &'a Version,
850        forks: Vec<Fork>,
851        request_sink: &'a Sender<Request>,
852        diverging_packages: &'a [PackageName],
853    ) -> impl Iterator<Item = Result<ForkState, ResolveError>> + 'a {
854        debug!(
855            "Splitting resolution on {}=={} over {} into {} resolution{} with separate markers",
856            current_state.pubgrub.package_store[current_state.next],
857            version,
858            diverging_packages
859                .iter()
860                .map(ToString::to_string)
861                .join(", "),
862            forks.len(),
863            if forks.len() == 1 { "" } else { "s" }
864        );
865        assert!(forks.len() >= 2);
866        // This is a somewhat tortured technique to ensure
867        // that our resolver state is only cloned as much
868        // as it needs to be. We basically move the state
869        // into `forked_states`, and then only clone it if
870        // there is at least one more fork to visit.
871        let package = current_state.next;
872        let mut cur_state = Some(current_state);
873        let forks_len = forks.len();
874        forks
875            .into_iter()
876            .enumerate()
877            .map(move |(i, fork)| {
878                let is_last = i == forks_len - 1;
879                let forked_state = cur_state.take().unwrap();
880                if !is_last {
881                    cur_state = Some(forked_state.clone());
882                }
883
884                let env = fork.env.clone();
885                (fork, forked_state.with_env(env))
886            })
887            .map(move |(fork, mut forked_state)| {
888                // Enrich the state with any URLs, etc.
889                forked_state
890                    .visit_package_version_dependencies(
891                        package,
892                        version,
893                        &self.urls,
894                        &self.indexes,
895                        &fork.dependencies,
896                        &self.git,
897                        &self.workspace_members,
898                        self.selector.resolution_strategy(),
899                    )
900                    .map_err(|err| {
901                        enrich_dependency_error(err, package, version, &forked_state.pubgrub)
902                    })?;
903
904                // Emit a request to fetch the metadata for each registry package.
905                self.visit_dependencies(&fork.dependencies, &forked_state, request_sink)
906                    .map_err(|err| {
907                        enrich_dependency_error(err, package, version, &forked_state.pubgrub)
908                    })?;
909
910                // Add the dependencies to the state.
911                forked_state.add_package_version_dependencies(package, version, fork.dependencies);
912
913                Ok(forked_state)
914            })
915    }
916
917    /// Convert the dependency [`Fork`]s into [`ForkState`]s.
918    #[expect(clippy::unused_self)]
919    fn version_forks_to_fork_states(
920        &self,
921        current_state: ForkState,
922        forks: Vec<VersionFork>,
923    ) -> impl Iterator<Item = ForkState> + '_ {
924        // This is a somewhat tortured technique to ensure
925        // that our resolver state is only cloned as much
926        // as it needs to be. We basically move the state
927        // into `forked_states`, and then only clone it if
928        // there is at least one more fork to visit.
929        let mut cur_state = Some(current_state);
930        let forks_len = forks.len();
931        forks.into_iter().enumerate().map(move |(i, fork)| {
932            let is_last = i == forks_len - 1;
933            let mut forked_state = cur_state.take().unwrap();
934            if !is_last {
935                cur_state = Some(forked_state.clone());
936            }
937            forked_state.initial_id = Some(fork.id);
938            forked_state.initial_version = fork.version;
939            forked_state.with_env(fork.env)
940        })
941    }
942
943    /// Visit a set of [`PubGrubDependency`] entities prior to selection.
944    fn visit_dependencies(
945        &self,
946        dependencies: &[PubGrubDependency],
947        state: &ForkState,
948        request_sink: &Sender<Request>,
949    ) -> Result<(), ResolveError> {
950        for dependency in dependencies {
951            let PubGrubDependency {
952                package,
953                version: _,
954                parent: _,
955                url: _,
956            } = dependency;
957            let url = package.name().and_then(|name| state.fork_urls.get(name));
958            let index = package.name().and_then(|name| state.fork_indexes.get(name));
959            self.visit_package(package, url, index, request_sink)?;
960        }
961        Ok(())
962    }
963
964    /// Visit a [`PubGrubPackage`] prior to selection. This should be called on a [`PubGrubPackage`]
965    /// before it is selected, to allow metadata to be fetched in parallel.
966    fn visit_package(
967        &self,
968        package: &PubGrubPackage,
969        url: Option<&VerbatimParsedUrl>,
970        index: Option<&IndexMetadata>,
971        request_sink: &Sender<Request>,
972    ) -> Result<(), ResolveError> {
973        // Ignore unresolved URL packages, i.e., packages that use a direct URL in some forks.
974        if url.is_none()
975            && package
976                .name()
977                .map(|name| self.urls.any_url(name))
978                .unwrap_or(true)
979        {
980            return Ok(());
981        }
982
983        self.request_package(package, url, index, request_sink)
984    }
985
986    fn request_package(
987        &self,
988        package: &PubGrubPackage,
989        url: Option<&VerbatimParsedUrl>,
990        index: Option<&IndexMetadata>,
991        request_sink: &Sender<Request>,
992    ) -> Result<(), ResolveError> {
993        // Only request real packages.
994        let Some(name) = package.name_no_root() else {
995            return Ok(());
996        };
997
998        if let Some(url) = url {
999            // Verify that the package is allowed under the hash-checking policy.
1000            if !self.hasher.allows_url(&url.verbatim) {
1001                return Err(ResolveError::UnhashedPackage(name.clone()));
1002            }
1003
1004            // Emit a request to fetch the metadata for this distribution.
1005            let dist = Dist::from_url(name.clone(), url.clone())?;
1006            if self.index.distributions().register(dist.distribution_id()) {
1007                request_sink.blocking_send(Request::Dist(dist))?;
1008            }
1009        } else if let Some(index) = index {
1010            // Emit a request to fetch the metadata for this package on the index.
1011            if self
1012                .index
1013                .explicit()
1014                .register((name.clone(), index.url().clone()))
1015            {
1016                request_sink.blocking_send(Request::Package(name.clone(), Some(index.clone())))?;
1017            }
1018        } else {
1019            // Emit a request to fetch the metadata for this package.
1020            if self.index.implicit().register(name.clone()) {
1021                request_sink.blocking_send(Request::Package(name.clone(), None))?;
1022            }
1023        }
1024        Ok(())
1025    }
1026
1027    /// Visit the set of [`PubGrubPackage`] candidates prior to selection. This allows us to fetch
1028    /// metadata for all packages in parallel.
1029    fn pre_visit<'data>(
1030        packages: impl Iterator<Item = (&'data PubGrubPackage, &'data Range<Version>)>,
1031        urls: &Urls,
1032        indexes: &Indexes,
1033        python_requirement: &PythonRequirement,
1034        request_sink: &Sender<Request>,
1035    ) -> Result<(), ResolveError> {
1036        // Iterate over the potential packages, and fetch file metadata for any of them. These
1037        // represent our current best guesses for the versions that we _might_ select.
1038        for (package, range) in packages {
1039            let PubGrubPackageInner::Package {
1040                name,
1041                extra: None,
1042                group: None,
1043                marker: MarkerTree::TRUE,
1044            } = &**package
1045            else {
1046                continue;
1047            };
1048            // Avoid pre-visiting packages that have any URLs in any fork. At this point we can't
1049            // tell whether they are registry distributions or which url they use.
1050            if urls.any_url(name) {
1051                continue;
1052            }
1053            // Avoid visiting packages that may use an explicit index.
1054            if indexes.contains_key(name) {
1055                continue;
1056            }
1057            request_sink.blocking_send(Request::Prefetch(
1058                name.clone(),
1059                range.clone(),
1060                python_requirement.clone(),
1061            ))?;
1062        }
1063        Ok(())
1064    }
1065
1066    /// Given a candidate package, choose the next version in range to try.
1067    ///
1068    /// Returns `None` when there are no versions in the given range, rejecting the current partial
1069    /// solution.
1070    // TODO(konsti): re-enable tracing. This trace is crucial to understanding the
1071    // tracing-durations-export diagrams, but it took ~5% resolver thread runtime for apache-airflow
1072    // when I last measured.
1073    #[cfg_attr(feature = "tracing-durations-export", instrument(skip_all, fields(%package)))]
1074    fn choose_version(
1075        &self,
1076        package: &PubGrubPackage,
1077        id: Id<PubGrubPackage>,
1078        index: Option<&IndexUrl>,
1079        range: &Range<Version>,
1080        pins: &mut FilePins,
1081        preferences: &Preferences,
1082        fork_urls: &ForkUrls,
1083        env: &ResolverEnvironment,
1084        python_requirement: &PythonRequirement,
1085        pubgrub: &State<UvDependencyProvider>,
1086        visited: &mut FxHashSet<PackageName>,
1087        request_sink: &Sender<Request>,
1088    ) -> Result<Option<ResolverVersion>, ResolveError> {
1089        match &**package {
1090            PubGrubPackageInner::Root(_) => {
1091                Ok(Some(ResolverVersion::Unforked(MIN_VERSION.clone())))
1092            }
1093
1094            PubGrubPackageInner::Python(_) => {
1095                // Dependencies on Python are only added when a package is incompatible; as such,
1096                // we don't need to do anything here.
1097                Ok(None)
1098            }
1099
1100            PubGrubPackageInner::System(_) => {
1101                // We don't care what the actual version is here, just that it's consistent across
1102                // the dependency graph.
1103                let Some(version) = range.as_singleton() else {
1104                    return Ok(None);
1105                };
1106                Ok(Some(ResolverVersion::Unforked(version.clone())))
1107            }
1108
1109            PubGrubPackageInner::Marker { name, .. }
1110            | PubGrubPackageInner::Extra { name, .. }
1111            | PubGrubPackageInner::Group { name, .. }
1112            | PubGrubPackageInner::Package { name, .. } => {
1113                if let Some(url) = package.name().and_then(|name| fork_urls.get(name)) {
1114                    self.choose_version_url(id, name, range, url, env, python_requirement, pubgrub)
1115                } else {
1116                    self.choose_version_registry(
1117                        package,
1118                        id,
1119                        name,
1120                        index,
1121                        range,
1122                        preferences,
1123                        env,
1124                        python_requirement,
1125                        pubgrub,
1126                        pins,
1127                        visited,
1128                        request_sink,
1129                    )
1130                }
1131            }
1132        }
1133    }
1134
    /// Select a version for a URL requirement. Since there is only one version per URL, we return
    /// that version if it is in range and `None` otherwise.
    ///
    /// The single version may also be reported as [`ResolverVersion::Unavailable`] when the URL's
    /// distribution is incompatible with a required platform, the target Python's wheel tags, or
    /// the distribution's `Requires-Python`.
    fn choose_version_url(
        &self,
        id: Id<PubGrubPackage>,
        name: &PackageName,
        range: &Range<Version>,
        url: &VerbatimParsedUrl,
        env: &ResolverEnvironment,
        python_requirement: &PythonRequirement,
        pubgrub: &State<UvDependencyProvider>,
    ) -> Result<Option<ResolverVersion>, ResolveError> {
        debug!(
            "Searching for a compatible version of {name} @ {} ({range})",
            url.verbatim
        );

        // Block until the metadata for this distribution is available in the shared index. An
        // unregistered entry means no fetch was ever requested, which is surfaced as an error.
        let dist = Dist::from_url(name.clone(), url.clone())?;
        let distribution_id = dist.distribution_id();
        let response = self
            .index
            .distributions()
            .wait_blocking(&distribution_id)
            .ok_or_else(|| ResolveError::UnregisteredTask(dist.to_string()))?;

        // If we failed to fetch the metadata for a URL, we can't proceed.
        let metadata = match &*response {
            MetadataResponse::Found(archive) => &archive.metadata,
            MetadataResponse::Unavailable(reason) => {
                // Record the reason so error reporting can explain the rejection.
                self.unavailable_packages
                    .insert(name.clone(), reason.into());
                return Ok(None);
            }
            // TODO(charlie): Add derivation chain for URL dependencies. In practice, this isn't
            // critical since we fetch URL dependencies _prior_ to invoking the resolver.
            MetadataResponse::Error(dist, err) => {
                return Err(ResolveError::Dist(
                    DistErrorKind::from_requested_dist(dist, &**err),
                    dist.clone(),
                    DerivationChain::default(),
                    err.clone(),
                ));
            }
        };

        let version = &metadata.version;

        // The version is incompatible with the requirement.
        if !range.contains(version) {
            return Ok(None);
        }

        // If the URL points to a pre-built wheel, and the wheel's supported Python versions don't
        // match our `Requires-Python`, mark it as incompatible.
        if let Dist::Built(dist) = &dist {
            let filename = match &dist {
                BuiltDist::Registry(dist) => &dist.best_wheel().filename,
                BuiltDist::DirectUrl(dist) => &dist.filename,
                BuiltDist::Path(dist) => &dist.filename,
            };

            // If the wheel does _not_ cover a required platform, it's incompatible.
            // (This check only applies to universal resolutions with user-required environments.)
            if env.marker_environment().is_none() && !self.options.required_environments.is_empty()
            {
                let wheel_marker = implied_markers(filename);
                // If the user explicitly marked a platform as required, ensure it has coverage.
                for environment_marker in self.options.required_environments.iter().copied() {
                    // If the platform is part of the current environment...
                    if env.included_by_marker(environment_marker)
                        && !find_environments(id, pubgrub).is_disjoint(environment_marker)
                    {
                        // ...but the wheel doesn't support it, it's incompatible.
                        if wheel_marker.is_disjoint(environment_marker) {
                            return Ok(Some(ResolverVersion::Unavailable(
                                version.clone(),
                                UnavailableVersion::IncompatibleDist(IncompatibleDist::Wheel(
                                    IncompatibleWheel::MissingPlatform(environment_marker),
                                )),
                            )));
                        }
                    }
                }
            }

            // If the wheel's Python tag doesn't match the target Python, it's incompatible.
            // NOTE(review): this branch reports the version parsed from the wheel filename rather
            // than `metadata.version` as above — presumably they always agree; confirm.
            if !python_requirement.target().matches_wheel_tag(filename) {
                return Ok(Some(ResolverVersion::Unavailable(
                    filename.version.clone(),
                    UnavailableVersion::IncompatibleDist(IncompatibleDist::Wheel(
                        IncompatibleWheel::Tag(IncompatibleTag::AbiPythonVersion),
                    )),
                )));
            }
        }

        // The version is incompatible due to its `Requires-Python` requirement.
        if let Some(requires_python) = metadata.requires_python.as_ref() {
            if !python_requirement.target().is_contained_by(requires_python) {
                // Distinguish whether the installed or the targeted Python is at fault,
                // for a clearer error message.
                let kind = if python_requirement.installed() == python_requirement.target() {
                    PythonRequirementKind::Installed
                } else {
                    PythonRequirementKind::Target
                };
                return Ok(Some(ResolverVersion::Unavailable(
                    version.clone(),
                    UnavailableVersion::IncompatibleDist(IncompatibleDist::Source(
                        IncompatibleSource::RequiresPython(requires_python.clone(), kind),
                    )),
                )));
            }
        }

        Ok(Some(ResolverVersion::Unforked(version.clone())))
    }
1249
    /// Given a candidate registry requirement, choose the next version in range to try, or `None`
    /// if there is no version in this range.
    ///
    /// May also return [`ResolverVersion::Unavailable`] for a version whose distributions are
    /// incompatible, or [`ResolverVersion::Forked`] when the chosen version requires splitting
    /// the resolution (by `Requires-Python` or platform coverage).
    fn choose_version_registry(
        &self,
        package: &PubGrubPackage,
        id: Id<PubGrubPackage>,
        name: &PackageName,
        index: Option<&IndexUrl>,
        range: &Range<Version>,
        preferences: &Preferences,
        env: &ResolverEnvironment,
        python_requirement: &PythonRequirement,
        pubgrub: &State<UvDependencyProvider>,
        pins: &mut FilePins,
        visited: &mut FxHashSet<PackageName>,
        request_sink: &Sender<Request>,
    ) -> Result<Option<ResolverVersion>, ResolveError> {
        // Wait for the metadata to be available.
        // An explicit index is keyed by `(name, index)`; otherwise, the implicit index is keyed
        // by name alone. An unregistered entry means no fetch was requested — an internal error.
        let versions_response = if let Some(index) = index {
            self.index
                .explicit()
                .wait_blocking(&(name.clone(), index.clone()))
                .ok_or_else(|| ResolveError::UnregisteredTask(name.to_string()))?
        } else {
            self.index
                .implicit()
                .wait_blocking(name)
                .ok_or_else(|| ResolveError::UnregisteredTask(name.to_string()))?
        };
        // Record that this package's version map was consulted.
        visited.insert(name.clone());

        // On a missing index or package, record the reason (for error reporting) and fall
        // through with an empty set of version maps.
        let version_maps = match *versions_response {
            VersionsResponse::Found(ref version_maps) => version_maps.as_slice(),
            VersionsResponse::NoIndex => {
                self.unavailable_packages
                    .insert(name.clone(), UnavailablePackage::NoIndex);
                &[]
            }
            VersionsResponse::Offline => {
                self.unavailable_packages
                    .insert(name.clone(), UnavailablePackage::Offline);
                &[]
            }
            VersionsResponse::NotFound => {
                self.unavailable_packages
                    .insert(name.clone(), UnavailablePackage::NotFound);
                &[]
            }
        };

        debug!("Searching for a compatible version of {package} ({range})");

        // Find a version.
        let Some(candidate) = self.selector.select(
            name,
            range,
            version_maps,
            preferences,
            &self.installed_packages,
            &self.exclusions,
            index,
            env,
            self.tags.as_ref(),
        ) else {
            // Short circuit: we couldn't find _any_ versions for a package.
            return Ok(None);
        };

        let dist = match candidate.dist() {
            CandidateDist::Compatible(dist) => dist,
            CandidateDist::Incompatible {
                incompatible_dist: incompatibility,
                prioritized_dist: _,
            } => {
                // If the version is incompatible because no distributions are compatible, exit early.
                return Ok(Some(ResolverVersion::Unavailable(
                    candidate.version().clone(),
                    // TODO(charlie): We can avoid this clone; the candidate is dropped here and
                    // owns the incompatibility.
                    UnavailableVersion::IncompatibleDist(incompatibility.clone()),
                )));
            }
        };

        // Check whether the version is incompatible due to its Python requirement.
        if let Some((requires_python, incompatibility)) =
            Self::check_requires_python(dist, python_requirement)
        {
            // In universal resolutions, we may be able to split the resolution by Python
            // version instead of rejecting the candidate outright.
            if matches!(self.options.fork_strategy, ForkStrategy::RequiresPython) {
                if env.marker_environment().is_none() {
                    let forks = fork_version_by_python_requirement(
                        requires_python,
                        python_requirement,
                        env,
                    );
                    if !forks.is_empty() {
                        debug!(
                            "Forking Python requirement `{}` on `{}` for {}=={} ({})",
                            python_requirement.target(),
                            requires_python,
                            name,
                            candidate.version(),
                            forks
                                .iter()
                                .map(ToString::to_string)
                                .collect::<Vec<_>>()
                                .join(", ")
                        );
                        // Each fork restarts version selection for this package in its own
                        // environment (hence `version: None`).
                        let forks = forks
                            .into_iter()
                            .map(|env| VersionFork {
                                env,
                                id,
                                version: None,
                            })
                            .collect();
                        return Ok(Some(ResolverVersion::Forked(forks)));
                    }
                }
            }

            return Ok(Some(ResolverVersion::Unavailable(
                candidate.version().clone(),
                UnavailableVersion::IncompatibleDist(incompatibility),
            )));
        }

        // Check whether this version covers all supported platforms; and, if not, generate a fork.
        if let Some(forked) = self.fork_version_registry(
            &candidate,
            dist,
            version_maps,
            package,
            id,
            name,
            index,
            range,
            preferences,
            env,
            pubgrub,
            pins,
            request_sink,
        )? {
            return Ok(Some(forked));
        }

        // Derive a human-readable filename for the log line below.
        let filename = match dist.for_installation() {
            ResolvedDistRef::InstallableRegistrySourceDist { sdist, .. } => sdist
                .filename()
                .unwrap_or(Cow::Borrowed("unknown filename")),
            ResolvedDistRef::InstallableRegistryBuiltDist { wheel, .. } => wheel
                .filename()
                .unwrap_or(Cow::Borrowed("unknown filename")),
            ResolvedDistRef::Installed { .. } => Cow::Borrowed("installed"),
        };

        debug!(
            "Selecting: {}=={} [{}] ({})",
            name,
            candidate.version(),
            candidate.choice_kind(),
            filename,
        );
        // Visit the selected candidate (and pin its file) before committing to the version.
        self.visit_candidate(&candidate, dist, package, name, pins, request_sink)?;

        let version = candidate.version().clone();
        Ok(Some(ResolverVersion::Unforked(version)))
    }
1418
    /// Determine whether a candidate covers all supported platforms; and, if not, generate a fork.
    ///
    /// This only ever applies to versions that lack source distributions. And, for now, we only
    /// apply it in two cases:
    ///
    /// 1. Local versions, where the non-local version has greater platform coverage. The intent is
    ///    such that, if we're resolving PyTorch, and we choose `torch==2.5.2+cpu`, we want to
    ///    fork so that we can select `torch==2.5.2` on macOS (since the `+cpu` variant doesn't
    ///    include any macOS wheels).
    /// 2. Platforms that the user explicitly marks as "required" (opt-in). For example, the user
    ///    might require that the generated resolution always includes wheels for x86 macOS, and
    ///    fails entirely if the platform is unsupported.
    ///
    /// Returns `Ok(None)` when no fork or substitution is warranted, in which case the caller
    /// proceeds with the candidate as-is. Otherwise, returns a [`ResolverVersion`] that is either
    /// `Forked` (split the resolution across environments), `Unforked` (substitute the non-local
    /// base version without forking), or `Unavailable` (a required platform cannot be satisfied).
    fn fork_version_registry(
        &self,
        candidate: &Candidate,
        dist: &CompatibleDist,
        version_maps: &[VersionMap],
        package: &PubGrubPackage,
        id: Id<PubGrubPackage>,
        name: &PackageName,
        index: Option<&IndexUrl>,
        range: &Range<Version>,
        preferences: &Preferences,
        env: &ResolverEnvironment,
        pubgrub: &State<UvDependencyProvider>,
        pins: &mut FilePins,
        request_sink: &Sender<Request>,
    ) -> Result<Option<ResolverVersion>, ResolveError> {
        // This only applies to universal resolutions.
        if env.marker_environment().is_some() {
            return Ok(None);
        }

        // If the package is already compatible with all environments (as is the case for
        // packages that include a source distribution), we don't need to fork.
        if dist.implied_markers().is_true() {
            return Ok(None);
        }

        // If the user explicitly marked a platform as required, ensure it has coverage.
        for marker in self.options.required_environments.iter().copied() {
            // If the platform is part of the current environment...
            if env.included_by_marker(marker) {
                // But isn't supported by the distribution...
                if dist.implied_markers().is_disjoint(marker)
                    && !find_environments(id, pubgrub).is_disjoint(marker)
                {
                    // Then we need to fork.
                    let Some((left, right)) = fork_version_by_marker(env, marker) else {
                        // The marker couldn't split the current environment; report the version
                        // as unavailable for the required platform instead of forking.
                        return Ok(Some(ResolverVersion::Unavailable(
                            candidate.version().clone(),
                            UnavailableVersion::IncompatibleDist(IncompatibleDist::Wheel(
                                IncompatibleWheel::MissingPlatform(marker),
                            )),
                        )));
                    };

                    debug!(
                        "Forking on required platform `{}` for {}=={} ({})",
                        marker.try_to_string().unwrap_or_else(|| "true".to_string()),
                        name,
                        candidate.version(),
                        [&left, &right]
                            .iter()
                            .map(ToString::to_string)
                            .collect::<Vec<_>>()
                            .join(", ")
                    );
                    // `version: None` forces each fork to re-select a version for this package.
                    let forks = vec![
                        VersionFork {
                            env: left,
                            id,
                            version: None,
                        },
                        VersionFork {
                            env: right,
                            id,
                            version: None,
                        },
                    ];
                    return Ok(Some(ResolverVersion::Forked(forks)));
                }
            }
        }

        // For now, we only apply this to local versions.
        if !candidate.version().is_local() {
            return Ok(None);
        }

        debug!(
            "Looking at local version: {}=={}",
            name,
            candidate.version()
        );

        // If there's a non-local version...
        // (Narrow the range to exactly the candidate's version with its local segment stripped.)
        let range = range.clone().intersection(&Range::singleton(
            candidate.version().clone().without_local(),
        ));

        let Some(base_candidate) = self.selector.select(
            name,
            &range,
            version_maps,
            preferences,
            &self.installed_packages,
            &self.exclusions,
            index,
            env,
            self.tags.as_ref(),
        ) else {
            return Ok(None);
        };
        let CandidateDist::Compatible(base_dist) = base_candidate.dist() else {
            return Ok(None);
        };

        // ...and the non-local version has greater platform support...
        // (`remainder` = environments covered by the base version but not the local one.)
        let mut remainder = {
            let mut remainder = base_dist.implied_markers();
            remainder.and(dist.implied_markers().negate());
            remainder
        };
        if remainder.is_false() {
            return Ok(None);
        }

        // If the remainder isn't relevant to the current environment, there's no need to fork.
        // For example, if we're solving for `sys_platform == 'darwin'` but the remainder is
        // `sys_platform == 'linux'`, we don't need to fork.
        if !env.included_by_marker(remainder) {
            return Ok(None);
        }

        // Similarly, if the local distribution is incompatible with the current environment, then
        // use the base distribution instead (but don't fork).
        if !env.included_by_marker(dist.implied_markers()) {
            let filename = match dist.for_installation() {
                ResolvedDistRef::InstallableRegistrySourceDist { sdist, .. } => sdist
                    .filename()
                    .unwrap_or(Cow::Borrowed("unknown filename")),
                ResolvedDistRef::InstallableRegistryBuiltDist { wheel, .. } => wheel
                    .filename()
                    .unwrap_or(Cow::Borrowed("unknown filename")),
                ResolvedDistRef::Installed { .. } => Cow::Borrowed("installed"),
            };

            debug!(
                "Preferring non-local candidate: {}=={} [{}] ({})",
                name,
                base_candidate.version(),
                base_candidate.choice_kind(),
                filename,
            );
            self.visit_candidate(
                &base_candidate,
                base_dist,
                package,
                name,
                pins,
                request_sink,
            )?;

            return Ok(Some(ResolverVersion::Unforked(
                base_candidate.version().clone(),
            )));
        }

        // If the implied markers include _some_ macOS environments, but the remainder doesn't,
        // then we can extend the implied markers to include _all_ macOS environments. Same goes for
        // Linux and Windows.
        //
        // The idea here is that the base version could support (e.g.) ARM macOS, but not Intel
        // macOS. But if _neither_ version supports Intel macOS, we'd rather use `sys_platform == 'darwin'`
        // instead of `sys_platform == 'darwin' and platform_machine == 'arm64'`, since it's much
        // simpler, and _neither_ version will succeed with Intel macOS anyway.
        for value in [
            arcstr::literal!("darwin"),
            arcstr::literal!("linux"),
            arcstr::literal!("win32"),
        ] {
            let sys_platform = MarkerTree::expression(MarkerExpression::String {
                key: MarkerValueString::SysPlatform,
                operator: MarkerOperator::Equal,
                value,
            });
            if dist.implied_markers().is_disjoint(sys_platform)
                && !remainder.is_disjoint(sys_platform)
            {
                remainder.or(sys_platform);
            }
        }

        // Otherwise, we need to fork.
        let Some((base_env, local_env)) = fork_version_by_marker(env, remainder) else {
            return Ok(None);
        };

        debug!(
            "Forking platform for {}=={} ({})",
            name,
            candidate.version(),
            [&base_env, &local_env]
                .iter()
                .map(ToString::to_string)
                .collect::<Vec<_>>()
                .join(", ")
        );
        // Pin and request metadata for both candidates, since both forks will proceed.
        self.visit_candidate(candidate, dist, package, name, pins, request_sink)?;
        self.visit_candidate(
            &base_candidate,
            base_dist,
            package,
            name,
            pins,
            request_sink,
        )?;

        // Each fork carries a pre-selected version: the base (non-local) version in the
        // remainder environment, and the local version in the environment it supports.
        let forks = vec![
            VersionFork {
                env: base_env.clone(),
                id,
                version: Some(base_candidate.version().clone()),
            },
            VersionFork {
                env: local_env.clone(),
                id,
                version: Some(candidate.version().clone()),
            },
        ];
        Ok(Some(ResolverVersion::Forked(forks)))
    }
1652
1653    /// Visit a selected candidate.
1654    fn visit_candidate(
1655        &self,
1656        candidate: &Candidate,
1657        dist: &CompatibleDist,
1658        package: &PubGrubPackage,
1659        name: &PackageName,
1660        pins: &mut FilePins,
1661        request_sink: &Sender<Request>,
1662    ) -> Result<(), ResolveError> {
1663        // We want to return a package pinned to a specific version; but we _also_ want to
1664        // store the exact file that we selected to satisfy that version.
1665        pins.insert(candidate, dist);
1666
1667        // Emit a request to fetch the metadata for this version.
1668        if matches!(&**package, PubGrubPackageInner::Package { .. }) {
1669            if self.dependency_mode.is_transitive() {
1670                let dist = dist.for_resolution();
1671                if self.index.distributions().register(dist.distribution_id()) {
1672                    if name != dist.name() {
1673                        return Err(ResolveError::MismatchedPackageName {
1674                            request: "distribution",
1675                            expected: name.clone(),
1676                            actual: dist.name().clone(),
1677                        });
1678                    }
1679                    // Verify that the package is allowed under the hash-checking policy.
1680                    if !self
1681                        .hasher
1682                        .allows_package(candidate.name(), candidate.version())
1683                    {
1684                        return Err(ResolveError::UnhashedPackage(candidate.name().clone()));
1685                    }
1686
1687                    let request = Request::from(dist);
1688                    request_sink.blocking_send(request)?;
1689                }
1690            }
1691        }
1692
1693        Ok(())
1694    }
1695
1696    /// Check if the distribution is incompatible with the Python requirement, and if so, return
1697    /// the incompatibility.
1698    fn check_requires_python<'dist>(
1699        dist: &'dist CompatibleDist,
1700        python_requirement: &PythonRequirement,
1701    ) -> Option<(&'dist VersionSpecifiers, IncompatibleDist)> {
1702        let requires_python = dist.requires_python()?;
1703        if python_requirement.target().is_contained_by(requires_python) {
1704            None
1705        } else {
1706            let incompatibility = if matches!(dist, CompatibleDist::CompatibleWheel { .. }) {
1707                IncompatibleDist::Wheel(IncompatibleWheel::RequiresPython(
1708                    requires_python.clone(),
1709                    if python_requirement.installed() == python_requirement.target() {
1710                        PythonRequirementKind::Installed
1711                    } else {
1712                        PythonRequirementKind::Target
1713                    },
1714                ))
1715            } else {
1716                IncompatibleDist::Source(IncompatibleSource::RequiresPython(
1717                    requires_python.clone(),
1718                    if python_requirement.installed() == python_requirement.target() {
1719                        PythonRequirementKind::Installed
1720                    } else {
1721                        PythonRequirementKind::Target
1722                    },
1723                ))
1724            };
1725            Some((requires_python, incompatibility))
1726        }
1727    }
1728
1729    /// Given a candidate package and version, return its dependencies.
1730    #[instrument(skip_all, fields(%package, %version))]
1731    fn get_dependencies_forking(
1732        &self,
1733        id: Id<PubGrubPackage>,
1734        package: &PubGrubPackage,
1735        version: &Version,
1736        pins: &FilePins,
1737        fork_urls: &ForkUrls,
1738        env: &ResolverEnvironment,
1739        python_requirement: &PythonRequirement,
1740        pubgrub: &State<UvDependencyProvider>,
1741    ) -> Result<ForkedDependencies, ResolveError> {
1742        let result = self.get_dependencies(
1743            id,
1744            package,
1745            version,
1746            pins,
1747            fork_urls,
1748            env,
1749            python_requirement,
1750            pubgrub,
1751        );
1752        if env.marker_environment().is_some() {
1753            result.map(|deps| match deps {
1754                Dependencies::Available(deps) | Dependencies::Unforkable(deps) => {
1755                    ForkedDependencies::Unforked(deps)
1756                }
1757                Dependencies::Unavailable(err) => ForkedDependencies::Unavailable(err),
1758            })
1759        } else {
1760            Ok(result?.fork(env, python_requirement, &self.conflicts))
1761        }
1762    }
1763
    /// Given a candidate package and version, return its dependencies.
    ///
    /// For the root package, this flattens the user-provided requirements. For a concrete
    /// package, it blocks on the package's metadata (previously requested via
    /// [`Self::visit_candidate`] — see the `wait_blocking` call below) and flattens its
    /// `Requires-Dist` entries. Virtual packages (`Marker`, `Extra`, `Group`) expand into
    /// dependencies on their corresponding base packages, pinned to the same version.
    ///
    /// Returns `Dependencies::Unavailable` when metadata could not be obtained or the
    /// distribution's `Requires-Python` is incompatible, and an error for unregistered tasks,
    /// unavailable packages, or metadata fetch failures.
    #[instrument(skip_all, fields(%package, %version))]
    fn get_dependencies(
        &self,
        id: Id<PubGrubPackage>,
        package: &PubGrubPackage,
        version: &Version,
        pins: &FilePins,
        fork_urls: &ForkUrls,
        env: &ResolverEnvironment,
        python_requirement: &PythonRequirement,
        pubgrub: &State<UvDependencyProvider>,
    ) -> Result<Dependencies, ResolveError> {
        let dependencies = match &**package {
            // The root's dependencies are the user-provided requirements; it has no extras,
            // groups, or dev dependencies of its own.
            PubGrubPackageInner::Root(_) => {
                let no_dev_deps = BTreeMap::default();
                let requirements = self.flatten_requirements(
                    &self.requirements,
                    &no_dev_deps,
                    None,
                    None,
                    None,
                    env,
                    python_requirement,
                );

                requirements
                    .flat_map(move |requirement| {
                        PubGrubDependency::from_requirement(
                            &self.conflicts,
                            requirement,
                            None,
                            Some(package),
                        )
                    })
                    .collect()
            }

            PubGrubPackageInner::Package {
                name,
                extra,
                group,
                marker: _,
            } => {
                // If we're excluding transitive dependencies, short-circuit.
                if self.dependency_mode.is_direct() {
                    return Ok(Dependencies::Unforkable(Vec::default()));
                }

                // Identify the distribution whose metadata we need: either the pinned file
                // for this (name, version), or the fork-specific URL. `task` is a human-readable
                // label used in the `UnregisteredTask` error below.
                let (distribution_id, task) = if let Some(distribution_id) =
                    pins.metadata_id(name, version)
                {
                    (
                        distribution_id.clone(),
                        pins.get(name, version)
                            .map_or_else(|| format!("{name}=={version}"), ToString::to_string),
                    )
                } else if let Some(url) = fork_urls.get(name) {
                    let dist = Dist::from_url(name.clone(), url.clone())?;
                    (dist.distribution_id(), dist.to_string())
                } else {
                    debug_assert!(
                        false,
                        "Dependencies were requested for a package without a pinned distribution"
                    );
                    return Err(ResolveError::UnregisteredTask(format!("{name}=={version}")));
                };

                // If the package does not exist in the registry or locally, we cannot fetch its dependencies
                if self.dependency_mode.is_transitive()
                    && self.unavailable_packages.get(name).is_some()
                    && self.installed_packages.get_packages(name).is_empty()
                {
                    debug_assert!(
                        false,
                        "Dependencies were requested for a package that is not available"
                    );
                    return Err(ResolveError::PackageUnavailable(name.clone()));
                }

                // Wait for the metadata to be available.
                let response = self
                    .index
                    .distributions()
                    .wait_blocking(&distribution_id)
                    .ok_or_else(|| ResolveError::UnregisteredTask(task))?;

                let metadata = match &*response {
                    MetadataResponse::Found(archive) => &archive.metadata,
                    // Metadata was fetched but unusable: record the failure for reporting and
                    // surface the version as unavailable (a resolvable condition, not an error).
                    MetadataResponse::Unavailable(reason) => {
                        let unavailable_version = UnavailableVersion::from(reason);
                        let message = unavailable_version.singular_message();
                        if let Some(err) = reason.source() {
                            // Show the detailed error for metadata parse errors.
                            warn!("{name} {message}: {err}");
                        } else {
                            warn!("{name} {message}");
                        }
                        self.incomplete_packages
                            .entry(name.clone())
                            .or_default()
                            .insert(version.clone(), reason.clone());
                        return Ok(Dependencies::Unavailable(unavailable_version));
                    }
                    // A hard failure: attach the derivation chain so the error can explain how
                    // the resolver arrived at this distribution.
                    MetadataResponse::Error(dist, err) => {
                        let chain = DerivationChainBuilder::from_state(id, version, pubgrub)
                            .unwrap_or_default();
                        return Err(ResolveError::Dist(
                            DistErrorKind::from_requested_dist(dist, &**err),
                            dist.clone(),
                            chain,
                            err.clone(),
                        ));
                    }
                };

                // If there was no requires-python on the index page, we may have an incompatible
                // distribution.
                if let Some(requires_python) = &metadata.requires_python {
                    if !python_requirement.target().is_contained_by(requires_python) {
                        return Ok(Dependencies::Unavailable(
                            UnavailableVersion::RequiresPython(requires_python.clone()),
                        ));
                    }
                }

                // Identify any system dependencies based on the index URL.
                // NOTE(review): this appears specific to the CUDA torch backend, deriving an
                // extra dependency (e.g., a CUDA runtime package) from the pinned index URL.
                let system_dependencies = self
                    .options
                    .torch_backend
                    .as_ref()
                    .filter(|torch_backend| matches!(torch_backend, TorchStrategy::Cuda { .. }))
                    .filter(|torch_backend| torch_backend.has_system_dependency(name))
                    .and_then(|_| pins.get(name, version).and_then(ResolvedDist::index))
                    .map(IndexUrl::url)
                    .and_then(SystemDependency::from_index)
                    .into_iter()
                    .inspect(|system_dependency| {
                        debug!(
                            "Adding system dependency `{}` for `{package}@{version}`",
                            system_dependency
                        );
                    })
                    .map(PubGrubDependency::from);

                // Flatten the metadata's requirements for this fork, extra, and group.
                let requirements = self.flatten_requirements(
                    &metadata.requires_dist,
                    &metadata.dependency_groups,
                    extra.as_ref(),
                    group.as_ref(),
                    Some(name),
                    env,
                    python_requirement,
                );

                // Drop user-excluded packages, convert to PubGrub dependencies, and append any
                // system dependencies identified above.
                requirements
                    .filter(|requirement| !self.excludes.contains(&requirement.name))
                    .flat_map(|requirement| {
                        PubGrubDependency::from_requirement(
                            &self.conflicts,
                            requirement,
                            group.as_ref(),
                            Some(package),
                        )
                    })
                    .chain(system_dependencies)
                    .collect()
            }

            // Python and system version constraints carry no dependencies of their own.
            PubGrubPackageInner::Python(_) => return Ok(Dependencies::Unforkable(Vec::default())),

            PubGrubPackageInner::System(_) => return Ok(Dependencies::Unforkable(Vec::default())),

            // Add a dependency on both the marker and base package.
            PubGrubPackageInner::Marker { name, marker } => {
                return Ok(Dependencies::Unforkable(
                    [MarkerTree::TRUE, *marker]
                        .into_iter()
                        .map(move |marker| PubGrubDependency {
                            package: PubGrubPackage::from(PubGrubPackageInner::Package {
                                name: name.clone(),
                                extra: None,
                                group: None,
                                marker,
                            }),
                            // Pin the companion package to the exact same version.
                            version: Range::singleton(version.clone()),
                            parent: None,
                            url: None,
                        })
                        .collect(),
                ));
            }

            // Add a dependency on both the extra and base package, with and without the marker.
            PubGrubPackageInner::Extra {
                name,
                extra,
                marker,
            } => {
                return Ok(Dependencies::Unforkable(
                    [MarkerTree::TRUE, *marker]
                        .into_iter()
                        // A trivially-true marker would otherwise yield duplicates.
                        .dedup()
                        .flat_map(move |marker| {
                            [None, Some(extra)]
                                .into_iter()
                                .map(move |extra| PubGrubDependency {
                                    package: PubGrubPackage::from(PubGrubPackageInner::Package {
                                        name: name.clone(),
                                        extra: extra.cloned(),
                                        group: None,
                                        marker,
                                    }),
                                    version: Range::singleton(version.clone()),
                                    parent: None,
                                    url: None,
                                })
                        })
                        .collect(),
                ));
            }

            // Add a dependency on the dependency group, with and without the marker.
            PubGrubPackageInner::Group {
                name,
                group,
                marker,
            } => {
                return Ok(Dependencies::Unforkable(
                    [MarkerTree::TRUE, *marker]
                        .into_iter()
                        // A trivially-true marker would otherwise yield duplicates.
                        .dedup()
                        .map(|marker| PubGrubDependency {
                            package: PubGrubPackage::from(PubGrubPackageInner::Package {
                                name: name.clone(),
                                extra: None,
                                group: Some(group.clone()),
                                marker,
                            }),
                            version: Range::singleton(version.clone()),
                            parent: None,
                            url: None,
                        })
                        .collect(),
                ));
            }
        };
        Ok(Dependencies::Available(dependencies))
    }
2013
    /// The regular and dev dependencies filtered by Python version and the markers of this fork,
    /// plus the extras dependencies of the current package (e.g., `black` depending on
    /// `black[colorama]`).
    ///
    /// When `dev` is set, only the named dependency group is considered. Otherwise, any
    /// self-referential extras (the package depending on itself with extras) are expanded
    /// transitively, so that `project[foo]` pulling in `project[bar]` yields `bar`'s
    /// requirements directly, with the self-dependency itself dropped (modulo retained
    /// version self-constraints; see below).
    fn flatten_requirements<'a>(
        &'a self,
        dependencies: &'a [Requirement],
        dev_dependencies: &'a BTreeMap<GroupName, Box<[Requirement]>>,
        extra: Option<&'a ExtraName>,
        dev: Option<&'a GroupName>,
        name: Option<&PackageName>,
        env: &'a ResolverEnvironment,
        python_requirement: &'a PythonRequirement,
    ) -> impl Iterator<Item = Cow<'a, Requirement>> {
        let python_marker = python_requirement.to_marker_tree();

        if let Some(dev) = dev {
            // Dependency groups can include the project itself, so no need to flatten recursive
            // dependencies.
            Either::Left(Either::Left(self.requirements_for_extra(
                dev_dependencies.get(dev).into_iter().flatten(),
                extra,
                env,
                python_marker,
                python_requirement,
            )))
        } else if !dependencies
            .iter()
            .any(|req| name == Some(&req.name) && !req.extras.is_empty())
        {
            // If the project doesn't define any recursive dependencies, take the fast path.
            Either::Left(Either::Right(self.requirements_for_extra(
                dependencies.iter(),
                extra,
                env,
                python_marker,
                python_requirement,
            )))
        } else {
            let mut requirements = self
                .requirements_for_extra(
                    dependencies.iter(),
                    extra,
                    env,
                    python_marker,
                    python_requirement,
                )
                .collect::<Vec<_>>();

            // Transitively process all extras that are recursively included, starting with the current
            // extra. Breadth-first over (extra, marker) pairs; `seen` guards against cycles
            // (e.g., `project[foo]` and `project[bar]` including each other).
            let mut seen = FxHashSet::<(ExtraName, MarkerTree)>::default();
            let mut queue: VecDeque<_> = requirements
                .iter()
                .filter(|req| name == Some(&req.name))
                .flat_map(|req| req.extras.iter().cloned().map(|extra| (extra, req.marker)))
                .collect();
            while let Some((extra, marker)) = queue.pop_front() {
                if !seen.insert((extra.clone(), marker)) {
                    continue;
                }
                for requirement in self.requirements_for_extra(
                    dependencies,
                    Some(&extra),
                    env,
                    python_marker,
                    python_requirement,
                ) {
                    // Conjoin the marker of the including self-dependency onto each requirement
                    // pulled in via the extra, cloning borrowed requirements as needed.
                    let requirement = match requirement {
                        Cow::Owned(mut requirement) => {
                            requirement.marker.and(marker);
                            requirement
                        }
                        Cow::Borrowed(requirement) => {
                            let mut marker = marker;
                            marker.and(requirement.marker);
                            Requirement {
                                name: requirement.name.clone(),
                                extras: requirement.extras.clone(),
                                groups: requirement.groups.clone(),
                                source: requirement.source.clone(),
                                origin: requirement.origin.clone(),
                                marker: marker.simplify_extras(slice::from_ref(&extra)),
                            }
                        }
                    };
                    if name == Some(&requirement.name) {
                        // Add each transitively included extra.
                        queue.extend(
                            requirement
                                .extras
                                .iter()
                                .cloned()
                                .map(|extra| (extra, requirement.marker)),
                        );
                    } else {
                        // Add the requirements for that extra.
                        requirements.push(Cow::Owned(requirement));
                    }
                }
            }

            // Retain any self-constraints for that extra, e.g., if `project[foo]` includes
            // `project[bar]>1.0`, as a dependency, we need to propagate `project>1.0`, in addition to
            // transitively expanding `project[bar]`.
            let mut self_constraints = vec![];
            for req in &requirements {
                if name == Some(&req.name) && !req.source.is_empty() {
                    self_constraints.push(Requirement {
                        name: req.name.clone(),
                        // Strip the extras: only the version constraint is propagated.
                        extras: Box::new([]),
                        groups: req.groups.clone(),
                        source: req.source.clone(),
                        origin: req.origin.clone(),
                        marker: req.marker,
                    });
                }
            }

            // Drop all the self-requirements now that we flattened them out.
            requirements.retain(|req| name != Some(&req.name) || req.extras.is_empty());
            requirements.extend(self_constraints.into_iter().map(Cow::Owned));

            Either::Right(requirements.into_iter())
        }
    }
2139
2140    /// The set of the regular and dev dependencies, filtered by Python version,
2141    /// the markers of this fork and the requested extra.
2142    fn requirements_for_extra<'data, 'parameters>(
2143        &'data self,
2144        dependencies: impl IntoIterator<Item = &'data Requirement> + 'parameters,
2145        extra: Option<&'parameters ExtraName>,
2146        env: &'parameters ResolverEnvironment,
2147        python_marker: MarkerTree,
2148        python_requirement: &'parameters PythonRequirement,
2149    ) -> impl Iterator<Item = Cow<'data, Requirement>> + 'parameters
2150    where
2151        'data: 'parameters,
2152    {
2153        self.overrides
2154            .apply(dependencies)
2155            .filter(move |requirement| {
2156                Self::is_requirement_applicable(
2157                    requirement,
2158                    extra,
2159                    env,
2160                    python_marker,
2161                    python_requirement,
2162                )
2163            })
2164            .flat_map(move |requirement| {
2165                iter::once(requirement.clone()).chain(self.constraints_for_requirement(
2166                    requirement,
2167                    extra,
2168                    env,
2169                    python_marker,
2170                    python_requirement,
2171                ))
2172            })
2173    }
2174
2175    /// Whether a requirement is applicable for the Python version, the markers of this fork and the
2176    /// requested extra.
2177    fn is_requirement_applicable(
2178        requirement: &Requirement,
2179        extra: Option<&ExtraName>,
2180        env: &ResolverEnvironment,
2181        python_marker: MarkerTree,
2182        python_requirement: &PythonRequirement,
2183    ) -> bool {
2184        // If the requirement isn't relevant for the current platform, skip it.
2185        match extra {
2186            Some(source_extra) => {
2187                // Only include requirements that are relevant for the current extra.
2188                if requirement.evaluate_markers(env.marker_environment(), &[]) {
2189                    return false;
2190                }
2191                if !requirement
2192                    .evaluate_markers(env.marker_environment(), slice::from_ref(source_extra))
2193                {
2194                    return false;
2195                }
2196                if !env.included_by_group(ConflictItemRef::from((&requirement.name, source_extra)))
2197                {
2198                    return false;
2199                }
2200            }
2201            None => {
2202                if !requirement.evaluate_markers(env.marker_environment(), &[]) {
2203                    return false;
2204                }
2205            }
2206        }
2207
2208        // If the requirement would not be selected with any Python version
2209        // supported by the root, skip it.
2210        if python_marker.is_disjoint(requirement.marker) {
2211            trace!(
2212                "Skipping {requirement} because of Requires-Python: {requires_python}",
2213                requires_python = python_requirement.target(),
2214            );
2215            return false;
2216        }
2217
2218        // If we're in a fork in universal mode, ignore any dependency that isn't part of
2219        // this fork (but will be part of another fork).
2220        if !env.included_by_marker(requirement.marker) {
2221            trace!("Skipping {requirement} because of {env}");
2222            return false;
2223        }
2224
2225        true
2226    }
2227
    /// The constraints applicable to the requirement, filtered by Python version, the markers of
    /// this fork and the requested extra.
    ///
    /// Each yielded constraint carries the conjunction of its own marker with the marker of the
    /// requirement it constrains, so it only applies where _both_ are true. Constraints whose
    /// combined marker is unsatisfiable (or outside this fork) are dropped.
    fn constraints_for_requirement<'data, 'parameters>(
        &'data self,
        requirement: Cow<'data, Requirement>,
        extra: Option<&'parameters ExtraName>,
        env: &'parameters ResolverEnvironment,
        python_marker: MarkerTree,
        python_requirement: &'parameters PythonRequirement,
    ) -> impl Iterator<Item = Cow<'data, Requirement>> + 'parameters
    where
        'data: 'parameters,
    {
        self.constraints
            .get(&requirement.name)
            .into_iter()
            .flatten()
            .filter_map(move |constraint| {
                // If the requirement would not be selected with any Python version
                // supported by the root, skip it.
                //
                // `constraint` is shadowed below: the rebound value is either the original
                // (borrowed) constraint, or an owned copy with the combined marker.
                let constraint = if constraint.marker.is_true() {
                    // Additionally, if the requirement is `requests ; sys_platform == 'darwin'`
                    // and the constraint is `requests ; python_version == '3.6'`, the
                    // constraint should only apply when _both_ markers are true.
                    if requirement.marker.is_true() {
                        // Neither side has a marker: use the constraint as-is.
                        Cow::Borrowed(constraint)
                    } else {
                        // Inherit the requirement's marker onto the (markerless) constraint.
                        let mut marker = constraint.marker;
                        marker.and(requirement.marker);

                        if marker.is_false() {
                            trace!(
                                "Skipping {constraint} because of disjoint markers: `{}` vs. `{}`",
                                constraint.marker.try_to_string().unwrap(),
                                requirement.marker.try_to_string().unwrap(),
                            );
                            return None;
                        }

                        Cow::Owned(Requirement {
                            name: constraint.name.clone(),
                            extras: constraint.extras.clone(),
                            groups: constraint.groups.clone(),
                            source: constraint.source.clone(),
                            origin: constraint.origin.clone(),
                            marker,
                        })
                    }
                } else {
                    let requires_python = python_requirement.target();

                    // Combine the constraint's marker with the requirement's marker.
                    let mut marker = constraint.marker;
                    marker.and(requirement.marker);

                    if marker.is_false() {
                        trace!(
                            "Skipping {constraint} because of disjoint markers: `{}` vs. `{}`",
                            constraint.marker.try_to_string().unwrap(),
                            requirement.marker.try_to_string().unwrap(),
                        );
                        return None;
                    }

                    // Additionally, if the requirement is `requests ; sys_platform == 'darwin'`
                    // and the constraint is `requests ; python_version == '3.6'`, the
                    // constraint should only apply when _both_ markers are true.
                    if python_marker.is_disjoint(marker) {
                        trace!(
                            "Skipping constraint {requirement} because of Requires-Python: {requires_python}"
                        );
                        return None;
                    }

                    // Only allocate an owned requirement if the conjunction actually
                    // changed the marker.
                    if marker == constraint.marker {
                        Cow::Borrowed(constraint)
                    } else {
                        Cow::Owned(Requirement {
                            name: constraint.name.clone(),
                            extras: constraint.extras.clone(),
                            groups: constraint.groups.clone(),
                            source: constraint.source.clone(),
                            origin: constraint.origin.clone(),
                            marker,
                        })
                    }
                };

                // If we're in a fork in universal mode, ignore any dependency that isn't part of
                // this fork (but will be part of another fork).
                // Note: this checks the (possibly combined) marker of the shadowed binding.
                if !env.included_by_marker(constraint.marker) {
                    trace!("Skipping {constraint} because of {env}");
                    return None;
                }

                // If the constraint isn't relevant for the current platform, skip it.
                match extra {
                    Some(source_extra) => {
                        if !constraint
                            .evaluate_markers(env.marker_environment(), slice::from_ref(source_extra))
                        {
                            return None;
                        }
                        if !env.included_by_group(ConflictItemRef::from((&requirement.name, source_extra)))
                        {
                            return None;
                        }
                    }
                    None => {
                        if !constraint.evaluate_markers(env.marker_environment(), &[]) {
                            return None;
                        }
                    }
                }

                Some(constraint)
            })
    }
2345
    /// Fetch the metadata for a stream of packages and versions.
    ///
    /// Drains `request_stream`, dispatching each request to [`Self::process_request`]
    /// concurrently, and records every completed response in the shared in-memory
    /// index, which unblocks any resolver task waiting on that entry.
    async fn fetch<Provider: ResolverProvider>(
        self: Arc<Self>,
        provider: Arc<Provider>,
        request_stream: Receiver<Request>,
    ) -> Result<(), ResolveError> {
        let mut response_stream = ReceiverStream::new(request_stream)
            .map(|request| self.process_request(request, &*provider).boxed_local())
            // Allow as many futures as possible to start in the background.
            // Backpressure is provided at a more granular level by `DistributionDatabase`
            // and `SourceDispatch`, as well as the bounded request channel.
            .buffer_unordered(usize::MAX);

        while let Some(response) = response_stream.next().await {
            match response? {
                // Registry (package -> versions) metadata: store under the explicit
                // index when one was queried, otherwise under the implicit index.
                Some(Response::Package(name, index, version_map)) => {
                    trace!("Received package metadata for: {name}");
                    if let Some(index) = index {
                        self.index
                            .explicit()
                            .done((name, index), Arc::new(version_map));
                    } else {
                        self.index.implicit().done(name, Arc::new(version_map));
                    }
                }
                // Metadata for an already-installed distribution.
                Some(Response::Installed { dist, metadata }) => {
                    trace!("Received installed distribution metadata for: {dist}");
                    self.index
                        .distributions()
                        .done(dist.distribution_id(), Arc::new(metadata));
                }
                // Metadata for a remote (built or source) distribution.
                Some(Response::Dist { dist, metadata }) => {
                    let dist_kind = match dist {
                        Dist::Built(_) => "built",
                        Dist::Source(_) => "source",
                    };
                    trace!("Received {dist_kind} distribution metadata for: {dist}");
                    if let MetadataResponse::Unavailable(reason) = &metadata {
                        let message = UnavailableVersion::from(reason).singular_message();
                        if let Some(err) = reason.source() {
                            // Show the detailed error for metadata parse errors.
                            warn!("{dist} {message}: {err}");
                        } else {
                            warn!("{dist} {message}");
                        }
                    }
                    // Record the response even when unavailable, so waiters observe it.
                    self.index
                        .distributions()
                        .done(dist.distribution_id(), Arc::new(metadata));
                }
                // Short-circuited requests (e.g., skipped prefetches) yield nothing.
                None => {}
            }
        }

        Ok::<(), ResolveError>(())
    }
2402
    /// Process a single metadata [`Request`], returning the corresponding
    /// [`Response`].
    ///
    /// Returns `Ok(None)` when the request is short-circuited (e.g., the package
    /// is unavailable, no compatible candidate exists, or a prefetch is skipped).
    #[instrument(skip_all, fields(%request))]
    async fn process_request<Provider: ResolverProvider>(
        &self,
        request: Request,
        provider: &Provider,
    ) -> Result<Option<Response>, ResolveError> {
        match request {
            // Fetch package metadata from the registry.
            Request::Package(package_name, index) => {
                let package_versions = provider
                    .get_package_versions(&package_name, index.as_ref())
                    .boxed_local()
                    .await
                    .map_err(ResolveError::Client)?;

                Ok(Some(Response::Package(
                    package_name,
                    index.map(IndexMetadata::into_url),
                    package_versions,
                )))
            }

            // Fetch distribution metadata from the distribution database.
            Request::Dist(dist) => {
                // Before hitting the database, see whether the registry already
                // supplied the metadata alongside the version listing.
                if let Some(version) = dist.version() {
                    if let Some(index) = dist.index() {
                        // Check the implicit indexes for pre-provided metadata.
                        let versions_response = self.index.implicit().get(dist.name());
                        if let Some(VersionsResponse::Found(version_maps)) =
                            versions_response.as_deref()
                        {
                            for version_map in version_maps {
                                // Only trust metadata that came from the same index.
                                if version_map.index() == Some(index) {
                                    let Some(metadata) = version_map.get_metadata(version) else {
                                        continue;
                                    };
                                    debug!("Found registry-provided metadata for: {dist}");
                                    return Ok(Some(Response::Dist {
                                        dist,
                                        metadata: MetadataResponse::Found(
                                            ArchiveMetadata::from_metadata23(metadata.clone()),
                                        ),
                                    }));
                                }
                            }
                        }

                        // Check the explicit indexes for pre-provided metadata.
                        let versions_response = self
                            .index
                            .explicit()
                            .get(&(dist.name().clone(), index.clone()));
                        if let Some(VersionsResponse::Found(version_maps)) =
                            versions_response.as_deref()
                        {
                            for version_map in version_maps {
                                let Some(metadata) = version_map.get_metadata(version) else {
                                    continue;
                                };
                                debug!("Found registry-provided metadata for: {dist}");
                                return Ok(Some(Response::Dist {
                                    dist,
                                    metadata: MetadataResponse::Found(
                                        ArchiveMetadata::from_metadata23(metadata.clone()),
                                    ),
                                }));
                            }
                        }
                    }
                }

                // Fall back to fetching (or building) the wheel metadata.
                let metadata = provider
                    .get_or_build_wheel_metadata(&dist)
                    .boxed_local()
                    .await?;

                // Guard against an index or build returning metadata for a
                // different package than the one requested.
                if let MetadataResponse::Found(metadata) = &metadata {
                    if &metadata.metadata.name != dist.name() {
                        return Err(ResolveError::MismatchedPackageName {
                            request: "distribution metadata",
                            expected: dist.name().clone(),
                            actual: metadata.metadata.name.clone(),
                        });
                    }
                }

                Ok(Some(Response::Dist { dist, metadata }))
            }

            // Fetch metadata for an already-installed distribution.
            Request::Installed(dist) => {
                let metadata = provider.get_installed_metadata(&dist).boxed_local().await?;

                // Same package-name consistency check as for remote distributions.
                if let MetadataResponse::Found(metadata) = &metadata {
                    if &metadata.metadata.name != dist.name() {
                        return Err(ResolveError::MismatchedPackageName {
                            request: "installed metadata",
                            expected: dist.name().clone(),
                            actual: metadata.metadata.name.clone(),
                        });
                    }
                }

                Ok(Some(Response::Installed { dist, metadata }))
            }

            // Pre-fetch the package and distribution metadata.
            Request::Prefetch(package_name, range, python_requirement) => {
                // Wait for the package metadata to become available.
                let versions_response = self
                    .index
                    .implicit()
                    .wait(&package_name)
                    .await
                    .ok_or_else(|| ResolveError::UnregisteredTask(package_name.to_string()))?;

                let version_map = match *versions_response {
                    VersionsResponse::Found(ref version_map) => version_map,
                    // Short-circuit if we did not find any versions for the package
                    VersionsResponse::NoIndex => {
                        self.unavailable_packages
                            .insert(package_name.clone(), UnavailablePackage::NoIndex);

                        return Ok(None);
                    }
                    VersionsResponse::Offline => {
                        self.unavailable_packages
                            .insert(package_name.clone(), UnavailablePackage::Offline);

                        return Ok(None);
                    }
                    VersionsResponse::NotFound => {
                        self.unavailable_packages
                            .insert(package_name.clone(), UnavailablePackage::NotFound);

                        return Ok(None);
                    }
                };

                // We don't have access to the fork state when prefetching, so assume that
                // pre-release versions are allowed.
                let env = ResolverEnvironment::universal(vec![]);

                // Try to find a compatible version. If there aren't any compatible versions,
                // short-circuit.
                let Some(candidate) = self.selector.select(
                    &package_name,
                    &range,
                    version_map,
                    &self.preferences,
                    &self.installed_packages,
                    &self.exclusions,
                    None,
                    &env,
                    self.tags.as_ref(),
                ) else {
                    return Ok(None);
                };

                // If there is not a compatible distribution, short-circuit.
                let Some(dist) = candidate.compatible() else {
                    return Ok(None);
                };

                // If the registry provided metadata for this distribution, use it.
                for version_map in version_map {
                    if let Some(metadata) = version_map.get_metadata(candidate.version()) {
                        let dist = dist.for_resolution();
                        if version_map.index() == dist.index() {
                            debug!("Found registry-provided metadata for: {dist}");

                            let metadata = MetadataResponse::Found(
                                ArchiveMetadata::from_metadata23(metadata.clone()),
                            );

                            let dist = dist.to_owned();
                            // Guard against a mismatched package name from the registry.
                            if &package_name != dist.name() {
                                return Err(ResolveError::MismatchedPackageName {
                                    request: "distribution",
                                    expected: package_name,
                                    actual: dist.name().clone(),
                                });
                            }

                            let response = match dist {
                                ResolvedDist::Installable { dist, .. } => Response::Dist {
                                    dist: (*dist).clone(),
                                    metadata,
                                },
                                ResolvedDist::Installed { dist } => Response::Installed {
                                    dist: (*dist).clone(),
                                    metadata,
                                },
                            };

                            return Ok(Some(response));
                        }
                    }
                }

                // Avoid prefetching source distributions with unbounded lower-bound ranges. This
                // often leads to failed attempts to build legacy versions of packages that are
                // incompatible with modern build tools.
                if dist.wheel().is_none() {
                    if !self.selector.use_highest_version(&package_name, &env) {
                        if let Some((lower, _)) = range.iter().next() {
                            if lower == &Bound::Unbounded {
                                debug!(
                                    "Skipping prefetch for unbounded minimum-version range: {package_name} ({range})"
                                );
                                return Ok(None);
                            }
                        }
                    }
                }

                // Validate the Python requirement.
                let requires_python = match dist {
                    CompatibleDist::InstalledDist(_) => None,
                    CompatibleDist::SourceDist { sdist, .. }
                    | CompatibleDist::IncompatibleWheel { sdist, .. } => {
                        sdist.file.requires_python.as_ref()
                    }
                    CompatibleDist::CompatibleWheel { wheel, .. } => {
                        wheel.file.requires_python.as_ref()
                    }
                };
                if let Some(requires_python) = requires_python.as_ref() {
                    if !python_requirement.target().is_contained_by(requires_python) {
                        return Ok(None);
                    }
                }

                // Verify that the package is allowed under the hash-checking policy.
                if !self
                    .hasher
                    .allows_package(candidate.name(), candidate.version())
                {
                    return Ok(None);
                }

                // Emit a request to fetch the metadata for this version.
                // `register` returns `false` when another task already owns this
                // distribution, in which case we skip the duplicate fetch.
                let dist = dist.for_resolution();
                if self.index.distributions().register(dist.distribution_id()) {
                    let dist = dist.to_owned();
                    if &package_name != dist.name() {
                        return Err(ResolveError::MismatchedPackageName {
                            request: "distribution",
                            expected: package_name,
                            actual: dist.name().clone(),
                        });
                    }

                    let response = match dist {
                        ResolvedDist::Installable { dist, .. } => {
                            let metadata = provider
                                .get_or_build_wheel_metadata(&dist)
                                .boxed_local()
                                .await?;

                            Response::Dist {
                                dist: (*dist).clone(),
                                metadata,
                            }
                        }
                        ResolvedDist::Installed { dist } => {
                            let metadata =
                                provider.get_installed_metadata(&dist).boxed_local().await?;

                            Response::Installed {
                                dist: (*dist).clone(),
                                metadata,
                            }
                        }
                    };

                    Ok(Some(response))
                } else {
                    Ok(None)
                }
            }
        }
    }
2685
    /// Convert a raw PubGrub "no solution" error into a rich [`ResolveError`],
    /// enriched with the resolver state needed for reporting: unavailable and
    /// incomplete packages, the versions and indexes that _were_ available, and
    /// the fork/environment context in which resolution failed.
    fn convert_no_solution_err(
        &self,
        mut err: pubgrub::NoSolutionError<UvDependencyProvider>,
        fork_urls: ForkUrls,
        fork_indexes: ForkIndexes,
        env: ResolverEnvironment,
        current_environment: MarkerEnvironment,
        exclude_newer: Option<&ExcludeNewer>,
        visited: &FxHashSet<PackageName>,
    ) -> ResolveError {
        // Simplify the derivation tree before extracting packages from it.
        err = NoSolutionError::collapse_local_version_segments(NoSolutionError::collapse_proxies(
            err,
        ));

        // Collect packages from the derivation tree that were recorded as
        // entirely unavailable, along with the reason.
        let mut unavailable_packages = FxHashMap::default();
        for package in err.packages() {
            if let PubGrubPackageInner::Package { name, .. } = &**package {
                if let Some(reason) = self.unavailable_packages.get(name) {
                    unavailable_packages.insert(name.clone(), reason.clone());
                }
            }
        }

        // Collect per-version metadata failures for packages in the tree.
        let mut incomplete_packages = FxHashMap::default();
        for package in err.packages() {
            if let PubGrubPackageInner::Package { name, .. } = &**package {
                if let Some(versions) = self.incomplete_packages.get(name) {
                    for entry in versions.iter() {
                        let (version, reason) = entry.pair();
                        incomplete_packages
                            .entry(name.clone())
                            .or_insert_with(BTreeMap::default)
                            .insert(version.clone(), reason.clone());
                    }
                }
            }
        }

        let mut available_indexes = FxHashMap::default();
        let mut available_versions = FxHashMap::default();
        for package in err.packages() {
            let Some(name) = package.name() else { continue };
            if !visited.contains(name) {
                // Avoid including available versions for packages that exist in the derivation
                // tree, but were never visited during resolution. We _may_ have metadata for
                // these packages, but it's non-deterministic, and omitting them ensures that
                // we represent the state of the resolver at the time of failure.
                continue;
            }
            // Prefer the fork-pinned (explicit) index for this package, if any.
            let versions_response = if let Some(index) = fork_indexes.get(name) {
                self.index
                    .explicit()
                    .get(&(name.clone(), index.url().clone()))
            } else {
                self.index.implicit().get(name)
            };
            if let Some(response) = versions_response {
                if let VersionsResponse::Found(ref version_maps) = *response {
                    // Track the available versions, across all indexes.
                    for version_map in version_maps {
                        let package_versions = available_versions
                            .entry(name.clone())
                            .or_insert_with(BTreeSet::new);

                        for (version, dists) in version_map.iter(&Ranges::full()) {
                            // Don't show versions removed by excluded-newer in hints.
                            if let Some(exclude_newer) =
                                exclude_newer.and_then(|en| en.exclude_newer_package(name))
                            {
                                let Some(prioritized_dist) = dists.prioritized_dist() else {
                                    continue;
                                };
                                // Skip the version when every file was uploaded at or after
                                // the cutoff; files without an upload time count as excluded.
                                if prioritized_dist.files().all(|file| {
                                    file.upload_time_utc_ms.is_none_or(|upload_time| {
                                        upload_time >= exclude_newer.timestamp_millis()
                                    })
                                }) {
                                    continue;
                                }
                            }

                            package_versions.insert(version.clone());
                        }
                    }

                    // Track the indexes in which the package is available.
                    available_indexes
                        .entry(name.clone())
                        .or_insert(BTreeSet::new())
                        .extend(
                            version_maps
                                .iter()
                                .filter_map(|version_map| version_map.index().cloned()),
                        );
                }
            }
        }

        ResolveError::NoSolution(Box::new(NoSolutionError::new(
            err,
            self.index.clone(),
            available_versions,
            available_indexes,
            self.selector.clone(),
            self.python_requirement.clone(),
            self.locations.clone(),
            self.capabilities.clone(),
            unavailable_packages,
            incomplete_packages,
            fork_urls,
            fork_indexes,
            env,
            current_environment,
            self.tags.clone(),
            self.workspace_members.clone(),
            self.options.clone(),
        )))
    }
2804
2805    fn on_progress(&self, package: &PubGrubPackage, version: &Version) {
2806        if let Some(reporter) = self.reporter.as_ref() {
2807            match &**package {
2808                PubGrubPackageInner::Root(_) => {}
2809                PubGrubPackageInner::Python(_) => {}
2810                PubGrubPackageInner::System(_) => {}
2811                PubGrubPackageInner::Marker { .. } => {}
2812                PubGrubPackageInner::Extra { .. } => {}
2813                PubGrubPackageInner::Group { .. } => {}
2814                PubGrubPackageInner::Package { name, .. } => {
2815                    reporter.on_progress(name, &VersionOrUrlRef::Version(version));
2816                }
2817            }
2818        }
2819    }
2820
2821    fn on_complete(&self) {
2822        if let Some(reporter) = self.reporter.as_ref() {
2823            reporter.on_complete();
2824        }
2825    }
2826}
2827
/// State that is used during unit propagation in the resolver, one instance per fork.
#[derive(Clone)]
pub(crate) struct ForkState {
    /// The internal state used by the resolver.
    ///
    /// Note that not all parts of this state are strictly internal. For
    /// example, the edges in the dependency graph generated as part of the
    /// output of resolution are derived from the "incompatibilities" tracked
    /// in this state. We also ultimately retrieve the final set of version
    /// assignments (to packages) from this state's "partial solution."
    pubgrub: State<UvDependencyProvider>,
    /// The initial package to select. If set, the first iteration over this state will avoid
    /// asking PubGrub for the highest-priority package, and will instead use the provided package.
    initial_id: Option<Id<PubGrubPackage>>,
    /// The initial version to select. If set, the first iteration over this state will avoid
    /// asking PubGrub for the highest-priority version, and will instead use the provided version.
    initial_version: Option<Version>,
    /// The next package on which to run unit propagation.
    next: Id<PubGrubPackage>,
    /// The set of pinned versions we accrue throughout resolution.
    ///
    /// The key of this map is a package name, and each package name maps to
    /// a set of versions for that package. Each version in turn is mapped
    /// to the concrete distribution selected for installation, along with the
    /// concrete distribution whose metadata was used during resolution.
    /// After resolution is finished, this map is consulted to recover both the
    /// locked artifact and the metadata backing the resolved dependency edges.
    pins: FilePins,
    /// Ensure we don't have duplicate URLs in any branch.
    ///
    /// Unlike [`Urls`], we add only the URLs we have seen in this branch, and there can be only
    /// one URL per package. By prioritizing direct URL dependencies over registry dependencies,
    /// this map is populated for all direct URL packages before we look at any registry packages.
    fork_urls: ForkUrls,
    /// Ensure we don't have duplicate indexes in any branch.
    ///
    /// Unlike [`Indexes`], we add only the indexes we have seen in this branch, and there can be
    /// only one index per package.
    fork_indexes: ForkIndexes,
    /// When dependencies for a package are retrieved, this map of priorities
    /// is updated based on how each dependency was specified. Certain types
    /// of dependencies have more "priority" than others (like direct URL
    /// dependencies). These priorities help determine which package to
    /// consider next during resolution.
    priorities: PubGrubPriorities,
    /// This keeps track of the set of versions for each package that we've
    /// already visited during resolution. This avoids doing redundant work.
    added_dependencies: FxHashMap<Id<PubGrubPackage>, FxHashSet<Version>>,
    /// The marker expression that created this state.
    ///
    /// The root state always corresponds to a marker expression that is always
    /// `true` for every `MarkerEnvironment`.
    ///
    /// In non-universal mode, forking never occurs and so this marker
    /// expression is always `true`.
    ///
    /// Whenever dependencies are fetched, all requirement specifications
    /// are checked for disjointness with the marker expression of the fork
    /// in which those dependencies were fetched. If a requirement has a
    /// completely disjoint marker expression (i.e., it can never be true given
    /// that the marker expression that provoked the fork is true), then that
    /// dependency is completely ignored.
    env: ResolverEnvironment,
    /// The Python requirement for this fork. Defaults to the Python requirement for
    /// the resolution, but may be narrowed if a `python_version` marker is present
    /// in a given fork.
    ///
    /// For example, in:
    /// ```text
    /// numpy >=1.26 ; python_version >= "3.9"
    /// numpy <1.26 ; python_version < "3.9"
    /// ```
    ///
    /// The top fork has a narrower Python compatibility range, and thus can find a
    /// solution that omits Python 3.8 support.
    python_requirement: PythonRequirement,
    /// Counts how often each package has caused, or been affected by, a version rejection.
    /// Packages that cross the conflict threshold are queued for re-prioritization (see
    /// [`ForkState::record_conflict`]).
    conflict_tracker: ConflictTracker,
    /// Prefetch package versions for packages with many rejected versions.
    ///
    /// Tracked on the fork state to avoid counting each identical version between forks as new try.
    prefetcher: BatchPrefetcher,
}
2910
impl ForkState {
    /// Create the initial fork state from a freshly-initialized PubGrub state, starting
    /// unit propagation at PubGrub's root package.
    fn new(
        pubgrub: State<UvDependencyProvider>,
        env: ResolverEnvironment,
        python_requirement: PythonRequirement,
        prefetcher: BatchPrefetcher,
    ) -> Self {
        Self {
            initial_id: None,
            initial_version: None,
            next: pubgrub.root_package,
            pubgrub,
            pins: FilePins::default(),
            fork_urls: ForkUrls::default(),
            fork_indexes: ForkIndexes::default(),
            priorities: PubGrubPriorities::default(),
            added_dependencies: FxHashMap::default(),
            env,
            python_requirement,
            conflict_tracker: ConflictTracker::default(),
            prefetcher,
        }
    }

    /// Visit the dependencies for the selected version of the current package, incorporating any
    /// relevant URLs and pinned indexes into the [`ForkState`].
    fn visit_package_version_dependencies(
        &mut self,
        for_package: Id<PubGrubPackage>,
        for_version: &Version,
        urls: &Urls,
        indexes: &Indexes,
        dependencies: &[PubGrubDependency],
        git: &GitResolver,
        workspace_members: &BTreeSet<PackageName>,
        resolution_strategy: &ResolutionStrategy,
    ) -> Result<(), ResolveError> {
        for dependency in dependencies {
            let PubGrubDependency {
                package,
                version,
                parent: _,
                url,
            } = dependency;

            let mut has_url = false;
            if let Some(name) = package.name() {
                // From the [`Requirement`] to [`PubGrubDependency`] conversion, we get a URL if the
                // requirement was a URL requirement. `Urls` applies canonicalization to this and
                // override URLs to both URL and registry requirements, which we then check for
                // conflicts using [`ForkUrl`].
                for url in urls.get_url(&self.env, name, url.as_ref(), git)? {
                    self.fork_urls.insert(name, url, &self.env)?;
                    has_url = true;
                }

                // If the package is pinned to an exact index, add it to the fork.
                for index in indexes.get(name, &self.env) {
                    self.fork_indexes.insert(name, index, &self.env)?;
                }
            }

            if let Some(name) = self.pubgrub.package_store[for_package]
                .name_no_root()
                .filter(|name| !workspace_members.contains(name))
            {
                debug!(
                    "Adding transitive dependency for {name}=={for_version}: {package}{version}"
                );
            } else {
                // A dependency from the root package or `requirements.txt`.
                debug!("Adding direct dependency: {package}{version}");

                // Warn the user if a direct dependency lacks a lower bound in `--lowest` resolution.
                let missing_lower_bound = version
                    .bounding_range()
                    .map(|(lowest, _highest)| lowest == Bound::Unbounded)
                    .unwrap_or(true);
                let strategy_lowest = matches!(
                    resolution_strategy,
                    ResolutionStrategy::Lowest | ResolutionStrategy::LowestDirect(..)
                );

                if !has_url && missing_lower_bound && strategy_lowest {
                    let name = package.name_no_root().unwrap();
                    // Handle cases where a package is listed both without and with a lower bound.
                    // Example:
                    // ```
                    // "coverage[toml] ; python_version < '3.11'",
                    // "coverage >= 7.10.0",
                    // ```
                    let bound_on_other_package = dependencies.iter().any(|other| {
                        Some(name) == other.package.name()
                            && !other
                                .version
                                .bounding_range()
                                .map(|(lowest, _highest)| lowest == Bound::Unbounded)
                                .unwrap_or(true)
                    });

                    if !bound_on_other_package {
                        warn_user_once!(
                            "The direct dependency `{name}` is unpinned. \
                            Consider setting a lower bound when using `--resolution lowest` \
                            or `--resolution lowest-direct` to avoid using outdated versions.",
                        );
                    }
                }
            }

            // Update the package priorities.
            self.priorities.insert(package, version, &self.fork_urls);
            // As we're adding an incompatibility from the proxy package to the base package,
            // we need to register the base package.
            if let Some(base_package) = package.base_package() {
                self.priorities
                    .insert(&base_package, version, &self.fork_urls);
            }
        }

        Ok(())
    }

    /// Add the dependencies for the selected version of the current package.
    fn add_package_version_dependencies(
        &mut self,
        for_package: Id<PubGrubPackage>,
        for_version: &Version,
        dependencies: Vec<PubGrubDependency>,
    ) {
        // For every dependency that has a base package (i.e., a proxy such as an extra or
        // group variant), allocate both packages and link them so constraints on the proxy
        // also apply to the base package.
        for dependency in &dependencies {
            let PubGrubDependency {
                package,
                version,
                parent: _,
                url: _,
            } = dependency;

            let Some(base_package) = package.base_package() else {
                continue;
            };

            let proxy_package = self.pubgrub.package_store.alloc(package.clone());
            let base_package_id = self.pubgrub.package_store.alloc(base_package.clone());
            self.pubgrub
                .add_proxy_package(proxy_package, base_package_id, version.clone());
        }

        let conflict = self.pubgrub.add_package_version_dependencies(
            self.next,
            for_version.clone(),
            dependencies.into_iter().map(|dependency| {
                let PubGrubDependency {
                    package,
                    version,
                    parent: _,
                    url: _,
                } = dependency;
                (package, version)
            }),
        );

        // Conflict tracking: If the version was rejected due to its dependencies, record culprit
        // and affected.
        if let Some(incompatibility) = conflict {
            self.record_conflict(for_package, Some(for_version), incompatibility);
        }
    }

    /// Record that `affected` (at `version`, if known) was involved in a conflict described by
    /// `incompatibility`, updating the per-package conflict counters.
    ///
    /// Every other package in the incompatibility is counted as a culprit; once a package's
    /// culprit count reaches `CONFLICT_THRESHOLD`, it is queued for deprioritization.
    /// Likewise, once the current package's affected count reaches the threshold, it is
    /// queued for prioritization.
    fn record_conflict(
        &mut self,
        affected: Id<PubGrubPackage>,
        version: Option<&Version>,
        incompatibility: IncompId<PubGrubPackage, Ranges<Version>, UnavailableReason>,
    ) {
        let mut culprit_is_real = false;
        for (incompatible, _term) in self.pubgrub.incompatibility_store[incompatibility].iter() {
            if incompatible == affected {
                continue;
            }
            if self.pubgrub.package_store[affected].name()
                == self.pubgrub.package_store[incompatible].name()
            {
                // Don't track conflicts between a marker package and the main package, when the
                // marker is "copying" the obligations from the main package through conflicts.
                continue;
            }
            culprit_is_real = true;
            let culprit_count = self
                .conflict_tracker
                .culprit
                .entry(incompatible)
                .or_default();
            *culprit_count += 1;
            if *culprit_count == CONFLICT_THRESHOLD {
                self.conflict_tracker.deprioritize.push(incompatible);
            }
        }
        // Don't track conflicts between a marker package and the main package, when the
        // marker is "copying" the obligations from the main package through conflicts.
        if culprit_is_real {
            if tracing::enabled!(Level::DEBUG) {
                let incompatibility = self.pubgrub.incompatibility_store[incompatibility]
                    .iter()
                    .map(|(package, _term)| &self.pubgrub.package_store[package])
                    .join(", ");
                if let Some(version) = version {
                    debug!(
                        "Recording dependency conflict of {}=={} from incompatibility of ({})",
                        self.pubgrub.package_store[affected], version, incompatibility
                    );
                } else {
                    debug!(
                        "Recording unit propagation conflict of {} from incompatibility of ({})",
                        self.pubgrub.package_store[affected], incompatibility
                    );
                }
            }

            let affected_count = self.conflict_tracker.affected.entry(self.next).or_default();
            *affected_count += 1;
            if *affected_count == CONFLICT_THRESHOLD {
                self.conflict_tracker.prioritize.push(self.next);
            }
        }
    }

    /// Mark a version of the current package (`self.next`) as unavailable for the given reason.
    fn add_unavailable_version(&mut self, version: Version, reason: UnavailableVersion) {
        // Incompatible requires-python versions are special in that we track
        // them as incompatible dependencies instead of marking the package version
        // as unavailable directly.
        if let UnavailableVersion::IncompatibleDist(
            IncompatibleDist::Source(IncompatibleSource::RequiresPython(requires_python, kind))
            | IncompatibleDist::Wheel(IncompatibleWheel::RequiresPython(requires_python, kind)),
        ) = reason
        {
            let package = &self.next;
            let python = self.pubgrub.package_store.alloc(PubGrubPackage::from(
                PubGrubPackageInner::Python(match kind {
                    PythonRequirementKind::Installed => PubGrubPython::Installed,
                    PythonRequirementKind::Target => PubGrubPython::Target,
                }),
            ));
            self.pubgrub
                .add_incompatibility(Incompatibility::from_dependency(
                    *package,
                    Range::singleton(version.clone()),
                    (python, release_specifiers_to_ranges(requires_python)),
                ));
            self.pubgrub
                .partial_solution
                .add_decision(self.next, version);
            return;
        }
        self.pubgrub
            .add_incompatibility(Incompatibility::custom_version(
                self.next,
                version.clone(),
                UnavailableReason::Version(reason),
            ));
    }

    /// Replace the fork's resolver environment and update the Python requirement field
    /// accordingly.
    ///
    /// If the new environment narrows the Python requirement (e.g., because the fork's
    /// markers constrain `python_version`), the narrowed requirement is applied to this fork.
    fn with_env(mut self, env: ResolverEnvironment) -> Self {
        self.env = env;
        // If the fork contains a narrowed Python requirement, apply it.
        if let Some(req) = self.env.narrow_python_requirement(&self.python_requirement) {
            debug!("Narrowed `requires-python` bound to: {}", req.target());
            self.python_requirement = req;
        }
        self
    }

    /// Returns the URL or index for a package and version.
    ///
    /// In practice, exactly one of the returned values will be `Some`.
    fn source(
        &self,
        name: &PackageName,
        version: &Version,
    ) -> (Option<&VerbatimParsedUrl>, Option<&IndexUrl>) {
        let url = self.fork_urls.get(name);
        let index = url
            .is_none()
            .then(|| {
                self.pins
                    .get(name, version)
                    .expect("Every package should be pinned")
                    .index()
            })
            .flatten();
        (url, index)
    }

    /// Consume the fork state and extract its [`Resolution`]: the solved packages (nodes)
    /// and the dependency edges between them, recovered from PubGrub's partial solution and
    /// incompatibility store.
    fn into_resolution(self) -> Resolution {
        let solution: FxHashMap<_, _> = self.pubgrub.partial_solution.extract_solution().collect();
        let edge_count: usize = solution
            .keys()
            .map(|package| self.pubgrub.incompatibilities[package].len())
            .sum();
        let mut edges: Vec<ResolutionDependencyEdge> = Vec::with_capacity(edge_count);
        for (package, self_version) in &solution {
            // Walk the incompatibilities recorded for each solved package: every
            // `FromDependencyOf` incompatibility whose ranges cover the solved versions of
            // both endpoints corresponds to a dependency edge in the final resolution.
            for id in &self.pubgrub.incompatibilities[package] {
                let pubgrub::Kind::FromDependencyOf(
                    self_package,
                    ref self_range,
                    dependency_package,
                    ref dependency_range,
                ) = self.pubgrub.incompatibility_store[*id].kind
                else {
                    continue;
                };
                if *package != self_package {
                    continue;
                }
                if !self_range.contains(self_version) {
                    continue;
                }
                let Some(dependency_version) = solution.get(&dependency_package) else {
                    continue;
                };
                if !dependency_range.contains(dependency_version) {
                    continue;
                }

                let self_package = &self.pubgrub.package_store[self_package];
                let dependency_package = &self.pubgrub.package_store[dependency_package];

                let (self_name, self_extra, self_group) = match &**self_package {
                    PubGrubPackageInner::Package {
                        name: self_name,
                        extra: self_extra,
                        group: self_group,
                        marker: _,
                    } => (Some(self_name), self_extra.as_ref(), self_group.as_ref()),

                    PubGrubPackageInner::Root(_) => (None, None, None),

                    _ => continue,
                };

                let (self_url, self_index) = self_name
                    .map(|self_name| self.source(self_name, self_version))
                    .unwrap_or((None, None));

                match **dependency_package {
                    PubGrubPackageInner::Package {
                        name: ref dependency_name,
                        extra: ref dependency_extra,
                        group: ref dependency_dev,
                        marker: ref dependency_marker,
                    } => {
                        debug_assert!(
                            dependency_extra.is_none(),
                            "Packages should depend on an extra proxy"
                        );
                        debug_assert!(
                            dependency_dev.is_none(),
                            "Packages should depend on a group proxy"
                        );

                        // Ignore self-dependencies (e.g., `tensorflow-macos` depends on `tensorflow-macos`),
                        // but allow groups to depend on other groups, or on the package itself.
                        if self_group.is_none() {
                            if self_name == Some(dependency_name) {
                                continue;
                            }
                        }

                        let (to_url, to_index) = self.source(dependency_name, dependency_version);

                        let edge = ResolutionDependencyEdge {
                            from: self_name.cloned(),
                            from_version: self_version.clone(),
                            from_url: self_url.cloned(),
                            from_index: self_index.cloned(),
                            from_extra: self_extra.cloned(),
                            from_group: self_group.cloned(),
                            to: dependency_name.clone(),
                            to_version: dependency_version.clone(),
                            to_url: to_url.cloned(),
                            to_index: to_index.cloned(),
                            to_extra: dependency_extra.clone(),
                            to_group: dependency_dev.clone(),
                            marker: *dependency_marker,
                        };
                        edges.push(edge);
                    }

                    PubGrubPackageInner::Marker {
                        name: ref dependency_name,
                        marker: ref dependency_marker,
                    } => {
                        // Ignore self-dependencies (e.g., `tensorflow-macos` depends on `tensorflow-macos`),
                        // but allow groups to depend on other groups, or on the package itself.
                        if self_group.is_none() {
                            if self_name == Some(dependency_name) {
                                continue;
                            }
                        }

                        let (to_url, to_index) = self.source(dependency_name, dependency_version);

                        let edge = ResolutionDependencyEdge {
                            from: self_name.cloned(),
                            from_version: self_version.clone(),
                            from_url: self_url.cloned(),
                            from_index: self_index.cloned(),
                            from_extra: self_extra.cloned(),
                            from_group: self_group.cloned(),
                            to: dependency_name.clone(),
                            to_version: dependency_version.clone(),
                            to_url: to_url.cloned(),
                            to_index: to_index.cloned(),
                            to_extra: None,
                            to_group: None,
                            marker: *dependency_marker,
                        };
                        edges.push(edge);
                    }

                    PubGrubPackageInner::Extra {
                        name: ref dependency_name,
                        extra: ref dependency_extra,
                        marker: ref dependency_marker,
                    } => {
                        if self_group.is_none() {
                            debug_assert!(
                                self_name != Some(dependency_name),
                                "Extras should be flattened"
                            );
                        }
                        let (to_url, to_index) = self.source(dependency_name, dependency_version);

                        // Insert an edge from the dependent package to the extra package.
                        let edge = ResolutionDependencyEdge {
                            from: self_name.cloned(),
                            from_version: self_version.clone(),
                            from_url: self_url.cloned(),
                            from_index: self_index.cloned(),
                            from_extra: self_extra.cloned(),
                            from_group: self_group.cloned(),
                            to: dependency_name.clone(),
                            to_version: dependency_version.clone(),
                            to_url: to_url.cloned(),
                            to_index: to_index.cloned(),
                            to_extra: Some(dependency_extra.clone()),
                            to_group: None,
                            marker: *dependency_marker,
                        };
                        edges.push(edge);

                        // Insert an edge from the dependent package to the base package.
                        let edge = ResolutionDependencyEdge {
                            from: self_name.cloned(),
                            from_version: self_version.clone(),
                            from_url: self_url.cloned(),
                            from_index: self_index.cloned(),
                            from_extra: self_extra.cloned(),
                            from_group: self_group.cloned(),
                            to: dependency_name.clone(),
                            to_version: dependency_version.clone(),
                            to_url: to_url.cloned(),
                            to_index: to_index.cloned(),
                            to_extra: None,
                            to_group: None,
                            marker: *dependency_marker,
                        };
                        edges.push(edge);
                    }

                    PubGrubPackageInner::Group {
                        name: ref dependency_name,
                        group: ref dependency_group,
                        marker: ref dependency_marker,
                    } => {
                        debug_assert!(
                            self_name != Some(dependency_name),
                            "Groups should be flattened"
                        );

                        let (to_url, to_index) = self.source(dependency_name, dependency_version);

                        // Add an edge from the dependent package to the dev package, but _not_ the
                        // base package.
                        let edge = ResolutionDependencyEdge {
                            from: self_name.cloned(),
                            from_version: self_version.clone(),
                            from_url: self_url.cloned(),
                            from_index: self_index.cloned(),
                            from_extra: self_extra.cloned(),
                            from_group: self_group.cloned(),
                            to: dependency_name.clone(),
                            to_version: dependency_version.clone(),
                            to_url: to_url.cloned(),
                            to_index: to_index.cloned(),
                            to_extra: None,
                            to_group: Some(dependency_group.clone()),
                            marker: *dependency_marker,
                        };
                        edges.push(edge);
                    }

                    _ => {}
                }
            }
        }

        // Only base packages with an always-true marker become nodes; the proxy packages
        // (extras, groups, markers) were already flattened into edges above.
        let nodes = solution
            .into_iter()
            .filter_map(|(package, version)| {
                if let PubGrubPackageInner::Package {
                    name,
                    extra,
                    group,
                    marker: MarkerTree::TRUE,
                } = &*self.pubgrub.package_store[package]
                {
                    let (url, index) = self.source(name, &version);
                    Some((
                        ResolutionPackage {
                            name: name.clone(),
                            extra: extra.clone(),
                            dev: group.clone(),
                            url: url.cloned(),
                            index: index.cloned(),
                        },
                        version,
                    ))
                } else {
                    None
                }
            })
            .collect();

        Resolution {
            nodes,
            edges,
            pins: self.pins,
            env: self.env,
        }
    }
}
3458
/// The resolution from a single fork including the virtual packages and the edges between them.
#[derive(Debug)]
pub(crate) struct Resolution {
    /// The packages in this fork's solution, each mapped to its selected version.
    pub(crate) nodes: FxHashMap<ResolutionPackage, Version>,
    /// The directed connections between the nodes, where the marker is the node weight. We don't
    /// store the requirement itself, but it can be retrieved from the package metadata.
    pub(crate) edges: Vec<ResolutionDependencyEdge>,
    /// Map each package name, version tuple from `packages` to a distribution.
    pub(crate) pins: FilePins,
    /// The environment setting this resolution was found under.
    pub(crate) env: ResolverEnvironment,
}
3471
/// Package representation we used during resolution where each extra and also the dev-dependencies
/// group are their own package.
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub(crate) struct ResolutionPackage {
    /// The name of the package.
    pub(crate) name: PackageName,
    /// The extra this virtual package represents, if any.
    pub(crate) extra: Option<ExtraName>,
    /// The dependency group this virtual package represents, if any.
    pub(crate) dev: Option<GroupName>,
    /// For registry packages, this is `None`; otherwise, the direct URL of the distribution.
    pub(crate) url: Option<VerbatimParsedUrl>,
    /// For URL packages, this is `None`; otherwise, the index URL of the distribution.
    pub(crate) index: Option<IndexUrl>,
}
3484
/// The `from_` fields and the `to_` fields allow mapping to the originating and target
///  [`ResolutionPackage`] respectively. The `marker` is the edge weight.
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub(crate) struct ResolutionDependencyEdge {
    /// This value is `None` if the dependency comes from the root package.
    pub(crate) from: Option<PackageName>,
    pub(crate) from_version: Version,
    pub(crate) from_url: Option<VerbatimParsedUrl>,
    pub(crate) from_index: Option<IndexUrl>,
    pub(crate) from_extra: Option<ExtraName>,
    pub(crate) from_group: Option<GroupName>,
    /// The name of the package this edge points at.
    pub(crate) to: PackageName,
    pub(crate) to_version: Version,
    pub(crate) to_url: Option<VerbatimParsedUrl>,
    pub(crate) to_index: Option<IndexUrl>,
    pub(crate) to_extra: Option<ExtraName>,
    pub(crate) to_group: Option<GroupName>,
    /// The PEP 508 marker under which this dependency edge applies (the edge weight).
    pub(crate) marker: MarkerTree,
}
3504
3505impl ResolutionDependencyEdge {
3506    pub(crate) fn universal_marker(&self) -> UniversalMarker {
3507        // We specifically do not account for conflict
3508        // markers here. Instead, those are computed via
3509        // a traversal on the resolution graph.
3510        UniversalMarker::new(self.marker, ConflictMarker::TRUE)
3511    }
3512}
3513
/// Fetch the metadata for an item: a package, a distribution, or an already-installed
/// distribution.
#[derive(Debug)]
#[expect(clippy::large_enum_variant)]
pub(crate) enum Request {
    /// A request to fetch the metadata for a package.
    Package(PackageName, Option<IndexMetadata>),
    /// A request to fetch the metadata for a built or source distribution.
    Dist(Dist),
    /// A request to fetch the metadata from an already-installed distribution.
    Installed(InstalledDist),
    /// A request to pre-fetch the metadata for a package and the best-guess distribution.
    Prefetch(PackageName, Range<Version>, PythonRequirement),
}
3527
3528impl<'a> From<ResolvedDistRef<'a>> for Request {
3529    fn from(dist: ResolvedDistRef<'a>) -> Self {
3530        // N.B. This is almost identical to `ResolvedDistRef::to_owned`, but
3531        // creates a `Request` instead of a `ResolvedDist`. There's probably
3532        // some room for DRYing this up a bit. The obvious way would be to
3533        // add a method to create a `Dist`, but a `Dist` cannot be represented
3534        // as an installed dist.
3535        match dist {
3536            ResolvedDistRef::InstallableRegistrySourceDist { sdist, prioritized } => {
3537                // This is okay because we're only here if the prioritized dist
3538                // has an sdist, so this always succeeds.
3539                let source = prioritized.source_dist().expect("a source distribution");
3540                assert_eq!(
3541                    (&sdist.name, &sdist.version),
3542                    (&source.name, &source.version),
3543                    "expected chosen sdist to match prioritized sdist"
3544                );
3545                Self::Dist(Dist::Source(SourceDist::Registry(source)))
3546            }
3547            ResolvedDistRef::InstallableRegistryBuiltDist {
3548                wheel, prioritized, ..
3549            } => {
3550                assert_eq!(
3551                    Some(&wheel.filename),
3552                    prioritized.best_wheel().map(|(wheel, _)| &wheel.filename),
3553                    "expected chosen wheel to match best wheel"
3554                );
3555                // This is okay because we're only here if the prioritized dist
3556                // has at least one wheel, so this always succeeds.
3557                let built = prioritized.built_dist().expect("at least one wheel");
3558                Self::Dist(Dist::Built(BuiltDist::Registry(built)))
3559            }
3560            ResolvedDistRef::Installed { dist } => Self::Installed(dist.clone()),
3561        }
3562    }
3563}
3564
3565impl Display for Request {
3566    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
3567        match self {
3568            Self::Package(package_name, _) => {
3569                write!(f, "Versions {package_name}")
3570            }
3571            Self::Dist(dist) => {
3572                write!(f, "Metadata {dist}")
3573            }
3574            Self::Installed(dist) => {
3575                write!(f, "Installed metadata {dist}")
3576            }
3577            Self::Prefetch(package_name, range, _) => {
3578                write!(f, "Prefetch {package_name} {range}")
3579            }
3580        }
3581    }
3582}
3583
/// The response to a [`Request`], carrying the fetched metadata.
#[derive(Debug)]
#[expect(clippy::large_enum_variant)]
enum Response {
    /// The returned metadata for a package hosted on a registry.
    Package(PackageName, Option<IndexUrl>, VersionsResponse),
    /// The returned metadata for a distribution.
    Dist {
        dist: Dist,
        metadata: MetadataResponse,
    },
    /// The returned metadata for an already-installed distribution.
    Installed {
        dist: InstalledDist,
        metadata: MetadataResponse,
    },
}
3600
/// Information about the dependencies for a particular package.
///
/// This effectively distills the dependency metadata of a package down into
/// its pubgrub specific constituent parts: each dependency package has a range
/// of possible versions.
enum Dependencies {
    /// Package dependencies are not available, with the reason why.
    Unavailable(UnavailableVersion),
    /// Container for all available package versions.
    ///
    /// Note that in universal mode, it is possible and allowed for multiple
    /// `PubGrubPackage` values in this list to have the same package name.
    /// These conflicts are resolved via `Dependencies::fork`.
    Available(Vec<PubGrubDependency>),
    /// Dependencies that should never result in a fork.
    ///
    /// For example, the dependencies of a `Marker` package will have the
    /// same name and version, but differ according to marker expressions.
    /// But we never want this to result in a fork.
    Unforkable(Vec<PubGrubDependency>),
}
3622
3623impl Dependencies {
3624    /// Turn this flat list of dependencies into a potential set of forked
3625    /// groups of dependencies.
3626    ///
3627    /// A fork *only* occurs when there are multiple dependencies with the same
3628    /// name *and* those dependency specifications have corresponding marker
3629    /// expressions that are completely disjoint with one another.
3630    fn fork(
3631        self,
3632        env: &ResolverEnvironment,
3633        python_requirement: &PythonRequirement,
3634        conflicts: &Conflicts,
3635    ) -> ForkedDependencies {
3636        let deps = match self {
3637            Self::Available(deps) => deps,
3638            Self::Unforkable(deps) => return ForkedDependencies::Unforked(deps),
3639            Self::Unavailable(err) => return ForkedDependencies::Unavailable(err),
3640        };
3641        let mut name_to_deps: BTreeMap<PackageName, Vec<PubGrubDependency>> = BTreeMap::new();
3642        for dep in deps {
3643            let name = dep
3644                .package
3645                .name()
3646                .expect("dependency always has a name")
3647                .clone();
3648            name_to_deps.entry(name).or_default().push(dep);
3649        }
3650        let Forks {
3651            mut forks,
3652            diverging_packages,
3653        } = Forks::new(name_to_deps, env, python_requirement, conflicts);
3654        if forks.is_empty() {
3655            ForkedDependencies::Unforked(vec![])
3656        } else if forks.len() == 1 {
3657            ForkedDependencies::Unforked(forks.pop().unwrap().dependencies)
3658        } else {
3659            ForkedDependencies::Forked {
3660                forks,
3661                diverging_packages: diverging_packages.into_iter().collect(),
3662            }
3663        }
3664    }
3665}
3666
/// Information about the (possibly forked) dependencies for a particular
/// package.
///
/// This is like `Dependencies` but with an extra variant that only occurs when
/// a `Dependencies` list has multiple dependency specifications with the same
/// name and non-overlapping marker expressions (i.e., a fork occurs).
#[derive(Debug)]
enum ForkedDependencies {
    /// Package dependencies are not available.
    Unavailable(UnavailableVersion),
    /// No forking occurred.
    ///
    /// This is the same as `Dependencies::Available`.
    Unforked(Vec<PubGrubDependency>),
    /// Forked containers for all available package versions.
    ///
    /// Note that there are always at least two forks. If there would
    /// be fewer than 2 forks, then there is no fork at all and the
    /// `Unforked` variant is used instead.
    Forked {
        forks: Vec<Fork>,
        /// The package(s) with different requirements for disjoint markers.
        diverging_packages: Vec<PackageName>,
    },
}
3692
/// A list of forks determined from the dependencies of a single package.
///
/// Any time a marker expression is seen that is not true for all possible
/// marker environments, it is possible for it to introduce a new fork.
#[derive(Debug, Default)]
struct Forks {
    /// The forks discovered among the dependencies.
    forks: Vec<Fork>,
    /// The package(s) that provoked at least one additional fork.
    diverging_packages: BTreeSet<PackageName>,
}
3704
impl Forks {
    /// Compute the set of forks implied by the given dependencies, grouped by
    /// package name.
    ///
    /// Starting from a single fork for `env`, each group of same-named
    /// dependencies with disjoint markers may split existing forks into
    /// finer-grained environments, and each conflicting group set may split
    /// them further.
    fn new(
        name_to_deps: BTreeMap<PackageName, Vec<PubGrubDependency>>,
        env: &ResolverEnvironment,
        python_requirement: &PythonRequirement,
        conflicts: &Conflicts,
    ) -> Self {
        let python_marker = python_requirement.to_marker_tree();

        let mut forks = vec![Fork::new(env.clone())];
        let mut diverging_packages = BTreeSet::new();
        for (name, mut deps) in name_to_deps {
            assert!(!deps.is_empty(), "every name has at least one dependency");
            // We never fork if there's only one dependency
            // specification for a given package name. This particular
            // strategy results in a "conservative" approach to forking
            // that gives up correctness in some cases in exchange for
            // more limited forking. More limited forking results in
            // simpler-and-easier-to-understand lock files and faster
            // resolving. The correctness we give up manifests when
            // two transitive non-sibling dependencies conflict. In
            // that case, we don't detect the fork ahead of time (at
            // present).
            if let [dep] = deps.as_slice() {
                // There's one exception: if the requirement increases the minimum-supported Python
                // version, we also fork in order to respect that minimum in the subsequent
                // resolution.
                //
                // For example, given `requires-python = ">=3.7"` and `uv ; python_version >= "3.8"`,
                // where uv itself only supports Python 3.8 and later, we need to fork to ensure
                // that the resolution can find a solution.
                if marker::requires_python(dep.package.marker())
                    .is_none_or(|bound| !python_requirement.raises(&bound))
                {
                    let dep = deps.pop().unwrap();
                    let marker = dep.package.marker();
                    for fork in &mut forks {
                        if fork.env.included_by_marker(marker) {
                            fork.add_dependency(dep.clone());
                        }
                    }
                    continue;
                }
            } else {
                // If all dependencies have the same markers, we should also avoid forking.
                if let Some(dep) = deps.first() {
                    let marker = dep.package.marker();
                    if deps.iter().all(|dep| marker == dep.package.marker()) {
                        // Unless that "same marker" is a Python requirement that is stricter than
                        // the current Python requirement. In that case, we need to fork to respect
                        // the stricter requirement.
                        if marker::requires_python(marker)
                            .is_none_or(|bound| !python_requirement.raises(&bound))
                        {
                            for dep in deps {
                                for fork in &mut forks {
                                    if fork.env.included_by_marker(marker) {
                                        fork.add_dependency(dep.clone());
                                    }
                                }
                            }
                            continue;
                        }
                    }
                }
            }
            // Each remaining dependency may split the current set of forks.
            for dep in deps {
                let mut forker = match ForkingPossibility::new(env, &dep) {
                    ForkingPossibility::Possible(forker) => forker,
                    ForkingPossibility::DependencyAlwaysExcluded => {
                        // If the markers can never be satisfied by the parent
                        // fork, then we can drop this dependency unceremoniously.
                        continue;
                    }
                    ForkingPossibility::NoForkingPossible => {
                        // Or, if the markers are always true, then we just
                        // add the dependency to every fork unconditionally.
                        for fork in &mut forks {
                            fork.add_dependency(dep.clone());
                        }
                        continue;
                    }
                };
                // Otherwise, we *should* need to add a new fork...
                diverging_packages.insert(name.clone());

                let mut new = vec![];
                for fork in std::mem::take(&mut forks) {
                    let Some((remaining_forker, envs)) = forker.fork(&fork.env) else {
                        new.push(fork);
                        continue;
                    };
                    forker = remaining_forker;

                    for fork_env in envs {
                        let mut new_fork = fork.clone();
                        new_fork.set_env(fork_env);
                        // We only add the dependency to this fork if it
                        // satisfies the fork's markers. Some forks are
                        // specifically created to exclude this dependency,
                        // so this isn't always true!
                        if forker.included(&new_fork.env) {
                            new_fork.add_dependency(dep.clone());
                        }
                        // Filter out any forks we created that are disjoint with our
                        // Python requirement.
                        if new_fork.env.included_by_marker(python_marker) {
                            new.push(new_fork);
                        }
                    }
                }
                forks = new;
            }
        }
        // When there is a conflicting group configuration, we need
        // to potentially add more forks. Each fork added contains an
        // exclusion list of conflicting groups where dependencies with
        // the corresponding package and extra name are forcefully
        // excluded from that group.
        //
        // We specifically iterate on conflicting groups and
        // potentially re-generate all forks for each one. We do it
        // this way in case there are multiple sets of conflicting
        // groups that impact the forks here.
        //
        // For example, if we have conflicting groups {x1, x2} and {x3,
        // x4}, we need to make sure the forks generated from one set
        // also account for the other set.
        for set in conflicts.iter() {
            let mut new = vec![];
            for fork in std::mem::take(&mut forks) {
                // Check if this conflict set is relevant to this fork. We need two conditions:
                //
                // 1. At least one item has dependencies in this fork (otherwise there's nothing to
                //    fork on).
                // 2. At least two items are not already excluded in this fork's environment
                //    (otherwise the conflict constraint is already satisfied and no fork is
                //    needed).
                let mut has_conflicting_dependency = false;
                for item in set.iter() {
                    if fork.contains_conflicting_item(item.as_ref()) {
                        has_conflicting_dependency = true;
                        diverging_packages.insert(item.package().clone());
                        break;
                    }
                }
                if !has_conflicting_dependency {
                    new.push(fork);
                    continue;
                }

                // If fewer than two items in this conflict set are still possible (not already
                // excluded) in this fork, the conflict constraint is already satisfied by prior
                // forking. We can skip the full N+1 fork split if the single remaining non-excluded
                // item doesn't appear in any other conflict set (since it would never need its own
                // "excluded" variant).
                let non_excluded: Vec<_> = set
                    .iter()
                    .filter(|item| fork.env.included_by_group(item.as_ref()))
                    .collect();
                if non_excluded.len() < 2 {
                    // Check if any non-excluded item still has a live conflict in another set —
                    // i.e., another set where this item AND at least one other non-excluded item
                    // both appear. If so, we still need to fork to create the "excluded" variant
                    // for that item.
                    let dominated = non_excluded.iter().all(|item| {
                        !conflicts.iter().any(|other_set| {
                            !std::ptr::eq(set, other_set)
                                && other_set.contains(item.package(), item.kind().as_ref())
                                && other_set
                                    .iter()
                                    .filter(|other_item| {
                                        other_item.package() != item.package()
                                            || other_item.kind() != item.kind()
                                    })
                                    .any(|other_item| {
                                        fork.env.included_by_group(other_item.as_ref())
                                    })
                        })
                    });
                    if dominated {
                        // When dependencies are added to forks, we check `included_by_marker` but
                        // not on whether the dependency's conflict item is included by the fork's
                        // environment so there may be extraneous dependencies and we need to filter
                        // the fork to clean up dependencies gated on already-excluded extras.
                        let rules: Vec<_> = set
                            .iter()
                            .filter(|item| !fork.env.included_by_group(item.as_ref()))
                            .cloned()
                            .map(Err)
                            .collect();
                        if let Some(filtered) = fork.filter(rules) {
                            new.push(filtered);
                        }
                        continue;
                    }
                }

                // Create a fork that excludes ALL conflicts.
                if let Some(fork_none) = fork.clone().filter(set.iter().cloned().map(Err)) {
                    new.push(fork_none);
                }

                // Now create a fork for each conflicting group, where
                // that fork excludes every *other* conflicting group.
                //
                // So if we have conflicting extras foo, bar and baz,
                // then this creates three forks: one that excludes
                // {foo, bar}, one that excludes {foo, baz} and one
                // that excludes {bar, baz}.
                for (i, _) in set.iter().enumerate() {
                    let fork_allows_group = fork.clone().filter(
                        set.iter()
                            .cloned()
                            .enumerate()
                            .map(|(j, group)| if i == j { Ok(group) } else { Err(group) }),
                    );
                    if let Some(fork_allows_group) = fork_allows_group {
                        new.push(fork_allows_group);
                    }
                }
            }
            forks = new;
        }
        Self {
            forks,
            diverging_packages,
        }
    }
}
3935
/// A single fork in a list of dependencies.
///
/// A fork corresponds to the full list of dependencies for a package,
/// but with any conflicting dependency specifications omitted. For
/// example, if we have `a<2 ; sys_platform == 'foo'` and `a>=2 ;
/// sys_platform == 'bar'`, then because the dependency specifications
/// have the same name and because the marker expressions are disjoint,
/// a fork occurs. One fork will contain `a<2` but not `a>=2`, while
/// the other fork will contain `a>=2` but not `a<2`.
#[derive(Clone, Debug)]
struct Fork {
    /// The list of dependencies for this fork, guaranteed to be conflict
    /// free. (i.e., There are no two packages with the same name with
    /// non-overlapping marker expressions.)
    ///
    /// Note that callers shouldn't mutate this sequence directly. Instead,
    /// they should use `add_forked_package` or `add_nonfork_package`. Namely,
    /// it should be impossible for a package with a marker expression that is
    /// disjoint from the marker expression on this fork to be added.
    dependencies: Vec<PubGrubDependency>,
    /// The conflicting groups in this fork.
    ///
    /// This exists to make some access patterns more efficient. Namely,
    /// it makes it easy to check whether there's a dependency with a
    /// particular conflicting group in this fork.
    conflicts: crate::FxHashbrownSet<ConflictItem>,
    /// The resolver environment for this fork.
    ///
    /// Principally, this corresponds to the markers in this fork. So in the
    /// example above, the `a<2` fork would have `sys_platform == 'foo'`, while
    /// the `a>=2` fork would have `sys_platform == 'bar'`.
    ///
    /// If this fork was generated from another fork, then this *includes*
    /// the criteria from its parent. i.e., Its marker expression represents
    /// the intersection of the marker expression from its parent and any
    /// additional marker expression generated by additional forking based on
    /// conflicting dependency specifications.
    env: ResolverEnvironment,
}
3975
3976impl Fork {
3977    /// Create a new fork with no dependencies with the given resolver
3978    /// environment.
3979    fn new(env: ResolverEnvironment) -> Self {
3980        Self {
3981            dependencies: vec![],
3982            conflicts: crate::FxHashbrownSet::default(),
3983            env,
3984        }
3985    }
3986
3987    /// Add a dependency to this fork.
3988    fn add_dependency(&mut self, dep: PubGrubDependency) {
3989        if let Some(conflicting_item) = dep.conflicting_item() {
3990            self.conflicts.insert(conflicting_item.to_owned());
3991        }
3992        self.dependencies.push(dep);
3993    }
3994
3995    /// Sets the resolver environment to the one given.
3996    ///
3997    /// Any dependency in this fork that does not satisfy the given environment
3998    /// is removed.
3999    fn set_env(&mut self, env: ResolverEnvironment) {
4000        self.env = env;
4001        self.dependencies.retain(|dep| {
4002            let marker = dep.package.marker();
4003            if self.env.included_by_marker(marker) {
4004                return true;
4005            }
4006            if let Some(conflicting_item) = dep.conflicting_item() {
4007                self.conflicts.remove(&conflicting_item);
4008            }
4009            false
4010        });
4011    }
4012
4013    /// Returns true if any of the dependencies in this fork contain a
4014    /// dependency with the given package and extra values.
4015    fn contains_conflicting_item(&self, item: ConflictItemRef<'_>) -> bool {
4016        self.conflicts.contains(&item)
4017    }
4018
4019    /// Include or Exclude the given groups from this fork.
4020    ///
4021    /// This removes all dependencies matching the given conflicting groups.
4022    ///
4023    /// If the exclusion rules would result in a fork with an unsatisfiable
4024    /// resolver environment, then this returns `None`.
4025    fn filter(
4026        mut self,
4027        rules: impl IntoIterator<Item = Result<ConflictItem, ConflictItem>>,
4028    ) -> Option<Self> {
4029        self.env = self.env.filter_by_group(rules)?;
4030        self.dependencies.retain(|dep| {
4031            let Some(conflicting_item) = dep.conflicting_item() else {
4032                return true;
4033            };
4034            if self.env.included_by_group(conflicting_item) {
4035                return true;
4036            }
4037            match conflicting_item.kind() {
4038                // We should not filter entire projects unless they're a top-level dependency
4039                // Otherwise, we'll fail to solve for children of the project, like extras
4040                ConflictKindRef::Project => {
4041                    if dep.parent.is_some() {
4042                        return true;
4043                    }
4044                }
4045                ConflictKindRef::Group(_) => {}
4046                ConflictKindRef::Extra(_) => {}
4047            }
4048            self.conflicts.remove(&conflicting_item);
4049            false
4050        });
4051        Some(self)
4052    }
4053
4054    /// Compare forks, preferring forks with g `requires-python` requirements.
4055    fn cmp_requires_python(&self, other: &Self) -> Ordering {
4056        // A higher `requires-python` requirement indicates a _higher-priority_ fork.
4057        //
4058        // This ordering ensures that we prefer choosing the highest version for each fork based on
4059        // its `requires-python` requirement.
4060        //
4061        // The reverse would prefer choosing fewer versions, at the cost of using older package
4062        // versions on newer Python versions. For example, if reversed, we'd prefer to solve `<3.7
4063        // before solving `>=3.7`, since the resolution produced by the former might work for the
4064        // latter, but the inverse is unlikely to be true.
4065        let self_bound = self.env.requires_python().unwrap_or_default();
4066        let other_bound = other.env.requires_python().unwrap_or_default();
4067        self_bound.lower().cmp(other_bound.lower())
4068    }
4069
4070    /// Compare forks, preferring forks with upper bounds.
4071    fn cmp_upper_bounds(&self, other: &Self) -> Ordering {
4072        // We'd prefer to solve `numpy <= 2` before solving `numpy >= 1`, since the resolution
4073        // produced by the former might work for the latter, but the inverse is unlikely to be true
4074        // due to maximum version selection. (Selecting `numpy==2.0.0` would satisfy both forks, but
4075        // selecting the latest `numpy` would not.)
4076        let self_upper_bounds = self
4077            .dependencies
4078            .iter()
4079            .filter(|dep| {
4080                dep.version
4081                    .bounding_range()
4082                    .is_some_and(|(_, upper)| !matches!(upper, Bound::Unbounded))
4083            })
4084            .count();
4085        let other_upper_bounds = other
4086            .dependencies
4087            .iter()
4088            .filter(|dep| {
4089                dep.version
4090                    .bounding_range()
4091                    .is_some_and(|(_, upper)| !matches!(upper, Bound::Unbounded))
4092            })
4093            .count();
4094
4095        self_upper_bounds.cmp(&other_upper_bounds)
4096    }
4097}
4098
impl Eq for Fork {}

impl PartialEq for Fork {
    /// Equality intentionally ignores the `conflicts` field: it is derived
    /// state maintained from `dependencies` (see `add_dependency`, `set_env`,
    /// and `filter`), so comparing the dependency list and environment
    /// suffices.
    fn eq(&self, other: &Self) -> bool {
        self.dependencies == other.dependencies && self.env == other.env
    }
}
4106
/// A new resolver fork, described by its environment together with the initial
/// package (and, optionally, version) to select in it.
#[derive(Debug, Clone)]
pub(crate) struct VersionFork {
    /// The environment to use in the fork.
    env: ResolverEnvironment,
    /// The initial package to select in the fork.
    id: Id<PubGrubPackage>,
    /// The initial version to set for the selected package in the fork.
    version: Option<Version>,
}
4116
4117/// Enrich a [`ResolveError`] with additional information about why a given package was included.
4118fn enrich_dependency_error(
4119    error: ResolveError,
4120    id: Id<PubGrubPackage>,
4121    version: &Version,
4122    pubgrub: &State<UvDependencyProvider>,
4123) -> ResolveError {
4124    let Some(name) = pubgrub.package_store[id].name_no_root() else {
4125        return error;
4126    };
4127    let chain = DerivationChainBuilder::from_state(id, version, pubgrub).unwrap_or_default();
4128    ResolveError::Dependencies(Box::new(error), name.clone(), version.clone(), chain)
4129}
4130
4131/// Compute the set of markers for which a package is known to be relevant.
4132fn find_environments(id: Id<PubGrubPackage>, state: &State<UvDependencyProvider>) -> MarkerTree {
4133    let package = &state.package_store[id];
4134    if package.is_root() {
4135        return MarkerTree::TRUE;
4136    }
4137
4138    // Retrieve the incompatibilities for the current package.
4139    let Some(incompatibilities) = state.incompatibilities.get(&id) else {
4140        return MarkerTree::FALSE;
4141    };
4142
4143    // Find all dependencies on the current package.
4144    let mut marker = MarkerTree::FALSE;
4145    for index in incompatibilities {
4146        let incompat = &state.incompatibility_store[*index];
4147        if let Kind::FromDependencyOf(id1, _, id2, _) = &incompat.kind {
4148            if id == *id2 {
4149                marker.or({
4150                    let mut marker = package.marker();
4151                    marker.and(find_environments(*id1, state));
4152                    marker
4153                });
4154            }
4155        }
4156    }
4157    marker
4158}
4159
/// Tracks how often packages are involved in discarded decisions, so that the
/// resolver can prioritize affected packages and de-prioritize culprits.
#[derive(Debug, Default, Clone)]
struct ConflictTracker {
    /// How often a decision on the package was discarded due to another package decided earlier.
    affected: FxHashMap<Id<PubGrubPackage>, usize>,
    /// Package(s) to be prioritized after the next unit propagation
    ///
    /// Distilled from `affected` for fast checking in the hot loop.
    prioritize: Vec<Id<PubGrubPackage>>,
    /// How often a package was decided earlier and caused another package to be discarded.
    culprit: FxHashMap<Id<PubGrubPackage>, usize>,
    /// Package(s) to be de-prioritized after the next unit propagation
    ///
    /// Distilled from `culprit` for fast checking in the hot loop.
    deprioritize: Vec<Id<PubGrubPackage>>,
}