// uv_resolver/resolver/mod.rs
1//! Given a set of requirements, find a set of compatible packages.
2
3use std::borrow::Cow;
4use std::cmp::Ordering;
5use std::collections::{BTreeMap, BTreeSet, VecDeque};
6use std::fmt::{Display, Formatter, Write};
7use std::ops::Bound;
8use std::sync::Arc;
9use std::time::Instant;
10use std::{iter, slice, thread};
11
12use dashmap::DashMap;
13use either::Either;
14use futures::{FutureExt, StreamExt};
15use itertools::Itertools;
16use pubgrub::{Id, IncompId, Incompatibility, Kind, Range, Ranges, State};
17use rustc_hash::{FxHashMap, FxHashSet};
18use tokio::sync::mpsc::{self, Receiver, Sender};
19use tokio::sync::oneshot;
20use tokio_stream::wrappers::ReceiverStream;
21use tracing::{Level, debug, info, instrument, trace, warn};
22
23use uv_configuration::{Constraints, Excludes, Overrides};
24use uv_distribution::{ArchiveMetadata, DistributionDatabase};
25use uv_distribution_types::{
26    BuiltDist, CompatibleDist, DerivationChain, Dist, DistErrorKind, Identifier, IncompatibleDist,
27    IncompatibleSource, IncompatibleWheel, IndexCapabilities, IndexLocations, IndexMetadata,
28    IndexUrl, InstalledDist, Name, PythonRequirementKind, RemoteSource, Requirement, ResolvedDist,
29    ResolvedDistRef, SourceDist, VersionOrUrlRef, implied_markers,
30};
31use uv_git::GitResolver;
32use uv_normalize::{ExtraName, GroupName, PackageName};
33use uv_pep440::{MIN_VERSION, Version, VersionSpecifiers, release_specifiers_to_ranges};
34use uv_pep508::{
35    MarkerEnvironment, MarkerExpression, MarkerOperator, MarkerTree, MarkerValueString,
36};
37use uv_platform_tags::{IncompatibleTag, Tags};
38use uv_pypi_types::{ConflictItem, ConflictItemRef, ConflictKindRef, Conflicts, VerbatimParsedUrl};
39use uv_torch::TorchStrategy;
40use uv_types::{BuildContext, HashStrategy, InstalledPackagesProvider};
41use uv_warnings::warn_user_once;
42
43use crate::candidate_selector::{Candidate, CandidateDist, CandidateSelector};
44use crate::dependency_provider::UvDependencyProvider;
45use crate::error::{NoSolutionError, ResolveError};
46use crate::fork_indexes::ForkIndexes;
47use crate::fork_strategy::ForkStrategy;
48use crate::fork_urls::ForkUrls;
49use crate::manifest::Manifest;
50use crate::pins::FilePins;
51use crate::preferences::{PreferenceSource, Preferences};
52use crate::pubgrub::{
53    DependencySource, PubGrubDependency, PubGrubPackage, PubGrubPackageInner, PubGrubPriorities,
54    PubGrubPython,
55};
56use crate::python_requirement::PythonRequirement;
57use crate::resolution::ResolverOutput;
58use crate::resolution_mode::ResolutionStrategy;
59pub(crate) use crate::resolver::availability::{
60    ResolverVersion, UnavailableErrorChain, UnavailablePackage, UnavailableReason,
61    UnavailableVersion,
62};
63use crate::resolver::batch_prefetch::BatchPrefetcher;
64pub use crate::resolver::derivation::DerivationChainBuilder;
65pub use crate::resolver::environment::ResolverEnvironment;
66use crate::resolver::environment::{
67    ForkingPossibility, fork_version_by_marker, fork_version_by_python_requirement,
68};
69pub(crate) use crate::resolver::fork_map::{ForkMap, ForkSet};
70pub use crate::resolver::index::InMemoryIndex;
71use crate::resolver::indexes::Indexes;
72pub use crate::resolver::provider::{
73    DefaultResolverProvider, MetadataResponse, PackageVersionsResult, ResolverProvider,
74    VersionsResponse, WheelMetadataResult,
75};
76pub use crate::resolver::reporter::{BuildId, Reporter};
77use crate::resolver::system::SystemDependency;
78pub(crate) use crate::resolver::urls::Urls;
79use crate::universal_marker::{ConflictMarker, UniversalMarker};
80use crate::yanks::AllowedYanks;
81use crate::{
82    DependencyMode, ExcludeNewer, Exclusions, FlatIndex, Options, ResolutionMode, VersionMap,
83    marker,
84};
85pub(crate) use provider::MetadataUnavailable;
86
87mod availability;
88mod batch_prefetch;
89mod derivation;
90mod environment;
91mod fork_map;
92mod index;
93mod indexes;
94mod provider;
95mod reporter;
96mod system;
97mod urls;
98
/// The number of conflicts a package may accumulate before we re-prioritize and backtrack.
///
/// NOTE(review): the threshold is presumably consulted by the conflict-tracking /
/// re-prioritization path (`record_conflict` / `reprioritize_conflicts`) — the
/// consuming code is not visible in this chunk; confirm before relying on it.
const CONFLICT_THRESHOLD: usize = 5;
101
/// A package resolver, pairing the shared resolution state with the provider
/// backend used to fetch package versions and metadata.
pub struct Resolver<Provider: ResolverProvider, InstalledPackages: InstalledPackagesProvider> {
    /// State shared between the prefetcher and the PubGrub solver, across all forks.
    state: ResolverState<InstalledPackages>,
    /// The backend used to fetch package versions and metadata (e.g., the
    /// default network-backed provider, or a custom I/O implementation).
    provider: Provider,
}
106
/// State that is shared between the prefetcher and the PubGrub solver during
/// resolution, across all forks.
struct ResolverState<InstalledPackages: InstalledPackagesProvider> {
    /// The name of the root project, if any (taken from the manifest).
    project: Option<PackageName>,
    /// The direct requirements to resolve (taken from the manifest).
    requirements: Vec<Requirement>,
    /// Constraints applied to the requirements (taken from the manifest).
    constraints: Constraints,
    /// Overrides applied to the requirements (taken from the manifest).
    overrides: Overrides,
    excludes: Excludes,
    /// Version preferences, e.g., carried over from an existing lockfile.
    preferences: Preferences,
    /// The Git resolver, shared with the build context.
    git: GitResolver,
    /// Capabilities of the configured indexes, shared with the build context.
    capabilities: IndexCapabilities,
    /// The configured index locations, shared with the build context.
    locations: IndexLocations,
    exclusions: Exclusions,
    /// URL requirements derived from the manifest.
    urls: Urls,
    /// Per-package index assignments derived from the manifest.
    indexes: Indexes,
    /// Whether to resolve dependencies transitively or only direct dependencies.
    dependency_mode: DependencyMode,
    hasher: HashStrategy,
    /// The environment(s) to resolve for; drives forking in universal mode.
    env: ResolverEnvironment,
    /// The environment of the current Python interpreter.
    current_environment: MarkerEnvironment,
    // Platform tags, when resolving for a specific platform; `None` presumably
    // corresponds to universal resolution — confirm against callers.
    tags: Option<Tags>,
    python_requirement: PythonRequirement,
    /// Declared conflicts (extras/groups) to respect during resolution.
    conflicts: Conflicts,
    /// The members of the current workspace (taken from the manifest).
    workspace_members: BTreeSet<PackageName>,
    /// The strategy used to select candidate versions.
    selector: CandidateSelector,
    /// Shared in-memory cache of fetched package and version metadata.
    index: InMemoryIndex,
    /// The set of packages already installed in the target environment.
    installed_packages: InstalledPackages,
    /// Incompatibilities for packages that are entirely unavailable.
    unavailable_packages: DashMap<PackageName, UnavailablePackage>,
    /// Incompatibilities for packages that are unavailable at specific versions.
    incomplete_packages: DashMap<PackageName, DashMap<Version, MetadataUnavailable>>,
    /// The options that were used to configure this resolver.
    options: Options,
    /// The reporter to use for this resolver.
    reporter: Option<Arc<dyn Reporter>>,
}
143
144impl<'a, Context: BuildContext, InstalledPackages: InstalledPackagesProvider>
145    Resolver<DefaultResolverProvider<'a, Context>, InstalledPackages>
146{
147    /// Initialize a new resolver using the default backend doing real requests.
148    ///
149    /// Reads the flat index entries.
150    ///
151    /// # Marker environment
152    ///
153    /// The marker environment is optional.
154    ///
155    /// When a marker environment is not provided, the resolver is said to be
156    /// in "universal" mode. When in universal mode, the resolution produced
157    /// may contain multiple versions of the same package. And thus, in order
158    /// to use the resulting resolution, there must be a "universal"-aware
159    /// reader of the resolution that knows to exclude distributions that can't
160    /// be used in the current environment.
161    ///
162    /// When a marker environment is provided, the resolver is in
163    /// "non-universal" mode, which corresponds to standard `pip` behavior that
164    /// works only for a specific marker environment.
165    pub fn new(
166        manifest: Manifest,
167        options: Options,
168        python_requirement: &'a PythonRequirement,
169        env: ResolverEnvironment,
170        current_environment: &MarkerEnvironment,
171        conflicts: Conflicts,
172        tags: Option<&'a Tags>,
173        flat_index: &'a FlatIndex,
174        index: &'a InMemoryIndex,
175        hasher: &'a HashStrategy,
176        build_context: &'a Context,
177        installed_packages: InstalledPackages,
178        database: DistributionDatabase<'a, Context>,
179    ) -> Result<Self, ResolveError> {
180        let provider = DefaultResolverProvider::new(
181            database,
182            flat_index,
183            tags,
184            python_requirement.target(),
185            AllowedYanks::from_manifest(&manifest, &env, options.dependency_mode),
186            hasher,
187            options.exclude_newer.clone(),
188            build_context.build_options(),
189            build_context.capabilities(),
190        );
191
192        Self::new_custom_io(
193            manifest,
194            options,
195            hasher,
196            env,
197            current_environment,
198            tags.cloned(),
199            python_requirement,
200            conflicts,
201            index,
202            build_context.git(),
203            build_context.capabilities(),
204            build_context.locations(),
205            provider,
206            installed_packages,
207        )
208    }
209}
210
211impl<Provider: ResolverProvider, InstalledPackages: InstalledPackagesProvider>
212    Resolver<Provider, InstalledPackages>
213{
214    /// Initialize a new resolver using a user provided backend.
215    pub fn new_custom_io(
216        manifest: Manifest,
217        options: Options,
218        hasher: &HashStrategy,
219        env: ResolverEnvironment,
220        current_environment: &MarkerEnvironment,
221        tags: Option<Tags>,
222        python_requirement: &PythonRequirement,
223        conflicts: Conflicts,
224        index: &InMemoryIndex,
225        git: &GitResolver,
226        capabilities: &IndexCapabilities,
227        locations: &IndexLocations,
228        provider: Provider,
229        installed_packages: InstalledPackages,
230    ) -> Result<Self, ResolveError> {
231        let state = ResolverState {
232            index: index.clone(),
233            git: git.clone(),
234            capabilities: capabilities.clone(),
235            selector: CandidateSelector::for_resolution(&options, &manifest, &env),
236            dependency_mode: options.dependency_mode,
237            urls: Urls::from_manifest(&manifest, &env, git, options.dependency_mode),
238            indexes: Indexes::from_manifest(&manifest, &env, options.dependency_mode),
239            project: manifest.project,
240            workspace_members: manifest.workspace_members,
241            requirements: manifest.requirements,
242            constraints: manifest.constraints,
243            overrides: manifest.overrides,
244            excludes: manifest.excludes,
245            preferences: manifest.preferences,
246            exclusions: manifest.exclusions,
247            hasher: hasher.clone(),
248            locations: locations.clone(),
249            env,
250            current_environment: current_environment.clone(),
251            tags,
252            python_requirement: python_requirement.clone(),
253            conflicts,
254            installed_packages,
255            unavailable_packages: DashMap::default(),
256            incomplete_packages: DashMap::default(),
257            options,
258            reporter: None,
259        };
260        Ok(Self { state, provider })
261    }
262
263    /// Set the [`Reporter`] to use for this installer.
264    #[must_use]
265    pub fn with_reporter(self, reporter: Arc<dyn Reporter>) -> Self {
266        Self {
267            state: ResolverState {
268                reporter: Some(reporter.clone()),
269                ..self.state
270            },
271            provider: self
272                .provider
273                .with_reporter(reporter.into_distribution_reporter()),
274        }
275    }
276
277    /// Resolve a set of requirements into a set of pinned versions.
278    pub async fn resolve(self) -> Result<ResolverOutput, ResolveError> {
279        let state = Arc::new(self.state);
280        let provider = Arc::new(self.provider);
281
282        // A channel to fetch package metadata (e.g., given `flask`, fetch all versions) and version
283        // metadata (e.g., given `flask==1.0.0`, fetch the metadata for that version).
284        // Channel size is set large to accommodate batch prefetching.
285        let (request_sink, request_stream) = mpsc::channel(300);
286
287        // Run the fetcher.
288        let requests_fut = state.clone().fetch(provider.clone(), request_stream).fuse();
289
290        // Spawn the PubGrub solver on a dedicated thread.
291        let solver = state.clone();
292        let (tx, rx) = oneshot::channel();
293        thread::Builder::new()
294            .name("uv-resolver".into())
295            .spawn(move || {
296                let result = solver.solve(&request_sink);
297
298                // This may fail if the main thread returned early due to an error.
299                let _ = tx.send(result);
300            })
301            .unwrap();
302
303        let resolve_fut = async move { rx.await.map_err(|_| ResolveError::ChannelClosed) };
304
305        // Wait for both to complete.
306        let ((), resolution) = tokio::try_join!(requests_fut, resolve_fut)?;
307
308        state.on_complete();
309        resolution
310    }
311}
312
313impl<InstalledPackages: InstalledPackagesProvider> ResolverState<InstalledPackages> {
314    #[instrument(skip_all)]
315    fn solve(
316        self: Arc<Self>,
317        request_sink: &Sender<Request>,
318    ) -> Result<ResolverOutput, ResolveError> {
319        debug!(
320            "Solving with installed Python version: {}",
321            self.python_requirement.exact()
322        );
323        debug!(
324            "Solving with target Python version: {}",
325            self.python_requirement.target()
326        );
327        if !self.options.exclude_newer.is_empty() {
328            debug!("Solving with exclude-newer: {}", self.options.exclude_newer);
329        }
330
331        let mut visited = FxHashSet::default();
332
333        let root = PubGrubPackage::from(PubGrubPackageInner::Root(self.project.clone()));
334        let pubgrub = State::init(root.clone(), MIN_VERSION.clone());
335        let prefetcher = BatchPrefetcher::new(
336            self.capabilities.clone(),
337            self.index.clone(),
338            request_sink.clone(),
339        );
340        let state = ForkState::new(
341            pubgrub,
342            self.env.clone(),
343            self.python_requirement.clone(),
344            prefetcher,
345        );
346        let mut preferences = self.preferences.clone();
347        let mut forked_states = self.env.initial_forked_states(state)?;
348        let mut resolutions = vec![];
349
350        'FORK: while let Some(mut state) = forked_states.pop() {
351            if let Some(split) = state.env.end_user_fork_display() {
352                let requires_python = state.python_requirement.target();
353                debug!("Solving {split} (requires-python: {requires_python:?})");
354            }
355            let start = Instant::now();
356            loop {
357                let highest_priority_pkg =
358                    if let Some(initial) = state.initial_id.take() {
359                        // If we just forked based on `requires-python`, we can skip unit
360                        // propagation, since we already propagated the package that initiated
361                        // the fork.
362                        initial
363                    } else {
364                        // Run unit propagation.
365                        let result = state.pubgrub.unit_propagation(state.next);
366                        match result {
367                            Err(err) => {
368                                // If unit propagation failed, there is no solution.
369                                return Err(self.convert_no_solution_err(
370                                    err,
371                                    state.fork_urls,
372                                    state.fork_indexes,
373                                    state.env,
374                                    self.current_environment.clone(),
375                                    Some(&self.options.exclude_newer),
376                                    &visited,
377                                ));
378                            }
379                            Ok(conflicts) => {
380                                for (affected, incompatibility) in conflicts {
381                                    // Conflict tracking: If there was a conflict, track affected and
382                                    // culprit for all root cause incompatibilities
383                                    state.record_conflict(affected, None, incompatibility);
384                                }
385                            }
386                        }
387
388                        // Pre-visit all candidate packages, to allow metadata to be fetched in parallel.
389                        if self.dependency_mode.is_transitive() {
390                            Self::pre_visit(
391                                state
392                                    .pubgrub
393                                    .partial_solution
394                                    .prioritized_packages()
395                                    .map(|(id, range)| (&state.pubgrub.package_store[id], range)),
396                                &self.urls,
397                                &self.indexes,
398                                &state.python_requirement,
399                                request_sink,
400                            )?;
401                        }
402
403                        Self::reprioritize_conflicts(&mut state);
404
405                        trace!(
406                            "Assigned packages: {}",
407                            state
408                                .pubgrub
409                                .partial_solution
410                                .extract_solution()
411                                .filter(|(p, _)| !state.pubgrub.package_store[*p].is_proxy())
412                                .map(|(p, v)| format!("{}=={}", state.pubgrub.package_store[p], v))
413                                .join(", ")
414                        );
415                        // Choose a package.
416                        // We aren't allowed to use the term intersection as it would extend the
417                        // mutable borrow of `state`.
418                        let Some((highest_priority_pkg, _)) =
419                            state.pubgrub.partial_solution.pick_highest_priority_pkg(
420                                |id, _range| state.priorities.get(&state.pubgrub.package_store[id]),
421                            )
422                        else {
423                            // All packages have been assigned, the fork has been successfully resolved
424                            if tracing::enabled!(Level::DEBUG) {
425                                state.prefetcher.log_tried_versions();
426                            }
427                            debug!(
428                                "{} resolution took {:.3}s",
429                                state.env,
430                                start.elapsed().as_secs_f32()
431                            );
432
433                            let resolution = state.into_resolution();
434
435                            // Walk over the selected versions, and mark them as preferences. We have to
436                            // add forks back as to not override the preferences from the lockfile for
437                            // the next fork
438                            //
439                            // If we're using a resolution mode that varies based on whether a dependency is
440                            // direct or transitive, skip preferences, as we risk adding a preference from
441                            // one fork (in which it's a transitive dependency) to another fork (in which
442                            // it's direct).
443                            if matches!(
444                                self.options.resolution_mode,
445                                ResolutionMode::Lowest | ResolutionMode::Highest
446                            ) {
447                                for (package, version) in &resolution.nodes {
448                                    preferences.insert(
449                                        package.name.clone(),
450                                        package.index.clone(),
451                                        resolution
452                                            .env
453                                            .try_universal_markers()
454                                            .unwrap_or(UniversalMarker::TRUE),
455                                        version.clone(),
456                                        PreferenceSource::Resolver,
457                                    );
458                                }
459                            }
460
461                            resolutions.push(resolution);
462                            continue 'FORK;
463                        };
464                        trace!(
465                            "Chose package for decision: {}. remaining choices: {}",
466                            state.pubgrub.package_store[highest_priority_pkg],
467                            state
468                                .pubgrub
469                                .partial_solution
470                                .undecided_packages()
471                                .filter(|(p, _)| !state.pubgrub.package_store[**p].is_proxy())
472                                .map(|(p, _)| state.pubgrub.package_store[*p].to_string())
473                                .join(", ")
474                        );
475
476                        highest_priority_pkg
477                    };
478
479                state.next = highest_priority_pkg;
480
481                // TODO(charlie): Remove as many usages of `next_package` as we can.
482                let next_id = state.next;
483                let next_package = &state.pubgrub.package_store[state.next];
484
485                let url = next_package
486                    .name()
487                    .and_then(|name| state.fork_urls.get(name));
488                let index = next_package
489                    .name()
490                    .and_then(|name| state.fork_indexes.get(name));
491
492                // Consider:
493                // ```toml
494                // dependencies = [
495                //   "iniconfig == 1.1.1 ; python_version < '3.12'",
496                //   "iniconfig @ https://files.pythonhosted.org/packages/ef/a6/62565a6e1cf69e10f5727360368e451d4b7f58beeac6173dc9db836a5b46/iniconfig-2.0.0-py3-none-any.whl ; python_version >= '3.12'",
497                // ]
498                // ```
499                // In the `python_version < '3.12'` case, we haven't pre-visited `iniconfig` yet,
500                // since we weren't sure whether it might also be a URL requirement when
501                // transforming the requirements. For that case, we do another request here
502                // (idempotent due to caching).
503                self.request_package(next_package, url, index, request_sink)?;
504
505                let version = if let Some(version) = state.initial_version.take() {
506                    // If we just forked based on platform support, we can skip version selection,
507                    // since the fork operation itself already selected the appropriate version for
508                    // the platform.
509                    version
510                } else {
511                    let term_intersection = state
512                        .pubgrub
513                        .partial_solution
514                        .term_intersection_for_package(next_id)
515                        .expect("a package was chosen but we don't have a term");
516                    let decision = self.choose_version(
517                        next_package,
518                        next_id,
519                        index.map(IndexMetadata::url),
520                        term_intersection.unwrap_positive(),
521                        &mut state.pins,
522                        &preferences,
523                        &state.fork_urls,
524                        &state.env,
525                        &state.python_requirement,
526                        &state.pubgrub,
527                        &mut visited,
528                        request_sink,
529                    )?;
530
531                    // Pick the next compatible version.
532                    let Some(version) = decision else {
533                        debug!("No compatible version found for: {next_package}");
534
535                        let term_intersection = state
536                            .pubgrub
537                            .partial_solution
538                            .term_intersection_for_package(next_id)
539                            .expect("a package was chosen but we don't have a term");
540
541                        if let PubGrubPackageInner::Package { name, .. } = &**next_package {
542                            // Check if the decision was due to the package being unavailable
543                            if let Some(entry) = self.unavailable_packages.get(name) {
544                                state
545                                    .pubgrub
546                                    .add_incompatibility(Incompatibility::custom_term(
547                                        next_id,
548                                        term_intersection.clone(),
549                                        UnavailableReason::Package(entry.clone()),
550                                    ));
551                                continue;
552                            }
553                        }
554
555                        state
556                            .pubgrub
557                            .add_incompatibility(Incompatibility::no_versions(
558                                next_id,
559                                term_intersection.clone(),
560                            ));
561                        continue;
562                    };
563
564                    let version = match version {
565                        ResolverVersion::Unforked(version) => version,
566                        ResolverVersion::Forked(forks) => {
567                            forked_states.extend(self.version_forks_to_fork_states(state, forks));
568                            continue 'FORK;
569                        }
570                        ResolverVersion::Unavailable(version, reason) => {
571                            state.add_unavailable_version(version, reason);
572                            continue;
573                        }
574                    };
575
576                    // Only consider registry packages for prefetch.
577                    if url.is_none() {
578                        state.prefetcher.prefetch_batches(
579                            next_package,
580                            index,
581                            &version,
582                            term_intersection.unwrap_positive(),
583                            state
584                                .pubgrub
585                                .partial_solution
586                                .unchanging_term_for_package(next_id),
587                            &state.python_requirement,
588                            &self.selector,
589                            &state.env,
590                        )?;
591                    }
592
593                    version
594                };
595
596                state.prefetcher.version_tried(next_package, &version);
597
598                self.on_progress(next_package, &version);
599
600                if !state
601                    .added_dependencies
602                    .entry(next_id)
603                    .or_default()
604                    .insert(version.clone())
605                {
606                    // `dep_incompats` are already in `incompatibilities` so we know there are not satisfied
607                    // terms and can add the decision directly.
608                    state
609                        .pubgrub
610                        .partial_solution
611                        .add_decision(next_id, version);
612                    continue;
613                }
614
615                // Retrieve that package dependencies.
616                let forked_deps = self.get_dependencies_forking(
617                    next_id,
618                    next_package,
619                    &version,
620                    &state.pins,
621                    &state.fork_urls,
622                    &state.env,
623                    &state.python_requirement,
624                    &state.pubgrub,
625                )?;
626
627                match forked_deps {
628                    ForkedDependencies::Unavailable(reason) => {
629                        // Then here, if we get a reason that we consider unrecoverable, we should
630                        // show the derivation chain.
631                        state
632                            .pubgrub
633                            .add_incompatibility(Incompatibility::custom_version(
634                                next_id,
635                                version.clone(),
636                                UnavailableReason::Version(reason),
637                            ));
638                    }
639                    ForkedDependencies::Unforked(dependencies) => {
640                        // Enrich the state with any URLs, etc.
641                        state
642                            .visit_package_version_dependencies(
643                                next_id,
644                                &version,
645                                &self.urls,
646                                &self.indexes,
647                                &dependencies,
648                                &self.git,
649                                &self.workspace_members,
650                                self.selector.resolution_strategy(),
651                            )
652                            .map_err(|err| {
653                                enrich_dependency_error(err, next_id, &version, &state.pubgrub)
654                            })?;
655
656                        // Emit a request to fetch the metadata for each registry package.
657                        self.visit_dependencies(&dependencies, &state, request_sink)
658                            .map_err(|err| {
659                                enrich_dependency_error(err, next_id, &version, &state.pubgrub)
660                            })?;
661
662                        // Add the dependencies to the state.
663                        state.add_package_version_dependencies(next_id, &version, dependencies);
664                    }
665                    ForkedDependencies::Forked {
666                        mut forks,
667                        diverging_packages,
668                    } => {
669                        debug!(
670                            "Pre-fork {} took {:.3}s",
671                            state.env,
672                            start.elapsed().as_secs_f32()
673                        );
674
675                        // Prioritize the forks.
676                        match (self.options.fork_strategy, self.options.resolution_mode) {
677                            (ForkStrategy::Fewest, _) | (_, ResolutionMode::Lowest) => {
678                                // Prefer solving forks with lower Python bounds, since they're more
679                                // likely to produce solutions that work for forks with higher
680                                // Python bounds (whereas the inverse is not true).
681                                forks.sort_by(|a, b| {
682                                    a.cmp_requires_python(b)
683                                        .reverse()
684                                        .then_with(|| a.cmp_upper_bounds(b))
685                                });
686                            }
687                            (ForkStrategy::RequiresPython, _) => {
688                                // Otherwise, prefer solving forks with higher Python bounds, since
689                                // we want to prioritize choosing the latest-compatible package
690                                // version for each Python version.
691                                forks.sort_by(|a, b| {
692                                    a.cmp_requires_python(b).then_with(|| a.cmp_upper_bounds(b))
693                                });
694                            }
695                        }
696
697                        for new_fork_state in self.forks_to_fork_states(
698                            state,
699                            &version,
700                            forks,
701                            request_sink,
702                            &diverging_packages,
703                        ) {
704                            forked_states.push(new_fork_state?);
705                        }
706                        continue 'FORK;
707                    }
708                }
709            }
710        }
711        if resolutions.len() > 1 {
712            info!(
713                "Solved your requirements for {} environments",
714                resolutions.len()
715            );
716        }
717        if tracing::enabled!(Level::DEBUG) {
718            for resolution in &resolutions {
719                if let Some(env) = resolution.env.end_user_fork_display() {
720                    let packages: FxHashSet<_> = resolution
721                        .nodes
722                        .keys()
723                        .map(|package| &package.name)
724                        .collect();
725                    debug!(
726                        "Distinct solution for {env} with {} package(s)",
727                        packages.len()
728                    );
729                }
730            }
731        }
732        for resolution in &resolutions {
733            Self::trace_resolution(resolution);
734        }
735        ResolverOutput::from_state(
736            &resolutions,
737            &self.requirements,
738            &self.constraints,
739            &self.overrides,
740            &self.preferences,
741            &self.index,
742            &self.git,
743            &self.python_requirement,
744            &self.conflicts,
745            self.selector.resolution_strategy(),
746            self.options.clone(),
747        )
748    }
749
750    /// Change the priority of often conflicting packages and backtrack.
751    ///
752    /// To be called after unit propagation.
753    fn reprioritize_conflicts(state: &mut ForkState) {
754        for package in state.conflict_tracker.prioritize.drain(..) {
755            let changed = state
756                .priorities
757                .mark_conflict_early(&state.pubgrub.package_store[package]);
758            if changed {
759                debug!(
760                    "Package {} has too many conflicts (affected), prioritizing",
761                    &state.pubgrub.package_store[package]
762                );
763            } else {
764                debug!(
765                    "Package {} has too many conflicts (affected), already {:?}",
766                    state.pubgrub.package_store[package],
767                    state.priorities.get(&state.pubgrub.package_store[package])
768                );
769            }
770        }
771
772        for package in state.conflict_tracker.deprioritize.drain(..) {
773            let changed = state
774                .priorities
775                .mark_conflict_late(&state.pubgrub.package_store[package]);
776            if changed {
777                debug!(
778                    "Package {} has too many conflicts (culprit), deprioritizing and backtracking",
779                    state.pubgrub.package_store[package],
780                );
781                let backtrack_level = state.pubgrub.backtrack_package(package);
782                if let Some(backtrack_level) = backtrack_level {
783                    debug!("Backtracked {backtrack_level} decisions");
784                } else {
785                    debug!(
786                        "Package {} is not decided, cannot backtrack",
787                        state.pubgrub.package_store[package]
788                    );
789                }
790            } else {
791                debug!(
792                    "Package {} has too many conflicts (culprit), already {:?}",
793                    state.pubgrub.package_store[package],
794                    state.priorities.get(&state.pubgrub.package_store[package])
795                );
796            }
797        }
798    }
799
800    /// When trace level logging is enabled, we dump the final
801    /// set of resolutions, including markers, to help with
802    /// debugging. Namely, this tells use precisely the state
803    /// emitted by the resolver before going off to construct a
804    /// resolution graph.
805    fn trace_resolution(combined: &Resolution) {
806        if !tracing::enabled!(Level::TRACE) {
807            return;
808        }
809        trace!("Resolution: {:?}", combined.env);
810        for edge in &combined.edges {
811            trace!(
812                "Resolution edge: {} -> {}",
813                edge.from
814                    .as_ref()
815                    .map(PackageName::as_str)
816                    .unwrap_or("ROOT"),
817                edge.to,
818            );
819            // The unwraps below are OK because `write`ing to
820            // a String can never fail (except for OOM).
821            let mut msg = String::new();
822            write!(msg, "{}", edge.from_version).unwrap();
823            if let Some(ref extra) = edge.from_extra {
824                write!(msg, " (extra: {extra})").unwrap();
825            }
826            if let Some(ref dev) = edge.from_group {
827                write!(msg, " (group: {dev})").unwrap();
828            }
829
830            write!(msg, " -> ").unwrap();
831
832            write!(msg, "{}", edge.to_version).unwrap();
833            if let Some(ref extra) = edge.to_extra {
834                write!(msg, " (extra: {extra})").unwrap();
835            }
836            if let Some(ref dev) = edge.to_group {
837                write!(msg, " (group: {dev})").unwrap();
838            }
839            if let Some(marker) = edge.marker.contents() {
840                write!(msg, " ; {marker}").unwrap();
841            }
842            trace!("Resolution edge:     {msg}");
843        }
844    }
845
846    /// Convert the dependency [`Fork`]s into [`ForkState`]s.
847    fn forks_to_fork_states<'a>(
848        &'a self,
849        current_state: ForkState,
850        version: &'a Version,
851        forks: Vec<Fork>,
852        request_sink: &'a Sender<Request>,
853        diverging_packages: &'a [PackageName],
854    ) -> impl Iterator<Item = Result<ForkState, ResolveError>> + 'a {
855        debug!(
856            "Splitting resolution on {}=={} over {} into {} resolution{} with separate markers",
857            current_state.pubgrub.package_store[current_state.next],
858            version,
859            diverging_packages
860                .iter()
861                .map(ToString::to_string)
862                .join(", "),
863            forks.len(),
864            if forks.len() == 1 { "" } else { "s" }
865        );
866        assert!(forks.len() >= 2);
867        // This is a somewhat tortured technique to ensure
868        // that our resolver state is only cloned as much
869        // as it needs to be. We basically move the state
870        // into `forked_states`, and then only clone it if
871        // there is at least one more fork to visit.
872        let package = current_state.next;
873        let mut cur_state = Some(current_state);
874        let forks_len = forks.len();
875        forks
876            .into_iter()
877            .enumerate()
878            .map(move |(i, fork)| {
879                let is_last = i == forks_len - 1;
880                let forked_state = cur_state.take().unwrap();
881                if !is_last {
882                    cur_state = Some(forked_state.clone());
883                }
884
885                let env = fork.env.clone();
886                (fork, forked_state.with_env(env))
887            })
888            .map(move |(fork, mut forked_state)| {
889                // Enrich the state with any URLs, etc.
890                forked_state
891                    .visit_package_version_dependencies(
892                        package,
893                        version,
894                        &self.urls,
895                        &self.indexes,
896                        &fork.dependencies,
897                        &self.git,
898                        &self.workspace_members,
899                        self.selector.resolution_strategy(),
900                    )
901                    .map_err(|err| {
902                        enrich_dependency_error(err, package, version, &forked_state.pubgrub)
903                    })?;
904
905                // Emit a request to fetch the metadata for each registry package.
906                self.visit_dependencies(&fork.dependencies, &forked_state, request_sink)
907                    .map_err(|err| {
908                        enrich_dependency_error(err, package, version, &forked_state.pubgrub)
909                    })?;
910
911                // Add the dependencies to the state.
912                forked_state.add_package_version_dependencies(package, version, fork.dependencies);
913
914                Ok(forked_state)
915            })
916    }
917
918    /// Convert the dependency [`Fork`]s into [`ForkState`]s.
919    #[expect(clippy::unused_self)]
920    fn version_forks_to_fork_states(
921        &self,
922        current_state: ForkState,
923        forks: Vec<VersionFork>,
924    ) -> impl Iterator<Item = ForkState> + '_ {
925        // This is a somewhat tortured technique to ensure
926        // that our resolver state is only cloned as much
927        // as it needs to be. We basically move the state
928        // into `forked_states`, and then only clone it if
929        // there is at least one more fork to visit.
930        let mut cur_state = Some(current_state);
931        let forks_len = forks.len();
932        forks.into_iter().enumerate().map(move |(i, fork)| {
933            let is_last = i == forks_len - 1;
934            let mut forked_state = cur_state.take().unwrap();
935            if !is_last {
936                cur_state = Some(forked_state.clone());
937            }
938            forked_state.initial_id = Some(fork.id);
939            forked_state.initial_version = fork.version;
940            forked_state.with_env(fork.env)
941        })
942    }
943
944    /// Visit a set of [`PubGrubDependency`] entities prior to selection.
945    fn visit_dependencies(
946        &self,
947        dependencies: &[PubGrubDependency],
948        state: &ForkState,
949        request_sink: &Sender<Request>,
950    ) -> Result<(), ResolveError> {
951        for dependency in dependencies {
952            let PubGrubDependency {
953                package,
954                version: _,
955                parent: _,
956                source: _,
957            } = dependency;
958            let url = package.name().and_then(|name| state.fork_urls.get(name));
959            let index = package.name().and_then(|name| state.fork_indexes.get(name));
960            self.visit_package(package, url, index, request_sink)?;
961        }
962        Ok(())
963    }
964
965    /// Visit a [`PubGrubPackage`] prior to selection. This should be called on a [`PubGrubPackage`]
966    /// before it is selected, to allow metadata to be fetched in parallel.
967    fn visit_package(
968        &self,
969        package: &PubGrubPackage,
970        url: Option<&VerbatimParsedUrl>,
971        index: Option<&IndexMetadata>,
972        request_sink: &Sender<Request>,
973    ) -> Result<(), ResolveError> {
974        // Ignore unresolved URL packages, i.e., packages that use a direct URL in some forks.
975        if url.is_none()
976            && package
977                .name()
978                .map(|name| self.urls.any_url(name))
979                .unwrap_or(true)
980        {
981            return Ok(());
982        }
983
984        self.request_package(package, url, index, request_sink)
985    }
986
987    fn request_package(
988        &self,
989        package: &PubGrubPackage,
990        url: Option<&VerbatimParsedUrl>,
991        index: Option<&IndexMetadata>,
992        request_sink: &Sender<Request>,
993    ) -> Result<(), ResolveError> {
994        // Only request real packages.
995        let Some(name) = package.name_no_root() else {
996            return Ok(());
997        };
998
999        if let Some(url) = url {
1000            // Verify that the package is allowed under the hash-checking policy.
1001            if !self.hasher.allows_url(&url.verbatim) {
1002                return Err(ResolveError::UnhashedPackage(name.clone()));
1003            }
1004
1005            // Emit a request to fetch the metadata for this distribution.
1006            let dist = Dist::from_url(name.clone(), url.clone())?;
1007            if self.index.distributions().register(dist.distribution_id()) {
1008                request_sink.blocking_send(Request::Dist(dist))?;
1009            }
1010        } else if let Some(index) = index {
1011            // Emit a request to fetch the metadata for this package on the index.
1012            if self
1013                .index
1014                .explicit()
1015                .register((name.clone(), index.url().clone()))
1016            {
1017                request_sink.blocking_send(Request::Package(name.clone(), Some(index.clone())))?;
1018            }
1019        } else {
1020            // Emit a request to fetch the metadata for this package.
1021            if self.index.implicit().register(name.clone()) {
1022                request_sink.blocking_send(Request::Package(name.clone(), None))?;
1023            }
1024        }
1025        Ok(())
1026    }
1027
1028    /// Visit the set of [`PubGrubPackage`] candidates prior to selection. This allows us to fetch
1029    /// metadata for all packages in parallel.
1030    fn pre_visit<'data>(
1031        packages: impl Iterator<Item = (&'data PubGrubPackage, &'data Range<Version>)>,
1032        urls: &Urls,
1033        indexes: &Indexes,
1034        python_requirement: &PythonRequirement,
1035        request_sink: &Sender<Request>,
1036    ) -> Result<(), ResolveError> {
1037        // Iterate over the potential packages, and fetch file metadata for any of them. These
1038        // represent our current best guesses for the versions that we _might_ select.
1039        for (package, range) in packages {
1040            let PubGrubPackageInner::Package {
1041                name,
1042                extra: None,
1043                group: None,
1044                marker: MarkerTree::TRUE,
1045            } = &**package
1046            else {
1047                continue;
1048            };
1049            // Avoid pre-visiting packages that have any URLs in any fork. At this point we can't
1050            // tell whether they are registry distributions or which url they use.
1051            if urls.any_url(name) {
1052                continue;
1053            }
1054            // Avoid visiting packages that may use an explicit index.
1055            if indexes.contains_key(name) {
1056                continue;
1057            }
1058            request_sink.blocking_send(Request::Prefetch(
1059                name.clone(),
1060                range.clone(),
1061                python_requirement.clone(),
1062            ))?;
1063        }
1064        Ok(())
1065    }
1066
1067    /// Given a candidate package, choose the next version in range to try.
1068    ///
1069    /// Returns `None` when there are no versions in the given range, rejecting the current partial
1070    /// solution.
1071    // TODO(konsti): re-enable tracing. This trace is crucial to understanding the
1072    // tracing-durations-export diagrams, but it took ~5% resolver thread runtime for apache-airflow
1073    // when I last measured.
1074    #[cfg_attr(feature = "tracing-durations-export", instrument(skip_all, fields(%package)))]
1075    fn choose_version(
1076        &self,
1077        package: &PubGrubPackage,
1078        id: Id<PubGrubPackage>,
1079        index: Option<&IndexUrl>,
1080        range: &Range<Version>,
1081        pins: &mut FilePins,
1082        preferences: &Preferences,
1083        fork_urls: &ForkUrls,
1084        env: &ResolverEnvironment,
1085        python_requirement: &PythonRequirement,
1086        pubgrub: &State<UvDependencyProvider>,
1087        visited: &mut FxHashSet<PackageName>,
1088        request_sink: &Sender<Request>,
1089    ) -> Result<Option<ResolverVersion>, ResolveError> {
1090        match &**package {
1091            PubGrubPackageInner::Root(_) => {
1092                Ok(Some(ResolverVersion::Unforked(MIN_VERSION.clone())))
1093            }
1094
1095            PubGrubPackageInner::Python(_) => {
1096                // Dependencies on Python are only added when a package is incompatible; as such,
1097                // we don't need to do anything here.
1098                Ok(None)
1099            }
1100
1101            PubGrubPackageInner::System(_) => {
1102                // We don't care what the actual version is here, just that it's consistent across
1103                // the dependency graph.
1104                let Some(version) = range.as_singleton() else {
1105                    return Ok(None);
1106                };
1107                Ok(Some(ResolverVersion::Unforked(version.clone())))
1108            }
1109
1110            PubGrubPackageInner::Marker { name, .. }
1111            | PubGrubPackageInner::Extra { name, .. }
1112            | PubGrubPackageInner::Group { name, .. }
1113            | PubGrubPackageInner::Package { name, .. } => {
1114                if let Some(url) = package.name().and_then(|name| fork_urls.get(name)) {
1115                    self.choose_version_url(id, name, range, url, env, python_requirement, pubgrub)
1116                } else {
1117                    self.choose_version_registry(
1118                        package,
1119                        id,
1120                        name,
1121                        index,
1122                        range,
1123                        preferences,
1124                        env,
1125                        python_requirement,
1126                        pubgrub,
1127                        pins,
1128                        visited,
1129                        request_sink,
1130                    )
1131                }
1132            }
1133        }
1134    }
1135
    /// Select a version for a URL requirement. Since there is only one version per URL, we return
    /// that version if it is in range and `None` otherwise.
    ///
    /// Returns `Ok(Some(ResolverVersion::Unavailable(..)))` when the URL's distribution exists
    /// but cannot be used (incompatible platform coverage, wheel tag, or `Requires-Python`),
    /// so the incompatibility can be surfaced in error reporting rather than silently skipped.
    fn choose_version_url(
        &self,
        id: Id<PubGrubPackage>,
        name: &PackageName,
        range: &Range<Version>,
        url: &VerbatimParsedUrl,
        env: &ResolverEnvironment,
        python_requirement: &PythonRequirement,
        pubgrub: &State<UvDependencyProvider>,
    ) -> Result<Option<ResolverVersion>, ResolveError> {
        debug!(
            "Searching for a compatible version of {name} @ {} ({range})",
            url.verbatim
        );

        // The metadata fetch was requested ahead of time (see `visit_package`);
        // block here until that in-flight task completes.
        let dist = Dist::from_url(name.clone(), url.clone())?;
        let distribution_id = dist.distribution_id();
        let response = self
            .index
            .distributions()
            .wait_blocking(&distribution_id)
            .ok_or_else(|| ResolveError::UnregisteredTask(dist.to_string()))?;

        // If we failed to fetch the metadata for a URL, we can't proceed.
        let metadata = match &*response {
            MetadataResponse::Found(archive) => &archive.metadata,
            MetadataResponse::Unavailable(reason) => {
                // Record the reason so later error reporting can surface it.
                self.unavailable_packages
                    .insert(name.clone(), reason.into());
                return Ok(None);
            }
            // TODO(charlie): Add derivation chain for URL dependencies. In practice, this isn't
            // critical since we fetch URL dependencies _prior_ to invoking the resolver.
            MetadataResponse::Error(dist, err) => {
                return Err(ResolveError::Dist(
                    DistErrorKind::from_requested_dist(dist, &**err),
                    dist.clone(),
                    DerivationChain::default(),
                    err.clone(),
                ));
            }
        };

        let version = &metadata.version;

        // The version is incompatible with the requirement.
        if !range.contains(version) {
            return Ok(None);
        }

        // If the URL points to a pre-built wheel, and the wheel's supported Python versions don't
        // match our `Requires-Python`, mark it as incompatible.
        if let Dist::Built(dist) = &dist {
            let filename = match &dist {
                BuiltDist::Registry(dist) => &dist.best_wheel().filename,
                BuiltDist::DirectUrl(dist) => &dist.filename,
                BuiltDist::Path(dist) => &dist.filename,
            };

            // If the wheel does _not_ cover an environment that requires artifact coverage, it's
            // incompatible. This only applies to universal resolutions (no concrete marker
            // environment) with user-required artifact environments configured.
            if env.marker_environment().is_none() && !self.options.artifact_environments.is_empty()
            {
                let wheel_marker = implied_markers(filename);
                // If the caller marked an environment as requiring artifact coverage, ensure it
                // has coverage.
                for environment_marker in self.options.artifact_environments.iter().copied() {
                    // If the platform is part of the current environment...
                    if env.included_by_marker(environment_marker)
                        && !find_environments(id, pubgrub).is_disjoint(environment_marker)
                    {
                        // ...but the wheel doesn't support it, it's incompatible.
                        if wheel_marker.is_disjoint(environment_marker) {
                            return Ok(Some(ResolverVersion::Unavailable(
                                version.clone(),
                                UnavailableVersion::IncompatibleDist(IncompatibleDist::Wheel(
                                    IncompatibleWheel::MissingPlatform(environment_marker),
                                )),
                            )));
                        }
                    }
                }
            }

            // If the wheel's Python tag doesn't match the target Python, it's incompatible.
            if !python_requirement.target().matches_wheel_tag(filename) {
                return Ok(Some(ResolverVersion::Unavailable(
                    filename.version.clone(),
                    UnavailableVersion::IncompatibleDist(IncompatibleDist::Wheel(
                        IncompatibleWheel::Tag(IncompatibleTag::AbiPythonVersion),
                    )),
                )));
            }
        }

        // The version is incompatible due to its `Requires-Python` requirement.
        if let Some(requires_python) = metadata.requires_python.as_ref() {
            if !python_requirement.target().is_contained_by(requires_python) {
                // Distinguish whether the installed or the targeted interpreter
                // is the blocker, for clearer error messages downstream.
                let kind = if python_requirement.installed() == python_requirement.target() {
                    PythonRequirementKind::Installed
                } else {
                    PythonRequirementKind::Target
                };
                return Ok(Some(ResolverVersion::Unavailable(
                    version.clone(),
                    UnavailableVersion::IncompatibleDist(IncompatibleDist::Source(
                        IncompatibleSource::RequiresPython(requires_python.clone(), kind),
                    )),
                )));
            }
        }

        // All checks passed: the single URL-provided version is usable.
        Ok(Some(ResolverVersion::Unforked(version.clone())))
    }
1252
    /// Given a candidate registry requirement, choose the next version in range to try, or `None`
    /// if there is no version in this range.
    ///
    /// May also return `ResolverVersion::Unavailable` (version exists but no usable distribution)
    /// or `ResolverVersion::Forked` (the resolution must split into per-environment forks, e.g.
    /// on `Requires-Python` or platform coverage).
    fn choose_version_registry(
        &self,
        package: &PubGrubPackage,
        id: Id<PubGrubPackage>,
        name: &PackageName,
        index: Option<&IndexUrl>,
        range: &Range<Version>,
        preferences: &Preferences,
        env: &ResolverEnvironment,
        python_requirement: &PythonRequirement,
        pubgrub: &State<UvDependencyProvider>,
        pins: &mut FilePins,
        visited: &mut FxHashSet<PackageName>,
        request_sink: &Sender<Request>,
    ) -> Result<(Option<ResolverVersion>), ResolveError> {
        // Wait for the metadata to be available.
        let versions_response = if let Some(index) = index {
            // The package is pinned to an explicit index: wait on that fetch.
            self.index
                .explicit()
                .wait_blocking(&(name.clone(), index.clone()))
                .ok_or_else(|| ResolveError::UnregisteredTask(name.to_string()))?
        } else {
            // Otherwise, wait on the fetch against the implicit (default) indexes.
            self.index
                .implicit()
                .wait_blocking(name)
                .ok_or_else(|| ResolveError::UnregisteredTask(name.to_string()))?
        };
        // Track that we attempted this package, for error reporting.
        visited.insert(name.clone());

        // On any failure mode, record the reason and fall through with an
        // empty slice of version maps (so selection below finds nothing).
        let version_maps = match *versions_response {
            VersionsResponse::Found(ref version_maps) => version_maps.as_slice(),
            VersionsResponse::NoIndex => {
                self.unavailable_packages
                    .insert(name.clone(), UnavailablePackage::NoIndex);
                &[]
            }
            VersionsResponse::Offline => {
                self.unavailable_packages
                    .insert(name.clone(), UnavailablePackage::Offline);
                &[]
            }
            VersionsResponse::NotFound => {
                self.unavailable_packages
                    .insert(name.clone(), UnavailablePackage::NotFound);
                &[]
            }
        };

        debug!("Searching for a compatible version of {package} ({range})");

        // Find a version.
        let Some(candidate) = self.selector.select(
            name,
            range,
            version_maps,
            preferences,
            &self.installed_packages,
            &self.exclusions,
            index,
            env,
            self.tags.as_ref(),
        ) else {
            // Short circuit: we couldn't find _any_ versions for a package.
            return Ok(None);
        };

        let dist = match candidate.dist() {
            CandidateDist::Compatible(dist) => dist,
            CandidateDist::Incompatible {
                incompatible_dist: incompatibility,
                prioritized_dist: _,
            } => {
                // If the version is incompatible because no distributions are compatible, exit early.
                return Ok(Some(ResolverVersion::Unavailable(
                    candidate.version().clone(),
                    // TODO(charlie): We can avoid this clone; the candidate is dropped here and
                    // owns the incompatibility.
                    UnavailableVersion::IncompatibleDist(incompatibility.clone()),
                )));
            }
        };

        // Check whether the version is incompatible due to its Python requirement.
        if let Some((requires_python, incompatibility)) =
            Self::check_requires_python(dist, python_requirement)
        {
            if matches!(self.options.fork_strategy, ForkStrategy::RequiresPython) {
                // Forking only applies to universal resolutions (no concrete
                // marker environment).
                if env.marker_environment().is_none() {
                    let forks = fork_version_by_python_requirement(
                        requires_python,
                        python_requirement,
                        env,
                    );
                    if !forks.is_empty() {
                        debug!(
                            "Forking Python requirement `{}` on `{}` for {}=={} ({})",
                            python_requirement.target(),
                            requires_python,
                            name,
                            candidate.version(),
                            forks
                                .iter()
                                .map(ToString::to_string)
                                .collect::<Vec<_>>()
                                .join(", ")
                        );
                        // Each fork restarts from this package with a narrowed
                        // environment; no version is pinned yet (`version: None`).
                        let forks = forks
                            .into_iter()
                            .map(|env| VersionFork {
                                env,
                                id,
                                version: None,
                            })
                            .collect();
                        return Ok(Some(ResolverVersion::Forked(forks)));
                    }
                }
            }

            // Not forking: report the version as unavailable for this Python.
            return Ok(Some(ResolverVersion::Unavailable(
                candidate.version().clone(),
                UnavailableVersion::IncompatibleDist(incompatibility),
            )));
        }

        // Check whether this version covers all supported platforms; and, if not, generate a fork.
        if let Some(forked) = self.fork_version_registry(
            &candidate,
            dist,
            version_maps,
            package,
            id,
            name,
            index,
            range,
            preferences,
            env,
            pubgrub,
            pins,
            request_sink,
        )? {
            return Ok(Some(forked));
        }

        // Derive a human-readable artifact name for the log line below.
        let filename = match dist.for_installation() {
            ResolvedDistRef::InstallableRegistrySourceDist { sdist, .. } => sdist
                .filename()
                .unwrap_or(Cow::Borrowed("unknown filename")),
            ResolvedDistRef::InstallableRegistryBuiltDist { wheel, .. } => wheel
                .filename()
                .unwrap_or(Cow::Borrowed("unknown filename")),
            ResolvedDistRef::Installed { .. } => Cow::Borrowed("installed"),
        };

        debug!(
            "Selecting: {}=={} [{}] ({})",
            name,
            candidate.version(),
            candidate.choice_kind(),
            filename,
        );
        // Pin the chosen file and prefetch its metadata.
        self.visit_candidate(&candidate, dist, package, name, pins, request_sink)?;

        let version = candidate.version().clone();
        Ok(Some(ResolverVersion::Unforked(version)))
    }
1421
    /// Determine whether a candidate covers all supported platforms; and, if not, generate a fork.
    ///
    /// This only ever applies to versions that lack source distributions. And, for now, we only
    /// apply it in two cases:
    ///
    /// 1. Local versions, where the non-local version has greater platform coverage. The intent is
    ///    such that, if we're resolving PyTorch, and we choose `torch==2.5.2+cpu`, we want to
    ///    fork so that we can select `torch==2.5.2` on macOS (since the `+cpu` variant doesn't
    ///    include any macOS wheels).
    /// 2. Platforms that the user explicitly marks as "required" (opt-in). For example, the user
    ///    might require that the generated resolution always includes wheels for x86 macOS, and
    ///    fails entirely if the platform is unsupported.
    ///
    /// Returns `Ok(None)` when no fork (or substitution) is needed and the caller should proceed
    /// with the candidate as-is; otherwise, returns the `ResolverVersion` to use instead.
    fn fork_version_registry(
        &self,
        candidate: &Candidate,
        dist: &CompatibleDist,
        version_maps: &[VersionMap],
        package: &PubGrubPackage,
        id: Id<PubGrubPackage>,
        name: &PackageName,
        index: Option<&IndexUrl>,
        range: &Range<Version>,
        preferences: &Preferences,
        env: &ResolverEnvironment,
        pubgrub: &State<UvDependencyProvider>,
        pins: &mut FilePins,
        request_sink: &Sender<Request>,
    ) -> Result<Option<ResolverVersion>, ResolveError> {
        // This only applies to universal resolutions.
        if env.marker_environment().is_some() {
            return Ok(None);
        }

        // If the package is already compatible with all environments (as is the case for
        // packages that include a source distribution), we don't need to fork.
        if dist.implied_markers().is_true() {
            return Ok(None);
        }

        // If the caller marked an environment as requiring artifact coverage, ensure it has
        // coverage.
        for marker in self.options.artifact_environments.iter().copied() {
            // If the platform is part of the current environment...
            if env.included_by_marker(marker) {
                // But isn't supported by the distribution...
                if dist.implied_markers().is_disjoint(marker)
                    && !find_environments(id, pubgrub).is_disjoint(marker)
                {
                    // Then we need to fork.
                    let Some((left, right)) = fork_version_by_marker(env, marker) else {
                        // The environment couldn't be split on this marker; report the version
                        // as unavailable for the required platform instead.
                        return Ok(Some(ResolverVersion::Unavailable(
                            candidate.version().clone(),
                            UnavailableVersion::IncompatibleDist(IncompatibleDist::Wheel(
                                IncompatibleWheel::MissingPlatform(marker),
                            )),
                        )));
                    };

                    debug!(
                        "Forking on required platform `{}` for {}=={} ({})",
                        marker.try_to_string().unwrap_or_else(|| "true".to_string()),
                        name,
                        candidate.version(),
                        [&left, &right]
                            .iter()
                            .map(ToString::to_string)
                            .collect::<Vec<_>>()
                            .join(", ")
                    );
                    // Neither fork pins a version (`version: None`): each side re-selects its
                    // own candidate.
                    let forks = vec![
                        VersionFork {
                            env: left,
                            id,
                            version: None,
                        },
                        VersionFork {
                            env: right,
                            id,
                            version: None,
                        },
                    ];
                    return Ok(Some(ResolverVersion::Forked(forks)));
                }
            }
        }

        // For now, we only apply this to local versions.
        if !candidate.version().is_local() {
            return Ok(None);
        }

        debug!(
            "Looking at local version: {}=={}",
            name,
            candidate.version()
        );

        // If there's a non-local version...
        // (Narrow the range to exactly the non-local form of the candidate's version.)
        let range = range.clone().intersection(&Range::singleton(
            candidate.version().clone().without_local(),
        ));

        let Some(base_candidate) = self.selector.select(
            name,
            &range,
            version_maps,
            preferences,
            &self.installed_packages,
            &self.exclusions,
            index,
            env,
            self.tags.as_ref(),
        ) else {
            return Ok(None);
        };
        let CandidateDist::Compatible(base_dist) = base_candidate.dist() else {
            return Ok(None);
        };

        // ...and the non-local version has greater platform support...
        // (`remainder` = environments the base supports that the local version does not.)
        let mut remainder = {
            let mut remainder = base_dist.implied_markers();
            remainder.and(dist.implied_markers().negate());
            remainder
        };
        if remainder.is_false() {
            return Ok(None);
        }

        // If the remainder isn't relevant to the current environment, there's no need to fork.
        // For example, if we're solving for `sys_platform == 'darwin'` but the remainder is
        // `sys_platform == 'linux'`, we don't need to fork.
        if !env.included_by_marker(remainder) {
            return Ok(None);
        }

        // Similarly, if the local distribution is incompatible with the current environment, then
        // use the base distribution instead (but don't fork).
        if !env.included_by_marker(dist.implied_markers()) {
            let filename = match dist.for_installation() {
                ResolvedDistRef::InstallableRegistrySourceDist { sdist, .. } => sdist
                    .filename()
                    .unwrap_or(Cow::Borrowed("unknown filename")),
                ResolvedDistRef::InstallableRegistryBuiltDist { wheel, .. } => wheel
                    .filename()
                    .unwrap_or(Cow::Borrowed("unknown filename")),
                ResolvedDistRef::Installed { .. } => Cow::Borrowed("installed"),
            };

            debug!(
                "Preferring non-local candidate: {}=={} [{}] ({})",
                name,
                base_candidate.version(),
                base_candidate.choice_kind(),
                filename,
            );
            self.visit_candidate(
                &base_candidate,
                base_dist,
                package,
                name,
                pins,
                request_sink,
            )?;

            return Ok(Some(ResolverVersion::Unforked(
                base_candidate.version().clone(),
            )));
        }

        // If the implied markers includes _some_ macOS environments, but the remainder doesn't,
        // then we can extend the implied markers to include _all_ macOS environments. Same goes for
        // Linux and Windows.
        //
        // The idea here is that the base version could support (e.g.) ARM macOS, but not Intel
        // macOS. But if _neither_ version supports Intel macOS, we'd rather use `sys_platform == 'darwin'`
        // instead of `sys_platform == 'darwin' and platform_machine == 'arm64'`, since it's much
        // simpler, and _neither_ version will succeed with Intel macOS anyway.
        for value in [
            arcstr::literal!("darwin"),
            arcstr::literal!("linux"),
            arcstr::literal!("win32"),
        ] {
            let sys_platform = MarkerTree::expression(MarkerExpression::String {
                key: MarkerValueString::SysPlatform,
                operator: MarkerOperator::Equal,
                value,
            });
            if dist.implied_markers().is_disjoint(sys_platform)
                && !remainder.is_disjoint(sys_platform)
            {
                remainder.or(sys_platform);
            }
        }

        // Otherwise, we need to fork.
        let Some((base_env, local_env)) = fork_version_by_marker(env, remainder) else {
            return Ok(None);
        };

        debug!(
            "Forking platform for {}=={} ({})",
            name,
            candidate.version(),
            [&base_env, &local_env]
                .iter()
                .map(ToString::to_string)
                .collect::<Vec<_>>()
                .join(", ")
        );
        // Visit both candidates up-front, so that each is pinned and its metadata request is
        // emitted before the forks are returned.
        self.visit_candidate(candidate, dist, package, name, pins, request_sink)?;
        self.visit_candidate(
            &base_candidate,
            base_dist,
            package,
            name,
            pins,
            request_sink,
        )?;

        // Unlike the "required platform" fork above, each side pins the version it should use:
        // the base (non-local) version for the remainder, the local version elsewhere.
        let forks = vec![
            VersionFork {
                env: base_env.clone(),
                id,
                version: Some(base_candidate.version().clone()),
            },
            VersionFork {
                env: local_env.clone(),
                id,
                version: Some(candidate.version().clone()),
            },
        ];
        Ok(Some(ResolverVersion::Forked(forks)))
    }
1656
1657    /// Visit a selected candidate.
1658    fn visit_candidate(
1659        &self,
1660        candidate: &Candidate,
1661        dist: &CompatibleDist,
1662        package: &PubGrubPackage,
1663        name: &PackageName,
1664        pins: &mut FilePins,
1665        request_sink: &Sender<Request>,
1666    ) -> Result<(), ResolveError> {
1667        // We want to return a package pinned to a specific version; but we _also_ want to
1668        // store the exact file that we selected to satisfy that version.
1669        pins.insert(candidate, dist);
1670
1671        // Emit a request to fetch the metadata for this version.
1672        if matches!(&**package, PubGrubPackageInner::Package { .. }) {
1673            if self.dependency_mode.is_transitive() {
1674                let dist = dist.for_resolution();
1675                if self.index.distributions().register(dist.distribution_id()) {
1676                    if name != dist.name() {
1677                        return Err(ResolveError::MismatchedPackageName {
1678                            request: "distribution",
1679                            expected: name.clone(),
1680                            actual: dist.name().clone(),
1681                        });
1682                    }
1683                    // Verify that the package is allowed under the hash-checking policy.
1684                    if !self
1685                        .hasher
1686                        .allows_package(candidate.name(), candidate.version())
1687                    {
1688                        return Err(ResolveError::UnhashedPackage(candidate.name().clone()));
1689                    }
1690
1691                    let request = Request::from(dist);
1692                    request_sink.blocking_send(request)?;
1693                }
1694            }
1695        }
1696
1697        Ok(())
1698    }
1699
1700    /// Check if the distribution is incompatible with the Python requirement, and if so, return
1701    /// the incompatibility.
1702    fn check_requires_python<'dist>(
1703        dist: &'dist CompatibleDist,
1704        python_requirement: &PythonRequirement,
1705    ) -> Option<(&'dist VersionSpecifiers, IncompatibleDist)> {
1706        let requires_python = dist.requires_python()?;
1707        if python_requirement.target().is_contained_by(requires_python) {
1708            None
1709        } else {
1710            let incompatibility = if matches!(dist, CompatibleDist::CompatibleWheel { .. }) {
1711                IncompatibleDist::Wheel(IncompatibleWheel::RequiresPython(
1712                    requires_python.clone(),
1713                    if python_requirement.installed() == python_requirement.target() {
1714                        PythonRequirementKind::Installed
1715                    } else {
1716                        PythonRequirementKind::Target
1717                    },
1718                ))
1719            } else {
1720                IncompatibleDist::Source(IncompatibleSource::RequiresPython(
1721                    requires_python.clone(),
1722                    if python_requirement.installed() == python_requirement.target() {
1723                        PythonRequirementKind::Installed
1724                    } else {
1725                        PythonRequirementKind::Target
1726                    },
1727                ))
1728            };
1729            Some((requires_python, incompatibility))
1730        }
1731    }
1732
1733    /// Given a candidate package and version, return its dependencies.
1734    #[instrument(skip_all, fields(%package, %version))]
1735    fn get_dependencies_forking(
1736        &self,
1737        id: Id<PubGrubPackage>,
1738        package: &PubGrubPackage,
1739        version: &Version,
1740        pins: &FilePins,
1741        fork_urls: &ForkUrls,
1742        env: &ResolverEnvironment,
1743        python_requirement: &PythonRequirement,
1744        pubgrub: &State<UvDependencyProvider>,
1745    ) -> Result<ForkedDependencies, ResolveError> {
1746        let result = self.get_dependencies(
1747            id,
1748            package,
1749            version,
1750            pins,
1751            fork_urls,
1752            env,
1753            python_requirement,
1754            pubgrub,
1755        );
1756        if env.marker_environment().is_some() {
1757            result.map(|deps| match deps {
1758                Dependencies::Available(deps) | Dependencies::Unforkable(deps) => {
1759                    ForkedDependencies::Unforked(deps)
1760                }
1761                Dependencies::Unavailable(err) => ForkedDependencies::Unavailable(err),
1762            })
1763        } else {
1764            Ok(result?.fork(env, python_requirement, &self.conflicts))
1765        }
1766    }
1767
    /// Given a candidate package and version, return its dependencies.
    ///
    /// The shape of the result depends on the package variant: root and regular packages yield
    /// their (flattened) requirements; marker/extra/group packages expand into dependencies on
    /// the corresponding base package at the same version; Python and system packages have none.
    #[instrument(skip_all, fields(%package, %version))]
    fn get_dependencies(
        &self,
        id: Id<PubGrubPackage>,
        package: &PubGrubPackage,
        version: &Version,
        pins: &FilePins,
        fork_urls: &ForkUrls,
        env: &ResolverEnvironment,
        python_requirement: &PythonRequirement,
        pubgrub: &State<UvDependencyProvider>,
    ) -> Result<Dependencies, ResolveError> {
        let dependencies = match &**package {
            // The root's dependencies are the top-level requirements (no dependency groups).
            PubGrubPackageInner::Root(_) => {
                let no_dev_deps = BTreeMap::default();
                let requirements = self.flatten_requirements(
                    &self.requirements,
                    &no_dev_deps,
                    None,
                    None,
                    None,
                    env,
                    python_requirement,
                );

                requirements
                    .flat_map(move |requirement| {
                        PubGrubDependency::from_requirement(
                            &self.conflicts,
                            requirement,
                            None,
                            Some(package),
                        )
                    })
                    .collect()
            }

            PubGrubPackageInner::Package {
                name,
                extra,
                group,
                marker: _,
            } => {
                // If we're excluding transitive dependencies, short-circuit.
                if self.dependency_mode.is_direct() {
                    return Ok(Dependencies::Unforkable(Vec::default()));
                }

                // Look up the distribution ID from the pins (common case) or fork URLs.
                // (`owned_id` lives outside the `if` so that a reference to it can be used in
                // the URL branch below.)
                let owned_id;
                let distribution_id = if let Some((_, metadata_id)) =
                    pins.dist_and_id(name, version)
                {
                    metadata_id
                } else if let Some(url) = fork_urls.get(name) {
                    let dist = Dist::from_url(name.clone(), url.clone())?;
                    owned_id = dist.distribution_id();
                    &owned_id
                } else {
                    // Reaching this branch is a resolver bug: every package whose dependencies
                    // are requested should have been pinned or mapped to a URL.
                    debug_assert!(
                        false,
                        "Dependencies were requested for a package without a pinned distribution"
                    );
                    return Err(ResolveError::UnregisteredTask(format!("{name}=={version}")));
                };

                // If the package does not exist in the registry or locally, we cannot fetch its dependencies
                if self.dependency_mode.is_transitive()
                    && self.unavailable_packages.get(name).is_some()
                    && self.installed_packages.get_packages(name).is_empty()
                {
                    debug_assert!(
                        false,
                        "Dependencies were requested for a package that is not available"
                    );
                    return Err(ResolveError::PackageUnavailable(name.clone()));
                }

                // Wait for the metadata to be available.
                let response = self
                    .index
                    .distributions()
                    .wait_blocking(distribution_id)
                    .ok_or_else(|| ResolveError::UnregisteredTask(format!("{name}=={version}")))?;

                let metadata = match &*response {
                    MetadataResponse::Found(archive) => &archive.metadata,
                    // Unavailable metadata is recorded (for error reporting) and surfaced as an
                    // unavailable version rather than a hard error.
                    MetadataResponse::Unavailable(reason) => {
                        let unavailable_version = UnavailableVersion::from(reason);
                        let message = unavailable_version.singular_message();
                        if let Some(err) = reason.source() {
                            // Show the detailed error for metadata parse errors.
                            warn!("{name} {message}: {err}");
                        } else {
                            warn!("{name} {message}");
                        }
                        self.incomplete_packages
                            .entry(name.clone())
                            .or_default()
                            .insert(version.clone(), reason.clone());
                        return Ok(Dependencies::Unavailable(unavailable_version));
                    }
                    // Hard errors carry a derivation chain explaining how we got here.
                    MetadataResponse::Error(dist, err) => {
                        let chain = DerivationChainBuilder::from_state(id, version, pubgrub)
                            .unwrap_or_default();
                        return Err(ResolveError::Dist(
                            DistErrorKind::from_requested_dist(dist, &**err),
                            dist.clone(),
                            chain,
                            err.clone(),
                        ));
                    }
                };

                // If there was no requires-python on the index page, we may have an incompatible
                // distribution.
                if let Some(requires_python) = &metadata.requires_python {
                    if !python_requirement.target().is_contained_by(requires_python) {
                        return Ok(Dependencies::Unavailable(
                            UnavailableVersion::RequiresPython(requires_python.clone()),
                        ));
                    }
                }

                // Identify any system dependencies based on the index URL.
                // (Only relevant under the CUDA torch backend strategy, and only for packages
                // that have a known system dependency.)
                let system_dependencies = self
                    .options
                    .torch_backend
                    .as_ref()
                    .filter(|torch_backend| matches!(torch_backend, TorchStrategy::Cuda { .. }))
                    .filter(|torch_backend| torch_backend.has_system_dependency(name))
                    .and_then(|_| pins.get(name, version).and_then(ResolvedDist::index))
                    .map(IndexUrl::url)
                    .and_then(SystemDependency::from_index)
                    .into_iter()
                    .inspect(|system_dependency| {
                        debug!(
                            "Adding system dependency `{}` for `{package}@{version}`",
                            system_dependency
                        );
                    })
                    .map(PubGrubDependency::from);

                let requirements = self.flatten_requirements(
                    &metadata.requires_dist,
                    &metadata.dependency_groups,
                    extra.as_ref(),
                    group.as_ref(),
                    Some(name),
                    env,
                    python_requirement,
                );

                requirements
                    // Drop any requirements the user explicitly excluded.
                    .filter(|requirement| !self.excludes.contains(&requirement.name))
                    .flat_map(|requirement| {
                        PubGrubDependency::from_requirement(
                            &self.conflicts,
                            requirement,
                            group.as_ref(),
                            Some(package),
                        )
                    })
                    .chain(system_dependencies)
                    .collect()
            }

            PubGrubPackageInner::Python(_) => return Ok(Dependencies::Unforkable(Vec::default())),

            PubGrubPackageInner::System(_) => return Ok(Dependencies::Unforkable(Vec::default())),

            // Add a dependency on both the marker and base package.
            PubGrubPackageInner::Marker { name, marker } => {
                return Ok(Dependencies::Unforkable(
                    [MarkerTree::TRUE, *marker]
                        .into_iter()
                        .map(move |marker| PubGrubDependency {
                            package: PubGrubPackage::from(PubGrubPackageInner::Package {
                                name: name.clone(),
                                extra: None,
                                group: None,
                                marker,
                            }),
                            // Both dependencies are pinned to this exact version.
                            version: Range::singleton(version.clone()),
                            parent: None,
                            source: DependencySource::Unspecified,
                        })
                        .collect(),
                ));
            }

            // Add a dependency on both the extra and base package, with and without the marker.
            PubGrubPackageInner::Extra {
                name,
                extra,
                marker,
            } => {
                return Ok(Dependencies::Unforkable(
                    [MarkerTree::TRUE, *marker]
                        .into_iter()
                        // When the marker is already `TRUE`, avoid emitting duplicates.
                        .dedup()
                        .flat_map(move |marker| {
                            [None, Some(extra)]
                                .into_iter()
                                .map(move |extra| PubGrubDependency {
                                    package: PubGrubPackage::from(PubGrubPackageInner::Package {
                                        name: name.clone(),
                                        extra: extra.cloned(),
                                        group: None,
                                        marker,
                                    }),
                                    version: Range::singleton(version.clone()),
                                    parent: None,
                                    source: DependencySource::Unspecified,
                                })
                        })
                        .collect(),
                ));
            }

            // Add a dependency on the dependency group, with and without the marker.
            PubGrubPackageInner::Group {
                name,
                group,
                marker,
            } => {
                return Ok(Dependencies::Unforkable(
                    [MarkerTree::TRUE, *marker]
                        .into_iter()
                        // When the marker is already `TRUE`, avoid emitting duplicates.
                        .dedup()
                        .map(|marker| PubGrubDependency {
                            package: PubGrubPackage::from(PubGrubPackageInner::Package {
                                name: name.clone(),
                                extra: None,
                                group: Some(group.clone()),
                                marker,
                            }),
                            version: Range::singleton(version.clone()),
                            parent: None,
                            source: DependencySource::Unspecified,
                        })
                        .collect(),
                ));
            }
        };
        Ok(Dependencies::Available(dependencies))
    }
2016
    /// The regular and dev dependencies filtered by Python version and the markers of this fork,
    /// plus the extras dependencies of the current package (e.g., `black` depending on
    /// `black[colorama]`).
    ///
    /// `name` is the name of the package whose requirements are being flattened (or `None` for
    /// the root); it's used to detect self-referential requirements like `black[colorama]`
    /// inside `black` itself, which are expanded in place rather than emitted as dependencies.
    fn flatten_requirements<'a>(
        &'a self,
        dependencies: &'a [Requirement],
        dev_dependencies: &'a BTreeMap<GroupName, Box<[Requirement]>>,
        extra: Option<&'a ExtraName>,
        dev: Option<&'a GroupName>,
        name: Option<&PackageName>,
        env: &'a ResolverEnvironment,
        python_requirement: &'a PythonRequirement,
    ) -> impl Iterator<Item = Cow<'a, Requirement>> {
        let python_marker = python_requirement.to_marker_tree();

        if let Some(dev) = dev {
            // Dependency groups can include the project itself, so no need to flatten recursive
            // dependencies.
            Either::Left(Either::Left(self.requirements_for_extra(
                dev_dependencies.get(dev).into_iter().flatten(),
                extra,
                env,
                python_marker,
                python_requirement,
            )))
        } else if !dependencies
            .iter()
            .any(|req| name == Some(&req.name) && !req.extras.is_empty())
        {
            // If the project doesn't define any recursive dependencies, take the fast path.
            Either::Left(Either::Right(self.requirements_for_extra(
                dependencies.iter(),
                extra,
                env,
                python_marker,
                python_requirement,
            )))
        } else {
            // Slow path: the package depends on itself with extras, so expand those extras
            // transitively into a materialized list.
            let mut requirements = self
                .requirements_for_extra(
                    dependencies.iter(),
                    extra,
                    env,
                    python_marker,
                    python_requirement,
                )
                .collect::<Vec<_>>();

            // Transitively process all extras that are recursively included, starting with the current
            // extra. `seen` guards against cycles (e.g., `proj[a]` -> `proj[b]` -> `proj[a]`).
            let mut seen = FxHashSet::<(ExtraName, MarkerTree)>::default();
            let mut queue: VecDeque<_> = requirements
                .iter()
                .filter(|req| name == Some(&req.name))
                .flat_map(|req| req.extras.iter().cloned().map(|extra| (extra, req.marker)))
                .collect();
            while let Some((extra, marker)) = queue.pop_front() {
                if !seen.insert((extra.clone(), marker)) {
                    continue;
                }
                for requirement in self.requirements_for_extra(
                    dependencies,
                    Some(&extra),
                    env,
                    python_marker,
                    python_requirement,
                ) {
                    // Intersect the requirement's marker with the marker under which the extra
                    // was included, cloning only when we hold a borrowed requirement.
                    let requirement = match requirement {
                        Cow::Owned(mut requirement) => {
                            requirement.marker.and(marker);
                            requirement
                        }
                        Cow::Borrowed(requirement) => {
                            let mut marker = marker;
                            marker.and(requirement.marker);
                            Requirement {
                                name: requirement.name.clone(),
                                extras: requirement.extras.clone(),
                                groups: requirement.groups.clone(),
                                source: requirement.source.clone(),
                                origin: requirement.origin.clone(),
                                marker: marker.simplify_extras(slice::from_ref(&extra)),
                            }
                        }
                    };
                    if name == Some(&requirement.name) {
                        // Add each transitively included extra.
                        queue.extend(
                            requirement
                                .extras
                                .iter()
                                .cloned()
                                .map(|extra| (extra, requirement.marker)),
                        );
                    } else {
                        // Add the requirements for that extra.
                        requirements.push(Cow::Owned(requirement));
                    }
                }
            }

            // Retain any self-constraints for that extra, e.g., if `project[foo]` includes
            // `project[bar]>1.0`, as a dependency, we need to propagate `project>1.0`, in addition to
            // transitively expanding `project[bar]`.
            let mut self_constraints = vec![];
            for req in &requirements {
                if name == Some(&req.name) && !req.source.is_empty() {
                    self_constraints.push(Requirement {
                        name: req.name.clone(),
                        extras: Box::new([]),
                        groups: req.groups.clone(),
                        source: req.source.clone(),
                        origin: req.origin.clone(),
                        marker: req.marker,
                    });
                }
            }

            // Drop all the self-requirements now that we flattened them out.
            requirements.retain(|req| name != Some(&req.name) || req.extras.is_empty());
            requirements.extend(self_constraints.into_iter().map(Cow::Owned));

            Either::Right(requirements.into_iter())
        }
    }
2142
2143    /// The set of the regular and dev dependencies, filtered by Python version,
2144    /// the markers of this fork and the requested extra.
2145    fn requirements_for_extra<'data, 'parameters>(
2146        &'data self,
2147        dependencies: impl IntoIterator<Item = &'data Requirement> + 'parameters,
2148        extra: Option<&'parameters ExtraName>,
2149        env: &'parameters ResolverEnvironment,
2150        python_marker: MarkerTree,
2151        python_requirement: &'parameters PythonRequirement,
2152    ) -> impl Iterator<Item = Cow<'data, Requirement>> + 'parameters
2153    where
2154        'data: 'parameters,
2155    {
2156        self.overrides
2157            .apply(dependencies)
2158            .filter(move |requirement| {
2159                Self::is_requirement_applicable(
2160                    requirement,
2161                    extra,
2162                    env,
2163                    python_marker,
2164                    python_requirement,
2165                )
2166            })
2167            .flat_map(move |requirement| {
2168                iter::once(requirement.clone()).chain(self.constraints_for_requirement(
2169                    requirement,
2170                    extra,
2171                    env,
2172                    python_marker,
2173                    python_requirement,
2174                ))
2175            })
2176    }
2177
2178    /// Whether a requirement is applicable for the Python version, the markers of this fork and the
2179    /// requested extra.
2180    fn is_requirement_applicable(
2181        requirement: &Requirement,
2182        extra: Option<&ExtraName>,
2183        env: &ResolverEnvironment,
2184        python_marker: MarkerTree,
2185        python_requirement: &PythonRequirement,
2186    ) -> bool {
2187        // If the requirement isn't relevant for the current platform, skip it.
2188        match extra {
2189            Some(source_extra) => {
2190                // Only include requirements that are relevant for the current extra.
2191                if requirement.evaluate_markers(env.marker_environment(), &[]) {
2192                    return false;
2193                }
2194                if !requirement
2195                    .evaluate_markers(env.marker_environment(), slice::from_ref(source_extra))
2196                {
2197                    return false;
2198                }
2199                if !env.included_by_group(ConflictItemRef::from((&requirement.name, source_extra)))
2200                {
2201                    return false;
2202                }
2203            }
2204            None => {
2205                if !requirement.evaluate_markers(env.marker_environment(), &[]) {
2206                    return false;
2207                }
2208            }
2209        }
2210
2211        // If the requirement would not be selected with any Python version
2212        // supported by the root, skip it.
2213        if python_marker.is_disjoint(requirement.marker) {
2214            trace!(
2215                "Skipping {requirement} because of Requires-Python: {requires_python}",
2216                requires_python = python_requirement.target(),
2217            );
2218            return false;
2219        }
2220
2221        // If we're in a fork in universal mode, ignore any dependency that isn't part of
2222        // this fork (but will be part of another fork).
2223        if !env.included_by_marker(requirement.marker) {
2224            trace!("Skipping {requirement} because of {env}");
2225            return false;
2226        }
2227
2228        true
2229    }
2230
    /// The constraints applicable to the requirement, filtered by Python version, the markers of
    /// this fork and the requested extra.
    ///
    /// Constraints are looked up by the requirement's name. Each emitted constraint carries the
    /// intersection of its own marker and the requirement's marker, since a constraint should
    /// only apply while the requirement itself is active. `Cow::Borrowed` is returned whenever
    /// the constraint can be reused as-is, so allocation only happens when a new marker must be
    /// attached.
    fn constraints_for_requirement<'data, 'parameters>(
        &'data self,
        requirement: Cow<'data, Requirement>,
        extra: Option<&'parameters ExtraName>,
        env: &'parameters ResolverEnvironment,
        python_marker: MarkerTree,
        python_requirement: &'parameters PythonRequirement,
    ) -> impl Iterator<Item = Cow<'data, Requirement>> + 'parameters
    where
        'data: 'parameters,
    {
        self.constraints
            .get(&requirement.name)
            .into_iter()
            .flatten()
            .filter_map(move |constraint| {
                // Compute the effective marker for the constraint. The first arm handles a
                // constraint with a trivially-true marker (no Requires-Python check needed);
                // the second intersects the markers and validates against Requires-Python.
                let constraint = if constraint.marker.is_true() {
                    // Additionally, if the requirement is `requests ; sys_platform == 'darwin'`
                    // and the constraint is `requests ; python_version == '3.6'`, the
                    // constraint should only apply when _both_ markers are true.
                    if requirement.marker.is_true() {
                        Cow::Borrowed(constraint)
                    } else {
                        // Inherit the requirement's marker (true ∧ m == m).
                        let mut marker = constraint.marker;
                        marker.and(requirement.marker);

                        if marker.is_false() {
                            trace!(
                                "Skipping {constraint} because of disjoint markers: `{}` vs. `{}`",
                                constraint.marker.try_to_string().unwrap(),
                                requirement.marker.try_to_string().unwrap(),
                            );
                            return None;
                        }

                        // Rebuild the constraint with the combined marker attached.
                        Cow::Owned(Requirement {
                            name: constraint.name.clone(),
                            extras: constraint.extras.clone(),
                            groups: constraint.groups.clone(),
                            source: constraint.source.clone(),
                            origin: constraint.origin.clone(),
                            marker,
                        })
                    }
                } else {
                    let requires_python = python_requirement.target();

                    // Intersect the constraint's marker with the requirement's marker.
                    let mut marker = constraint.marker;
                    marker.and(requirement.marker);

                    if marker.is_false() {
                        trace!(
                            "Skipping {constraint} because of disjoint markers: `{}` vs. `{}`",
                            constraint.marker.try_to_string().unwrap(),
                            requirement.marker.try_to_string().unwrap(),
                        );
                        return None;
                    }

                    // Additionally, if the requirement is `requests ; sys_platform == 'darwin'`
                    // and the constraint is `requests ; python_version == '3.6'`, the
                    // constraint should only apply when _both_ markers are true.
                    if python_marker.is_disjoint(marker) {
                        trace!(
                            "Skipping constraint {requirement} because of Requires-Python: {requires_python}"
                        );
                        return None;
                    }

                    // Only allocate when the combined marker actually differs from the
                    // constraint's own marker.
                    if marker == constraint.marker {
                        Cow::Borrowed(constraint)
                    } else {
                        Cow::Owned(Requirement {
                            name: constraint.name.clone(),
                            extras: constraint.extras.clone(),
                            groups: constraint.groups.clone(),
                            source: constraint.source.clone(),
                            origin: constraint.origin.clone(),
                            marker,
                        })
                    }
                };

                // If we're in a fork in universal mode, ignore any dependency that isn't part of
                // this fork (but will be part of another fork).
                if !env.included_by_marker(constraint.marker) {
                    trace!("Skipping {constraint} because of {env}");
                    return None;
                }

                // If the constraint isn't relevant for the current platform, skip it.
                // NOTE(review): the conflict check keys on `requirement.name`; the constraint
                // shares that name (it was looked up by it), so this is equivalent to keying
                // on the constraint's own name.
                match extra {
                    Some(source_extra) => {
                        if !constraint
                            .evaluate_markers(env.marker_environment(), slice::from_ref(source_extra))
                        {
                            return None;
                        }
                        if !env.included_by_group(ConflictItemRef::from((&requirement.name, source_extra)))
                        {
                            return None;
                        }
                    }
                    None => {
                        if !constraint.evaluate_markers(env.marker_environment(), &[]) {
                            return None;
                        }
                    }
                }

                Some(constraint)
            })
    }
2348
2349    /// Fetch the metadata for a stream of packages and versions.
2350    async fn fetch<Provider: ResolverProvider>(
2351        self: Arc<Self>,
2352        provider: Arc<Provider>,
2353        request_stream: Receiver<Request>,
2354    ) -> Result<(), ResolveError> {
2355        let mut response_stream = ReceiverStream::new(request_stream)
2356            .map(|request| self.process_request(request, &*provider).boxed_local())
2357            // Allow as many futures as possible to start in the background.
2358            // Backpressure is provided by at a more granular level by `DistributionDatabase`
2359            // and `SourceDispatch`, as well as the bounded request channel.
2360            .buffer_unordered(usize::MAX);
2361
2362        while let Some(response) = response_stream.next().await {
2363            match response? {
2364                Some(Response::Package(name, index, version_map)) => {
2365                    trace!("Received package metadata for: {name}");
2366                    if let Some(index) = index {
2367                        self.index
2368                            .explicit()
2369                            .done((name, index), Arc::new(version_map));
2370                    } else {
2371                        self.index.implicit().done(name, Arc::new(version_map));
2372                    }
2373                }
2374                Some(Response::Installed { dist, metadata }) => {
2375                    trace!("Received installed distribution metadata for: {dist}");
2376                    self.index
2377                        .distributions()
2378                        .done(dist.distribution_id(), Arc::new(metadata));
2379                }
2380                Some(Response::Dist { dist, metadata }) => {
2381                    let dist_kind = match dist {
2382                        Dist::Built(_) => "built",
2383                        Dist::Source(_) => "source",
2384                    };
2385                    trace!("Received {dist_kind} distribution metadata for: {dist}");
2386                    if let MetadataResponse::Unavailable(reason) = &metadata {
2387                        let message = UnavailableVersion::from(reason).singular_message();
2388                        if let Some(err) = reason.source() {
2389                            // Show the detailed error for metadata parse errors.
2390                            warn!("{dist} {message}: {err}");
2391                        } else {
2392                            warn!("{dist} {message}");
2393                        }
2394                    }
2395                    self.index
2396                        .distributions()
2397                        .done(dist.distribution_id(), Arc::new(metadata));
2398                }
2399                None => {}
2400            }
2401        }
2402
2403        Ok::<(), ResolveError>(())
2404    }
2405
    /// Process a single [`Request`], returning the [`Response`] to be merged into the shared
    /// index, or `Ok(None)` when the request short-circuits (e.g., a prefetch that found no
    /// compatible candidate).
    #[instrument(skip_all, fields(%request))]
    async fn process_request<Provider: ResolverProvider>(
        &self,
        request: Request,
        provider: &Provider,
    ) -> Result<Option<Response>, ResolveError> {
        match request {
            // Fetch package metadata from the registry.
            Request::Package(package_name, index) => {
                let package_versions = provider
                    .get_package_versions(&package_name, index.as_ref())
                    .boxed_local()
                    .await
                    .map_err(ResolveError::Client)?;

                Ok(Some(Response::Package(
                    package_name,
                    index.map(IndexMetadata::into_url),
                    package_versions,
                )))
            }

            // Fetch distribution metadata from the distribution database.
            Request::Dist(dist) => {
                // Fast path: if the registry already supplied metadata alongside the version
                // listing, reuse it and avoid a network/database round-trip.
                if let Some(version) = dist.version() {
                    if let Some(index) = dist.index() {
                        // Check the implicit indexes for pre-provided metadata.
                        let versions_response = self.index.implicit().get(dist.name());
                        if let Some(VersionsResponse::Found(version_maps)) =
                            versions_response.as_deref()
                        {
                            for version_map in version_maps {
                                // Only trust metadata that came from the same index.
                                if version_map.index() == Some(index) {
                                    let Some(metadata) = version_map.get_metadata(version) else {
                                        continue;
                                    };
                                    debug!("Found registry-provided metadata for: {dist}");
                                    return Ok(Some(Response::Dist {
                                        dist,
                                        metadata: MetadataResponse::Found(
                                            ArchiveMetadata::from_metadata23(metadata.clone()),
                                        ),
                                    }));
                                }
                            }
                        }

                        // Check the explicit indexes for pre-provided metadata.
                        let versions_response = self
                            .index
                            .explicit()
                            .get(&(dist.name().clone(), index.clone()));
                        if let Some(VersionsResponse::Found(version_maps)) =
                            versions_response.as_deref()
                        {
                            for version_map in version_maps {
                                let Some(metadata) = version_map.get_metadata(version) else {
                                    continue;
                                };
                                debug!("Found registry-provided metadata for: {dist}");
                                return Ok(Some(Response::Dist {
                                    dist,
                                    metadata: MetadataResponse::Found(
                                        ArchiveMetadata::from_metadata23(metadata.clone()),
                                    ),
                                }));
                            }
                        }
                    }
                }

                // Slow path: fetch (or build) the wheel metadata.
                let metadata = provider
                    .get_or_build_wheel_metadata(&dist)
                    .boxed_local()
                    .await?;

                // Guard against an index/build returning metadata for the wrong package.
                if let MetadataResponse::Found(metadata) = &metadata {
                    if &metadata.metadata.name != dist.name() {
                        return Err(ResolveError::MismatchedPackageName {
                            request: "distribution metadata",
                            expected: dist.name().clone(),
                            actual: metadata.metadata.name.clone(),
                        });
                    }
                }

                Ok(Some(Response::Dist { dist, metadata }))
            }

            // Read metadata from an already-installed distribution.
            Request::Installed(dist) => {
                let metadata = provider.get_installed_metadata(&dist).boxed_local().await?;

                // Guard against installed metadata naming a different package.
                if let MetadataResponse::Found(metadata) = &metadata {
                    if &metadata.metadata.name != dist.name() {
                        return Err(ResolveError::MismatchedPackageName {
                            request: "installed metadata",
                            expected: dist.name().clone(),
                            actual: metadata.metadata.name.clone(),
                        });
                    }
                }

                Ok(Some(Response::Installed { dist, metadata }))
            }

            // Pre-fetch the package and distribution metadata.
            Request::Prefetch(package_name, range, python_requirement) => {
                // Wait for the package metadata to become available.
                let versions_response = self
                    .index
                    .implicit()
                    .wait(&package_name)
                    .await
                    .ok_or_else(|| ResolveError::UnregisteredTask(package_name.to_string()))?;

                let version_map = match *versions_response {
                    VersionsResponse::Found(ref version_map) => version_map,
                    // Short-circuit if we did not find any versions for the package,
                    // recording the reason so the final error message can explain it.
                    VersionsResponse::NoIndex => {
                        self.unavailable_packages
                            .insert(package_name.clone(), UnavailablePackage::NoIndex);

                        return Ok(None);
                    }
                    VersionsResponse::Offline => {
                        self.unavailable_packages
                            .insert(package_name.clone(), UnavailablePackage::Offline);

                        return Ok(None);
                    }
                    VersionsResponse::NotFound => {
                        self.unavailable_packages
                            .insert(package_name.clone(), UnavailablePackage::NotFound);

                        return Ok(None);
                    }
                };

                // We don't have access to the fork state when prefetching, so assume that
                // pre-release versions are allowed.
                let env = ResolverEnvironment::universal(vec![]);

                // Try to find a compatible version. If there aren't any compatible versions,
                // short-circuit.
                let Some(candidate) = self.selector.select(
                    &package_name,
                    &range,
                    version_map,
                    &self.preferences,
                    &self.installed_packages,
                    &self.exclusions,
                    None,
                    &env,
                    self.tags.as_ref(),
                ) else {
                    return Ok(None);
                };

                // If there is not a compatible distribution, short-circuit.
                let Some(dist) = candidate.compatible() else {
                    return Ok(None);
                };

                // If the registry provided metadata for this distribution, use it.
                for version_map in version_map {
                    if let Some(metadata) = version_map.get_metadata(candidate.version()) {
                        let dist = dist.for_resolution();
                        // Only trust metadata that came from the same index as the candidate.
                        if version_map.index() == dist.index() {
                            debug!("Found registry-provided metadata for: {dist}");

                            let metadata = MetadataResponse::Found(
                                ArchiveMetadata::from_metadata23(metadata.clone()),
                            );

                            let dist = dist.to_owned();
                            // Guard against a candidate naming a different package.
                            if &package_name != dist.name() {
                                return Err(ResolveError::MismatchedPackageName {
                                    request: "distribution",
                                    expected: package_name,
                                    actual: dist.name().clone(),
                                });
                            }

                            let response = match dist {
                                ResolvedDist::Installable { dist, .. } => Response::Dist {
                                    dist: (*dist).clone(),
                                    metadata,
                                },
                                ResolvedDist::Installed { dist } => Response::Installed {
                                    dist: (*dist).clone(),
                                    metadata,
                                },
                            };

                            return Ok(Some(response));
                        }
                    }
                }

                // Avoid prefetching source distributions with unbounded lower-bound ranges. This
                // often leads to failed attempts to build legacy versions of packages that are
                // incompatible with modern build tools.
                if dist.wheel().is_none() {
                    if !self.selector.use_highest_version(&package_name, &env) {
                        if let Some((lower, _)) = range.iter().next() {
                            if lower == &Bound::Unbounded {
                                debug!(
                                    "Skipping prefetch for unbounded minimum-version range: {package_name} ({range})"
                                );
                                return Ok(None);
                            }
                        }
                    }
                }

                // Validate the Python requirement: skip the prefetch if the candidate's
                // `Requires-Python` can't cover the target Python range.
                let requires_python = match dist {
                    CompatibleDist::InstalledDist(_) => None,
                    CompatibleDist::SourceDist { sdist, .. }
                    | CompatibleDist::IncompatibleWheel { sdist, .. } => {
                        sdist.file.requires_python.as_ref()
                    }
                    CompatibleDist::CompatibleWheel { wheel, .. } => {
                        wheel.file.requires_python.as_ref()
                    }
                };
                if let Some(requires_python) = requires_python.as_ref() {
                    if !python_requirement.target().is_contained_by(requires_python) {
                        return Ok(None);
                    }
                }

                // Verify that the package is allowed under the hash-checking policy.
                if !self
                    .hasher
                    .allows_package(candidate.name(), candidate.version())
                {
                    return Ok(None);
                }

                // Emit a request to fetch the metadata for this version. `register` returns
                // `false` if another task already claimed this distribution, in which case we
                // skip the duplicate fetch.
                let dist = dist.for_resolution();
                if self.index.distributions().register(dist.distribution_id()) {
                    let dist = dist.to_owned();
                    // Guard against a candidate naming a different package.
                    if &package_name != dist.name() {
                        return Err(ResolveError::MismatchedPackageName {
                            request: "distribution",
                            expected: package_name,
                            actual: dist.name().clone(),
                        });
                    }

                    let response = match dist {
                        ResolvedDist::Installable { dist, .. } => {
                            let metadata = provider
                                .get_or_build_wheel_metadata(&dist)
                                .boxed_local()
                                .await?;

                            Response::Dist {
                                dist: (*dist).clone(),
                                metadata,
                            }
                        }
                        ResolvedDist::Installed { dist } => {
                            let metadata =
                                provider.get_installed_metadata(&dist).boxed_local().await?;

                            Response::Installed {
                                dist: (*dist).clone(),
                                metadata,
                            }
                        }
                    };

                    Ok(Some(response))
                } else {
                    Ok(None)
                }
            }
        }
    }
2688
2689    fn convert_no_solution_err(
2690        &self,
2691        mut err: pubgrub::NoSolutionError<UvDependencyProvider>,
2692        fork_urls: ForkUrls,
2693        fork_indexes: ForkIndexes,
2694        env: ResolverEnvironment,
2695        current_environment: MarkerEnvironment,
2696        exclude_newer: Option<&ExcludeNewer>,
2697        visited: &FxHashSet<PackageName>,
2698    ) -> ResolveError {
2699        err = NoSolutionError::collapse_local_version_segments(NoSolutionError::collapse_proxies(
2700            err,
2701        ));
2702
2703        let mut unavailable_packages = FxHashMap::default();
2704        for package in err.packages() {
2705            if let PubGrubPackageInner::Package { name, .. } = &**package {
2706                if let Some(reason) = self.unavailable_packages.get(name) {
2707                    unavailable_packages.insert(name.clone(), reason.clone());
2708                }
2709            }
2710        }
2711
2712        let mut incomplete_packages = FxHashMap::default();
2713        for package in err.packages() {
2714            if let PubGrubPackageInner::Package { name, .. } = &**package {
2715                if let Some(versions) = self.incomplete_packages.get(name) {
2716                    for entry in versions.iter() {
2717                        let (version, reason) = entry.pair();
2718                        incomplete_packages
2719                            .entry(name.clone())
2720                            .or_insert_with(BTreeMap::default)
2721                            .insert(version.clone(), reason.clone());
2722                    }
2723                }
2724            }
2725        }
2726
2727        let mut available_indexes = FxHashMap::default();
2728        let mut available_versions = FxHashMap::default();
2729        for package in err.packages() {
2730            let Some(name) = package.name() else { continue };
2731            if !visited.contains(name) {
2732                // Avoid including available versions for packages that exist in the derivation
2733                // tree, but were never visited during resolution. We _may_ have metadata for
2734                // these packages, but it's non-deterministic, and omitting them ensures that
2735                // we represent the self of the resolver at the time of failure.
2736                continue;
2737            }
2738            let versions_response = if let Some(index) = fork_indexes.get(name) {
2739                self.index
2740                    .explicit()
2741                    .get(&(name.clone(), index.url().clone()))
2742            } else {
2743                self.index.implicit().get(name)
2744            };
2745            if let Some(response) = versions_response {
2746                if let VersionsResponse::Found(ref version_maps) = *response {
2747                    // Track the available versions, across all indexes.
2748                    for version_map in version_maps {
2749                        let package_versions = available_versions
2750                            .entry(name.clone())
2751                            .or_insert_with(BTreeSet::new);
2752
2753                        for (version, dists) in version_map.iter(&Ranges::full()) {
2754                            // Don't show versions removed by excluded-newer in hints.
2755                            if let Some(exclude_newer) =
2756                                exclude_newer.and_then(|en| en.exclude_newer_package(name))
2757                            {
2758                                let Some(prioritized_dist) = dists.prioritized_dist() else {
2759                                    continue;
2760                                };
2761                                if prioritized_dist.files().all(|file| {
2762                                    file.upload_time_utc_ms.is_none_or(|upload_time| {
2763                                        upload_time >= exclude_newer.timestamp_millis()
2764                                    })
2765                                }) {
2766                                    continue;
2767                                }
2768                            }
2769
2770                            package_versions.insert(version.clone());
2771                        }
2772                    }
2773
2774                    // Track the indexes in which the package is available.
2775                    available_indexes
2776                        .entry(name.clone())
2777                        .or_insert(BTreeSet::new())
2778                        .extend(
2779                            version_maps
2780                                .iter()
2781                                .filter_map(|version_map| version_map.index().cloned()),
2782                        );
2783                }
2784            }
2785        }
2786
2787        ResolveError::NoSolution(Box::new(NoSolutionError::new(
2788            err,
2789            self.index.clone(),
2790            available_versions,
2791            available_indexes,
2792            self.selector.clone(),
2793            self.python_requirement.clone(),
2794            self.locations.clone(),
2795            self.capabilities.clone(),
2796            unavailable_packages,
2797            incomplete_packages,
2798            fork_urls,
2799            fork_indexes,
2800            env,
2801            current_environment,
2802            self.tags.clone(),
2803            self.workspace_members.clone(),
2804            self.options.clone(),
2805        )))
2806    }
2807
2808    fn on_progress(&self, package: &PubGrubPackage, version: &Version) {
2809        if let Some(reporter) = self.reporter.as_ref() {
2810            match &**package {
2811                PubGrubPackageInner::Root(_) => {}
2812                PubGrubPackageInner::Python(_) => {}
2813                PubGrubPackageInner::System(_) => {}
2814                PubGrubPackageInner::Marker { .. } => {}
2815                PubGrubPackageInner::Extra { .. } => {}
2816                PubGrubPackageInner::Group { .. } => {}
2817                PubGrubPackageInner::Package { name, .. } => {
2818                    reporter.on_progress(name, &VersionOrUrlRef::Version(version));
2819                }
2820            }
2821        }
2822    }
2823
2824    fn on_complete(&self) {
2825        if let Some(reporter) = self.reporter.as_ref() {
2826            reporter.on_complete();
2827        }
2828    }
2829}
2830
/// State that is used during unit propagation in the resolver, one instance per fork.
#[derive(Clone)]
pub(crate) struct ForkState {
    /// The internal state used by the resolver.
    ///
    /// Note that not all parts of this state are strictly internal. For
    /// example, the edges in the dependency graph generated as part of the
    /// output of resolution are derived from the "incompatibilities" tracked
    /// in this state. We also ultimately retrieve the final set of version
    /// assignments (to packages) from this state's "partial solution."
    pubgrub: State<UvDependencyProvider>,
    /// The initial package to select. If set, the first iteration over this state will avoid
    /// asking PubGrub for the highest-priority package, and will instead use the provided package.
    initial_id: Option<Id<PubGrubPackage>>,
    /// The initial version to select. If set, the first iteration over this state will avoid
    /// asking PubGrub for the highest-priority version, and will instead use the provided version.
    initial_version: Option<Version>,
    /// The next package on which to run unit propagation.
    next: Id<PubGrubPackage>,
    /// The set of pinned versions we accrue throughout resolution.
    ///
    /// The key of this map is a package name, and each package name maps to
    /// a set of versions for that package. Each version in turn is mapped
    /// to the concrete distribution selected for installation, along with the
    /// concrete distribution whose metadata was used during resolution.
    /// After resolution is finished, this map is consulted to recover both the
    /// locked artifact and the metadata backing the resolved dependency edges.
    pins: FilePins,
    /// Ensure we don't have duplicate URLs in any branch.
    ///
    /// Unlike [`Urls`], we add only the URLs we have seen in this branch, and there can be only
    /// one URL per package. By prioritizing direct URL dependencies over registry dependencies,
    /// this map is populated for all direct URL packages before we look at any registry packages.
    fork_urls: ForkUrls,
    /// Ensure we don't have duplicate indexes in any branch.
    ///
    /// Unlike [`Indexes`], we add only the indexes we have seen in this branch, and there can be
    /// only one index per package.
    fork_indexes: ForkIndexes,
    /// When dependencies for a package are retrieved, this map of priorities
    /// is updated based on how each dependency was specified. Certain types
    /// of dependencies have more "priority" than others (like direct URL
    /// dependencies). These priorities help determine which package to
    /// consider next during resolution.
    priorities: PubGrubPriorities,
    /// This keeps track of the set of versions for each package that we've
    /// already visited during resolution. This avoids doing redundant work.
    added_dependencies: FxHashMap<Id<PubGrubPackage>, FxHashSet<Version>>,
    /// The marker expression that created this state.
    ///
    /// The root state always corresponds to a marker expression that is always
    /// `true` for every `MarkerEnvironment`.
    ///
    /// In non-universal mode, forking never occurs and so this marker
    /// expression is always `true`.
    ///
    /// Whenever dependencies are fetched, all requirement specifications
    /// are checked for disjointness with the marker expression of the fork
    /// in which those dependencies were fetched. If a requirement has a
    /// completely disjoint marker expression (i.e., it can never be true given
    /// that the marker expression that provoked the fork is true), then that
    /// dependency is completely ignored.
    env: ResolverEnvironment,
    /// The Python requirement for this fork. Defaults to the Python requirement for
    /// the resolution, but may be narrowed if a `python_version` marker is present
    /// in a given fork.
    ///
    /// For example, in:
    /// ```text
    /// numpy >=1.26 ; python_version >= "3.9"
    /// numpy <1.26 ; python_version < "3.9"
    /// ```
    ///
    /// The top fork has a narrower Python compatibility range, and thus can find a
    /// solution that omits Python 3.8 support.
    python_requirement: PythonRequirement,
    /// Counts how often each package is the culprit of, or is affected by, a
    /// dependency conflict, so that frequently conflicting packages can be
    /// re-prioritized (see [`ForkState::record_conflict`]).
    conflict_tracker: ConflictTracker,
    /// Prefetch package versions for packages with many rejected versions.
    ///
    /// Tracked on the fork state to avoid counting each identical version between forks as new try.
    prefetcher: BatchPrefetcher,
}
2913
2914impl ForkState {
2915    fn new(
2916        pubgrub: State<UvDependencyProvider>,
2917        env: ResolverEnvironment,
2918        python_requirement: PythonRequirement,
2919        prefetcher: BatchPrefetcher,
2920    ) -> Self {
2921        Self {
2922            initial_id: None,
2923            initial_version: None,
2924            next: pubgrub.root_package,
2925            pubgrub,
2926            pins: FilePins::default(),
2927            fork_urls: ForkUrls::default(),
2928            fork_indexes: ForkIndexes::default(),
2929            priorities: PubGrubPriorities::default(),
2930            added_dependencies: FxHashMap::default(),
2931            env,
2932            python_requirement,
2933            conflict_tracker: ConflictTracker::default(),
2934            prefetcher,
2935        }
2936    }
2937
    /// Visit the dependencies for the selected version of the current package, incorporating any
    /// relevant URLs and pinned indexes into the [`ForkState`].
    ///
    /// For each dependency, this:
    /// * Records any URL, explicit index, or pinned index for the package into the fork,
    ///   erroring on conflicts within the fork;
    /// * Logs whether the dependency is direct (from the root or a workspace member) or
    ///   transitive;
    /// * Warns once per package on unbounded direct dependencies under
    ///   `--resolution lowest` / `lowest-direct`;
    /// * Updates the package priorities used to pick the next package to resolve.
    fn visit_package_version_dependencies(
        &mut self,
        for_package: Id<PubGrubPackage>,
        for_version: &Version,
        urls: &Urls,
        indexes: &Indexes,
        dependencies: &[PubGrubDependency],
        git: &GitResolver,
        workspace_members: &BTreeSet<PackageName>,
        resolution_strategy: &ResolutionStrategy,
    ) -> Result<(), ResolveError> {
        for dependency in dependencies {
            let PubGrubDependency {
                package,
                version,
                parent: _,
                source,
            } = dependency;

            let mut has_url = false;
            if let Some(name) = package.name() {
                // From the [`Requirement`] to [`PubGrubDependency`] conversion, we get a URL if the
                // requirement was a URL requirement. `Urls` applies canonicalization to this and
                // override URLs to both URL and registry requirements, which we then check for
                // conflicts using [`ForkUrl`].
                for url in urls.get_url(&self.env, name, source.verbatim_url(), git)? {
                    self.fork_urls.insert(name, url, &self.env)?;
                    has_url = true;
                }

                if let Some(index) = source.explicit_index() {
                    self.fork_indexes.insert(name, index, &self.env)?;
                }

                // If the package is pinned to an exact index, add it to the fork.
                for index in indexes.get(name, &self.env) {
                    self.fork_indexes.insert(name, index, &self.env)?;
                }
            }

            // Dependencies of workspace members are treated as direct dependencies below.
            if let Some(name) = self.pubgrub.package_store[for_package]
                .name_no_root()
                .filter(|name| !workspace_members.contains(name))
            {
                debug!(
                    "Adding transitive dependency for {name}=={for_version}: {package}{version}"
                );
            } else {
                // A dependency from the root package or `requirements.txt`.
                debug!("Adding direct dependency: {package}{version}");

                // Warn the user if a direct dependency lacks a lower bound in `--lowest` resolution.
                let missing_lower_bound = version
                    .bounding_range()
                    .map(|(lowest, _highest)| lowest == Bound::Unbounded)
                    .unwrap_or(true);
                let strategy_lowest = matches!(
                    resolution_strategy,
                    ResolutionStrategy::Lowest | ResolutionStrategy::LowestDirect(..)
                );

                // URL dependencies are pinned by their URL, so a version bound is unnecessary.
                if !has_url && missing_lower_bound && strategy_lowest {
                    let name = package.name_no_root().unwrap();
                    // Handle cases where a package is listed both without and with a lower bound.
                    // Example:
                    // ```
                    // "coverage[toml] ; python_version < '3.11'",
                    // "coverage >= 7.10.0",
                    // ```
                    let bound_on_other_package = dependencies.iter().any(|other| {
                        Some(name) == other.package.name()
                            && !other
                                .version
                                .bounding_range()
                                .map(|(lowest, _highest)| lowest == Bound::Unbounded)
                                .unwrap_or(true)
                    });

                    if !bound_on_other_package {
                        warn_user_once!(
                            "The direct dependency `{name}` is unpinned. \
                            Consider setting a lower bound when using `--resolution lowest` \
                            or `--resolution lowest-direct` to avoid using outdated versions.",
                        );
                    }
                }
            }

            // Update the package priorities.
            self.priorities.insert(package, version, &self.fork_urls);
            // As we're adding an incompatibility from the proxy package to the base package,
            // we need to register the base package.
            if let Some(base_package) = package.base_package() {
                self.priorities
                    .insert(&base_package, version, &self.fork_urls);
            }
        }

        Ok(())
    }
3040
3041    /// Add the dependencies for the selected version of the current package.
3042    fn add_package_version_dependencies(
3043        &mut self,
3044        for_package: Id<PubGrubPackage>,
3045        for_version: &Version,
3046        dependencies: Vec<PubGrubDependency>,
3047    ) {
3048        for dependency in &dependencies {
3049            let PubGrubDependency {
3050                package,
3051                version,
3052                parent: _,
3053                source: _,
3054            } = dependency;
3055
3056            let Some(base_package) = package.base_package() else {
3057                continue;
3058            };
3059
3060            let proxy_package = self.pubgrub.package_store.alloc(package.clone());
3061            let base_package_id = self.pubgrub.package_store.alloc(base_package.clone());
3062            self.pubgrub
3063                .add_proxy_package(proxy_package, base_package_id, version.clone());
3064        }
3065
3066        let conflict = self.pubgrub.add_package_version_dependencies(
3067            self.next,
3068            for_version.clone(),
3069            dependencies.into_iter().map(|dependency| {
3070                let PubGrubDependency {
3071                    package,
3072                    version,
3073                    parent: _,
3074                    source: _,
3075                } = dependency;
3076                (package, version)
3077            }),
3078        );
3079
3080        // Conflict tracking: If the version was rejected due to its dependencies, record culprit
3081        // and affected.
3082        if let Some(incompatibility) = conflict {
3083            self.record_conflict(for_package, Some(for_version), incompatibility);
3084        }
3085    }
3086
    /// Record a conflict between packages for (de)prioritization purposes.
    ///
    /// Every other package participating in `incompatibility` has its "culprit" count
    /// incremented, and the current package (`self.next`) has its "affected" count
    /// incremented; once a count reaches `CONFLICT_THRESHOLD`, the package is queued
    /// for deprioritization or prioritization, respectively.
    ///
    /// `version` is `Some` for a dependency conflict (a specific version of `affected`
    /// was rejected) and `None` for a unit-propagation conflict.
    fn record_conflict(
        &mut self,
        affected: Id<PubGrubPackage>,
        version: Option<&Version>,
        incompatibility: IncompId<PubGrubPackage, Ranges<Version>, UnavailableReason>,
    ) {
        let mut culprit_is_real = false;
        for (incompatible, _term) in self.pubgrub.incompatibility_store[incompatibility].iter() {
            if incompatible == affected {
                continue;
            }
            if self.pubgrub.package_store[affected].name()
                == self.pubgrub.package_store[incompatible].name()
            {
                // Don't track conflicts between a marker package and the main package, when the
                // marker is "copying" the obligations from the main package through conflicts.
                continue;
            }
            culprit_is_real = true;
            let culprit_count = self
                .conflict_tracker
                .culprit
                .entry(incompatible)
                .or_default();
            *culprit_count += 1;
            if *culprit_count == CONFLICT_THRESHOLD {
                self.conflict_tracker.deprioritize.push(incompatible);
            }
        }
        // Don't track conflicts between a marker package and the main package, when the
        // marker is "copying" the obligations from the main package through conflicts.
        if culprit_is_real {
            // Only render the incompatibility when debug logging is active, since
            // formatting it requires joining every participating package.
            if tracing::enabled!(Level::DEBUG) {
                let incompatibility = self.pubgrub.incompatibility_store[incompatibility]
                    .iter()
                    .map(|(package, _term)| &self.pubgrub.package_store[package])
                    .join(", ");
                if let Some(version) = version {
                    debug!(
                        "Recording dependency conflict of {}=={} from incompatibility of ({})",
                        self.pubgrub.package_store[affected], version, incompatibility
                    );
                } else {
                    debug!(
                        "Recording unit propagation conflict of {} from incompatibility of ({})",
                        self.pubgrub.package_store[affected], incompatibility
                    );
                }
            }

            let affected_count = self.conflict_tracker.affected.entry(self.next).or_default();
            *affected_count += 1;
            if *affected_count == CONFLICT_THRESHOLD {
                self.conflict_tracker.prioritize.push(self.next);
            }
        }
    }
3144
    /// Mark `version` of the current package (`self.next`) as unavailable, for the
    /// given `reason`.
    fn add_unavailable_version(&mut self, version: Version, reason: UnavailableVersion) {
        // Incompatible requires-python versions are special in that we track
        // them as incompatible dependencies instead of marking the package version
        // as unavailable directly.
        if let UnavailableVersion::IncompatibleDist(
            IncompatibleDist::Source(IncompatibleSource::RequiresPython(requires_python, kind))
            | IncompatibleDist::Wheel(IncompatibleWheel::RequiresPython(requires_python, kind)),
        ) = reason
        {
            let package = &self.next;
            // Allocate a virtual Python package matching the requirement kind
            // (installed interpreter vs. target version).
            let python = self.pubgrub.package_store.alloc(PubGrubPackage::from(
                PubGrubPackageInner::Python(match kind {
                    PythonRequirementKind::Installed => PubGrubPython::Installed,
                    PythonRequirementKind::Target => PubGrubPython::Target,
                }),
            ));
            // Record "this version depends on a Python in `requires_python`".
            self.pubgrub
                .add_incompatibility(Incompatibility::from_dependency(
                    *package,
                    Range::singleton(version.clone()),
                    (python, release_specifiers_to_ranges(requires_python)),
                ));
            self.pubgrub
                .partial_solution
                .add_decision(self.next, version);
            return;
        }
        // All other reasons: mark the exact version as unavailable.
        self.pubgrub
            .add_incompatibility(Incompatibility::custom_version(
                self.next,
                version.clone(),
                UnavailableReason::Version(reason),
            ));
    }
3179
    /// Subset the current markers with the new markers and update the python requirements fields
    /// accordingly.
    ///
    /// NOTE(review): despite an older comment claiming this returns `None` when the fork
    /// should be dropped, it unconditionally returns `Self`; any fork-dropping must happen
    /// at the call site.
    fn with_env(mut self, env: ResolverEnvironment) -> Self {
        self.env = env;
        // If the fork contains a narrowed Python requirement, apply it.
        if let Some(req) = self.env.narrow_python_requirement(&self.python_requirement) {
            debug!("Narrowed `requires-python` bound to: {}", req.target());
            self.python_requirement = req;
        }
        self
    }
3194
3195    /// Returns the URL or index for a package and version.
3196    ///
3197    /// In practice, exactly one of the returned values will be `Some`.
3198    fn source(
3199        &self,
3200        name: &PackageName,
3201        version: &Version,
3202    ) -> (Option<&VerbatimParsedUrl>, Option<&IndexUrl>) {
3203        let url = self.fork_urls.get(name);
3204        let index = url
3205            .is_none()
3206            .then(|| {
3207                self.pins
3208                    .get(name, version)
3209                    .expect("Every package should be pinned")
3210                    .index()
3211            })
3212            .flatten();
3213        (url, index)
3214    }
3215
    /// Consume this fork's state and convert the PubGrub solution into a
    /// [`Resolution`]: the set of selected `(package, version)` nodes and the
    /// dependency edges between them, recovered from the tracked incompatibilities.
    fn into_resolution(self) -> Resolution {
        let solution: FxHashMap<_, _> = self.pubgrub.partial_solution.extract_solution().collect();
        // Upper bound on the number of edges: one per incompatibility of each
        // solved package.
        let edge_count: usize = solution
            .keys()
            .map(|package| self.pubgrub.incompatibilities[package].len())
            .sum();
        let mut edges: Vec<ResolutionDependencyEdge> = Vec::with_capacity(edge_count);
        for (package, self_version) in &solution {
            for id in &self.pubgrub.incompatibilities[package] {
                // Only dependency incompatibilities encode edges; skip all other kinds.
                let pubgrub::Kind::FromDependencyOf(
                    self_package,
                    ref self_range,
                    dependency_package,
                    ref dependency_range,
                ) = self.pubgrub.incompatibility_store[*id].kind
                else {
                    continue;
                };
                // The incompatibility must originate from this package...
                if *package != self_package {
                    continue;
                }
                // ...and apply to the version that was actually selected.
                if !self_range.contains(self_version) {
                    continue;
                }
                // The dependency must be part of the solution, at a version within
                // the requested range.
                let Some(dependency_version) = solution.get(&dependency_package) else {
                    continue;
                };
                if !dependency_range.contains(dependency_version) {
                    continue;
                }

                let self_package = &self.pubgrub.package_store[self_package];
                let dependency_package = &self.pubgrub.package_store[dependency_package];

                // Edges may only originate from a concrete package or the root.
                let (self_name, self_extra, self_group) = match &**self_package {
                    PubGrubPackageInner::Package {
                        name: self_name,
                        extra: self_extra,
                        group: self_group,
                        marker: _,
                    } => (Some(self_name), self_extra.as_ref(), self_group.as_ref()),

                    PubGrubPackageInner::Root(_) => (None, None, None),

                    _ => continue,
                };

                // The root package has neither a URL nor an index.
                let (self_url, self_index) = self_name
                    .map(|self_name| self.source(self_name, self_version))
                    .unwrap_or((None, None));

                match **dependency_package {
                    PubGrubPackageInner::Package {
                        name: ref dependency_name,
                        extra: ref dependency_extra,
                        group: ref dependency_dev,
                        marker: ref dependency_marker,
                    } => {
                        debug_assert!(
                            dependency_extra.is_none(),
                            "Packages should depend on an extra proxy"
                        );
                        debug_assert!(
                            dependency_dev.is_none(),
                            "Packages should depend on a group proxy"
                        );

                        // Ignore self-dependencies (e.g., `tensorflow-macos` depends on `tensorflow-macos`),
                        // but allow groups to depend on other groups, or on the package itself.
                        if self_group.is_none() {
                            if self_name == Some(dependency_name) {
                                continue;
                            }
                        }

                        let (to_url, to_index) = self.source(dependency_name, dependency_version);

                        let edge = ResolutionDependencyEdge {
                            from: self_name.cloned(),
                            from_version: self_version.clone(),
                            from_url: self_url.cloned(),
                            from_index: self_index.cloned(),
                            from_extra: self_extra.cloned(),
                            from_group: self_group.cloned(),
                            to: dependency_name.clone(),
                            to_version: dependency_version.clone(),
                            to_url: to_url.cloned(),
                            to_index: to_index.cloned(),
                            to_extra: dependency_extra.clone(),
                            to_group: dependency_dev.clone(),
                            marker: *dependency_marker,
                        };
                        edges.push(edge);
                    }

                    PubGrubPackageInner::Marker {
                        name: ref dependency_name,
                        marker: ref dependency_marker,
                    } => {
                        // Ignore self-dependencies (e.g., `tensorflow-macos` depends on `tensorflow-macos`),
                        // but allow groups to depend on other groups, or on the package itself.
                        if self_group.is_none() {
                            if self_name == Some(dependency_name) {
                                continue;
                            }
                        }

                        let (to_url, to_index) = self.source(dependency_name, dependency_version);

                        // Marker proxies are flattened: the edge targets the base
                        // package, carrying the marker as its weight.
                        let edge = ResolutionDependencyEdge {
                            from: self_name.cloned(),
                            from_version: self_version.clone(),
                            from_url: self_url.cloned(),
                            from_index: self_index.cloned(),
                            from_extra: self_extra.cloned(),
                            from_group: self_group.cloned(),
                            to: dependency_name.clone(),
                            to_version: dependency_version.clone(),
                            to_url: to_url.cloned(),
                            to_index: to_index.cloned(),
                            to_extra: None,
                            to_group: None,
                            marker: *dependency_marker,
                        };
                        edges.push(edge);
                    }

                    PubGrubPackageInner::Extra {
                        name: ref dependency_name,
                        extra: ref dependency_extra,
                        marker: ref dependency_marker,
                    } => {
                        if self_group.is_none() {
                            debug_assert!(
                                self_name != Some(dependency_name),
                                "Extras should be flattened"
                            );
                        }
                        let (to_url, to_index) = self.source(dependency_name, dependency_version);

                        // Insert an edge from the dependent package to the extra package.
                        let edge = ResolutionDependencyEdge {
                            from: self_name.cloned(),
                            from_version: self_version.clone(),
                            from_url: self_url.cloned(),
                            from_index: self_index.cloned(),
                            from_extra: self_extra.cloned(),
                            from_group: self_group.cloned(),
                            to: dependency_name.clone(),
                            to_version: dependency_version.clone(),
                            to_url: to_url.cloned(),
                            to_index: to_index.cloned(),
                            to_extra: Some(dependency_extra.clone()),
                            to_group: None,
                            marker: *dependency_marker,
                        };
                        edges.push(edge);

                        // Insert an edge from the dependent package to the base package.
                        let edge = ResolutionDependencyEdge {
                            from: self_name.cloned(),
                            from_version: self_version.clone(),
                            from_url: self_url.cloned(),
                            from_index: self_index.cloned(),
                            from_extra: self_extra.cloned(),
                            from_group: self_group.cloned(),
                            to: dependency_name.clone(),
                            to_version: dependency_version.clone(),
                            to_url: to_url.cloned(),
                            to_index: to_index.cloned(),
                            to_extra: None,
                            to_group: None,
                            marker: *dependency_marker,
                        };
                        edges.push(edge);
                    }

                    PubGrubPackageInner::Group {
                        name: ref dependency_name,
                        group: ref dependency_group,
                        marker: ref dependency_marker,
                    } => {
                        debug_assert!(
                            self_name != Some(dependency_name),
                            "Groups should be flattened"
                        );

                        let (to_url, to_index) = self.source(dependency_name, dependency_version);

                        // Add an edge from the dependent package to the dev package, but _not_ the
                        // base package.
                        let edge = ResolutionDependencyEdge {
                            from: self_name.cloned(),
                            from_version: self_version.clone(),
                            from_url: self_url.cloned(),
                            from_index: self_index.cloned(),
                            from_extra: self_extra.cloned(),
                            from_group: self_group.cloned(),
                            to: dependency_name.clone(),
                            to_version: dependency_version.clone(),
                            to_url: to_url.cloned(),
                            to_index: to_index.cloned(),
                            to_extra: None,
                            to_group: Some(dependency_group.clone()),
                            marker: *dependency_marker,
                        };
                        edges.push(edge);
                    }

                    _ => {}
                }
            }
        }

        // Only concrete packages with a trivially-true marker become nodes; the
        // marker/extra/group proxies were flattened into the edges above.
        let nodes = solution
            .into_iter()
            .filter_map(|(package, version)| {
                if let PubGrubPackageInner::Package {
                    name,
                    extra,
                    group,
                    marker: MarkerTree::TRUE,
                } = &*self.pubgrub.package_store[package]
                {
                    let (url, index) = self.source(name, &version);
                    Some((
                        ResolutionPackage {
                            name: name.clone(),
                            extra: extra.clone(),
                            dev: group.clone(),
                            url: url.cloned(),
                            index: index.cloned(),
                        },
                        version,
                    ))
                } else {
                    None
                }
            })
            .collect();

        Resolution {
            nodes,
            edges,
            pins: self.pins,
            env: self.env,
        }
    }
3464}
3465
/// The resolution from a single fork including the virtual packages and the edges between them.
#[derive(Debug)]
pub(crate) struct Resolution {
    /// The virtual packages in the resolution, mapped to their selected version.
    pub(crate) nodes: FxHashMap<ResolutionPackage, Version>,
    /// The directed connections between the nodes, where the marker is the node weight. We don't
    /// store the requirement itself, but it can be retrieved from the package metadata.
    pub(crate) edges: Vec<ResolutionDependencyEdge>,
    /// Map each package name, version tuple from `packages` to a distribution.
    pub(crate) pins: FilePins,
    /// The environment setting this resolution was found under.
    pub(crate) env: ResolverEnvironment,
}
3478
/// Package representation we used during resolution where each extra and also the dev-dependencies
/// group are their own package.
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub(crate) struct ResolutionPackage {
    /// The normalized name of the package.
    pub(crate) name: PackageName,
    /// The extra this virtual package represents, if any.
    pub(crate) extra: Option<ExtraName>,
    /// The dependency group this virtual package represents, if any. (The field is named `dev`,
    /// but it is populated from the package's group.)
    pub(crate) dev: Option<GroupName>,
    /// For registry packages, this is `None`; otherwise, the direct URL of the distribution.
    pub(crate) url: Option<VerbatimParsedUrl>,
    /// For URL packages, this is `None`; otherwise, the index URL of the distribution.
    pub(crate) index: Option<IndexUrl>,
}
3491
/// The `from_` fields and the `to_` fields allow mapping to the originating and target
/// [`ResolutionPackage`] respectively. The `marker` is the edge weight.
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub(crate) struct ResolutionDependencyEdge {
    /// This value is `None` if the dependency comes from the root package.
    pub(crate) from: Option<PackageName>,
    /// The selected version of the originating package.
    pub(crate) from_version: Version,
    /// For registry packages, this is `None`; otherwise, the direct URL of the originating
    /// distribution.
    pub(crate) from_url: Option<VerbatimParsedUrl>,
    /// For URL packages, this is `None`; otherwise, the index URL of the originating distribution.
    pub(crate) from_index: Option<IndexUrl>,
    /// The extra of the originating virtual package, if any.
    pub(crate) from_extra: Option<ExtraName>,
    /// The dependency group of the originating virtual package, if any.
    pub(crate) from_group: Option<GroupName>,
    /// The name of the target package.
    pub(crate) to: PackageName,
    /// The selected version of the target package.
    pub(crate) to_version: Version,
    /// For registry packages, this is `None`; otherwise, the direct URL of the target
    /// distribution.
    pub(crate) to_url: Option<VerbatimParsedUrl>,
    /// For URL packages, this is `None`; otherwise, the index URL of the target distribution.
    pub(crate) to_index: Option<IndexUrl>,
    /// The extra of the target virtual package, if any.
    pub(crate) to_extra: Option<ExtraName>,
    /// The dependency group of the target virtual package, if any.
    pub(crate) to_group: Option<GroupName>,
    /// The edge weight: the PEP 508 marker under which this dependency applies.
    pub(crate) marker: MarkerTree,
}
3511
3512impl ResolutionDependencyEdge {
3513    pub(crate) fn universal_marker(&self) -> UniversalMarker {
3514        // We specifically do not account for conflict
3515        // markers here. Instead, those are computed via
3516        // a traversal on the resolution graph.
3517        UniversalMarker::new(self.marker, ConflictMarker::TRUE)
3518    }
3519}
3520
/// Fetch the metadata for an item.
#[derive(Debug)]
// NOTE(review): `Dist` makes some variants much larger than others; presumably
// boxing was judged not worthwhile for these short-lived values — confirm
// before changing.
#[expect(clippy::large_enum_variant)]
pub(crate) enum Request {
    /// A request to fetch the metadata for a package.
    Package(PackageName, Option<IndexMetadata>),
    /// A request to fetch the metadata for a built or source distribution.
    Dist(Dist),
    /// A request to fetch the metadata from an already-installed distribution.
    Installed(InstalledDist),
    /// A request to pre-fetch the metadata for a package and the best-guess distribution.
    Prefetch(PackageName, Range<Version>, PythonRequirement),
}
3534
3535impl<'a> From<ResolvedDistRef<'a>> for Request {
3536    fn from(dist: ResolvedDistRef<'a>) -> Self {
3537        // N.B. This is almost identical to `ResolvedDistRef::to_owned`, but
3538        // creates a `Request` instead of a `ResolvedDist`. There's probably
3539        // some room for DRYing this up a bit. The obvious way would be to
3540        // add a method to create a `Dist`, but a `Dist` cannot be represented
3541        // as an installed dist.
3542        match dist {
3543            ResolvedDistRef::InstallableRegistrySourceDist { sdist, prioritized } => {
3544                // This is okay because we're only here if the prioritized dist
3545                // has an sdist, so this always succeeds.
3546                let source = prioritized.source_dist().expect("a source distribution");
3547                assert_eq!(
3548                    (&sdist.name, &sdist.version),
3549                    (&source.name, &source.version),
3550                    "expected chosen sdist to match prioritized sdist"
3551                );
3552                Self::Dist(Dist::Source(SourceDist::Registry(source)))
3553            }
3554            ResolvedDistRef::InstallableRegistryBuiltDist {
3555                wheel, prioritized, ..
3556            } => {
3557                assert_eq!(
3558                    Some(&wheel.filename),
3559                    prioritized.best_wheel().map(|(wheel, _)| &wheel.filename),
3560                    "expected chosen wheel to match best wheel"
3561                );
3562                // This is okay because we're only here if the prioritized dist
3563                // has at least one wheel, so this always succeeds.
3564                let built = prioritized.built_dist().expect("at least one wheel");
3565                Self::Dist(Dist::Built(BuiltDist::Registry(built)))
3566            }
3567            ResolvedDistRef::Installed { dist } => Self::Installed(dist.clone()),
3568        }
3569    }
3570}
3571
3572impl Display for Request {
3573    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
3574        match self {
3575            Self::Package(package_name, _) => {
3576                write!(f, "Versions {package_name}")
3577            }
3578            Self::Dist(dist) => {
3579                write!(f, "Metadata {dist}")
3580            }
3581            Self::Installed(dist) => {
3582                write!(f, "Installed metadata {dist}")
3583            }
3584            Self::Prefetch(package_name, range, _) => {
3585                write!(f, "Prefetch {package_name} {range}")
3586            }
3587        }
3588    }
3589}
3590
/// The metadata returned for a [`Request`].
#[derive(Debug)]
// NOTE(review): mirrors the `large_enum_variant` expectation on `Request`;
// the `Dist`/`MetadataResponse` payloads are large.
#[expect(clippy::large_enum_variant)]
enum Response {
    /// The returned metadata for a package hosted on a registry.
    Package(PackageName, Option<IndexUrl>, VersionsResponse),
    /// The returned metadata for a distribution.
    Dist {
        /// The distribution whose metadata was fetched.
        dist: Dist,
        /// The metadata response for that distribution.
        metadata: MetadataResponse,
    },
    /// The returned metadata for an already-installed distribution.
    Installed {
        /// The installed distribution whose metadata was read.
        dist: InstalledDist,
        /// The metadata response for that distribution.
        metadata: MetadataResponse,
    },
}
3607
/// Information about the dependencies for a particular package.
///
/// This effectively distills the dependency metadata of a package down into
/// its pubgrub specific constituent parts: each dependency package has a range
/// of possible versions.
enum Dependencies {
    /// Package dependencies are not available, with the reason they are
    /// unavailable.
    Unavailable(UnavailableVersion),
    /// Container for all available package versions.
    ///
    /// Note that in universal mode, it is possible and allowed for multiple
    /// `PubGrubPackage` values in this list to have the same package name.
    /// These conflicts are resolved via `Dependencies::fork`.
    Available(Vec<PubGrubDependency>),
    /// Dependencies that should never result in a fork.
    ///
    /// For example, the dependencies of a `Marker` package will have the
    /// same name and version, but differ according to marker expressions.
    /// But we never want this to result in a fork.
    Unforkable(Vec<PubGrubDependency>),
}
3629
3630impl Dependencies {
3631    /// Turn this flat list of dependencies into a potential set of forked
3632    /// groups of dependencies.
3633    ///
3634    /// A fork *only* occurs when there are multiple dependencies with the same
3635    /// name *and* those dependency specifications have corresponding marker
3636    /// expressions that are completely disjoint with one another.
3637    fn fork(
3638        self,
3639        env: &ResolverEnvironment,
3640        python_requirement: &PythonRequirement,
3641        conflicts: &Conflicts,
3642    ) -> ForkedDependencies {
3643        let deps = match self {
3644            Self::Available(deps) => deps,
3645            Self::Unforkable(deps) => return ForkedDependencies::Unforked(deps),
3646            Self::Unavailable(err) => return ForkedDependencies::Unavailable(err),
3647        };
3648        let mut name_to_deps: BTreeMap<PackageName, Vec<PubGrubDependency>> = BTreeMap::new();
3649        for dep in deps {
3650            let name = dep
3651                .package
3652                .name()
3653                .expect("dependency always has a name")
3654                .clone();
3655            name_to_deps.entry(name).or_default().push(dep);
3656        }
3657        let Forks {
3658            mut forks,
3659            diverging_packages,
3660        } = Forks::new(name_to_deps, env, python_requirement, conflicts);
3661        if forks.is_empty() {
3662            ForkedDependencies::Unforked(vec![])
3663        } else if forks.len() == 1 {
3664            ForkedDependencies::Unforked(forks.pop().unwrap().dependencies)
3665        } else {
3666            ForkedDependencies::Forked {
3667                forks,
3668                diverging_packages: diverging_packages.into_iter().collect(),
3669            }
3670        }
3671    }
3672}
3673
/// Information about the (possibly forked) dependencies for a particular
/// package.
///
/// This is like `Dependencies` but with an extra variant that only occurs when
/// a `Dependencies` list has multiple dependency specifications with the same
/// name and non-overlapping marker expressions (i.e., a fork occurs).
#[derive(Debug)]
enum ForkedDependencies {
    /// Package dependencies are not available.
    Unavailable(UnavailableVersion),
    /// No forking occurred.
    ///
    /// This is the same as `Dependencies::Available`.
    Unforked(Vec<PubGrubDependency>),
    /// Forked containers for all available package versions.
    ///
    /// Note that there are always at least two forks. If there would
    /// be fewer than 2 forks, then there is no fork at all and the
    /// `Unforked` variant is used instead.
    Forked {
        forks: Vec<Fork>,
        /// The package(s) with different requirements for disjoint markers.
        diverging_packages: Vec<PackageName>,
    },
}
3699
/// A list of forks determined from the dependencies of a single package.
///
/// Any time a marker expression is seen that is not true for all possible
/// marker environments, it is possible for it to introduce a new fork.
#[derive(Debug, Default)]
struct Forks {
    /// The forks discovered among the dependencies.
    forks: Vec<Fork>,
    /// The package(s) that provoked at least one additional fork.
    diverging_packages: BTreeSet<PackageName>,
}
3711
impl Forks {
    /// Partition the dependencies of a single package into a set of forks.
    ///
    /// Starting from a single fork covering the incoming environment, each
    /// dependency whose markers allow it may split the existing forks into
    /// narrower environments, and each conflict set may split them further
    /// into exclusion variants. Packages that provoke a split are recorded
    /// in `diverging_packages`.
    fn new(
        name_to_deps: BTreeMap<PackageName, Vec<PubGrubDependency>>,
        env: &ResolverEnvironment,
        python_requirement: &PythonRequirement,
        conflicts: &Conflicts,
    ) -> Self {
        let python_marker = python_requirement.to_marker_tree();

        // Begin with a single fork that covers the entire incoming environment.
        let mut forks = vec![Fork::new(env.clone())];
        let mut diverging_packages = BTreeSet::new();
        for (name, mut deps) in name_to_deps {
            assert!(!deps.is_empty(), "every name has at least one dependency");
            // We never fork if there's only one dependency
            // specification for a given package name. This particular
            // strategy results in a "conservative" approach to forking
            // that gives up correctness in some cases in exchange for
            // more limited forking. More limited forking results in
            // simpler-and-easier-to-understand lock files and faster
            // resolving. The correctness we give up manifests when
            // two transitive non-sibling dependencies conflict. In
            // that case, we don't detect the fork ahead of time (at
            // present).
            if let [dep] = deps.as_slice() {
                // There's one exception: if the requirement increases the minimum-supported Python
                // version, we also fork in order to respect that minimum in the subsequent
                // resolution.
                //
                // For example, given `requires-python = ">=3.7"` and `uv ; python_version >= "3.8"`,
                // where uv itself only supports Python 3.8 and later, we need to fork to ensure
                // that the resolution can find a solution.
                if marker::requires_python(dep.package.marker())
                    .is_none_or(|bound| !python_requirement.raises(&bound))
                {
                    // Add the single dependency to every fork whose environment admits it.
                    let dep = deps.pop().unwrap();
                    let marker = dep.package.marker();
                    for fork in &mut forks {
                        if fork.env.included_by_marker(marker) {
                            fork.add_dependency(dep.clone());
                        }
                    }
                    continue;
                }
            } else {
                // If all dependencies have the same markers, we should also avoid forking.
                if let Some(dep) = deps.first() {
                    let marker = dep.package.marker();
                    if deps.iter().all(|dep| marker == dep.package.marker()) {
                        // Unless that "same marker" is a Python requirement that is stricter than
                        // the current Python requirement. In that case, we need to fork to respect
                        // the stricter requirement.
                        if marker::requires_python(marker)
                            .is_none_or(|bound| !python_requirement.raises(&bound))
                        {
                            for dep in deps {
                                for fork in &mut forks {
                                    if fork.env.included_by_marker(marker) {
                                        fork.add_dependency(dep.clone());
                                    }
                                }
                            }
                            continue;
                        }
                    }
                }
            }
            for dep in deps {
                let mut forker = match ForkingPossibility::new(env, &dep) {
                    ForkingPossibility::Possible(forker) => forker,
                    ForkingPossibility::DependencyAlwaysExcluded => {
                        // If the markers can never be satisfied by the parent
                        // fork, then we can drop this dependency unceremoniously.
                        continue;
                    }
                    ForkingPossibility::NoForkingPossible => {
                        // Or, if the markers are always true, then we just
                        // add the dependency to every fork unconditionally.
                        for fork in &mut forks {
                            fork.add_dependency(dep.clone());
                        }
                        continue;
                    }
                };
                // Otherwise, we *should* need to add a new fork...
                diverging_packages.insert(name.clone());

                // Re-split every existing fork against this dependency's markers.
                let mut new = vec![];
                for fork in std::mem::take(&mut forks) {
                    let Some((remaining_forker, envs)) = forker.fork(&fork.env) else {
                        new.push(fork);
                        continue;
                    };
                    forker = remaining_forker;

                    for fork_env in envs {
                        let mut new_fork = fork.clone();
                        new_fork.set_env(fork_env);
                        // We only add the dependency to this fork if it
                        // satisfies the fork's markers. Some forks are
                        // specifically created to exclude this dependency,
                        // so this isn't always true!
                        if forker.included(&new_fork.env) {
                            new_fork.add_dependency(dep.clone());
                        }
                        // Filter out any forks we created that are disjoint with our
                        // Python requirement.
                        if new_fork.env.included_by_marker(python_marker) {
                            new.push(new_fork);
                        }
                    }
                }
                forks = new;
            }
        }
        // When there is a conflicting group configuration, we need
        // to potentially add more forks. Each fork added contains an
        // exclusion list of conflicting groups where dependencies with
        // the corresponding package and extra name are forcefully
        // excluded from that group.
        //
        // We specifically iterate on conflicting groups and
        // potentially re-generate all forks for each one. We do it
        // this way in case there are multiple sets of conflicting
        // groups that impact the forks here.
        //
        // For example, if we have conflicting groups {x1, x2} and {x3,
        // x4}, we need to make sure the forks generated from one set
        // also account for the other set.
        for set in conflicts.iter() {
            let mut new = vec![];
            for fork in std::mem::take(&mut forks) {
                // Check if this conflict set is relevant to this fork. We need two conditions:
                //
                // 1. At least one item has dependencies in this fork (otherwise there's nothing to
                //    fork on).
                // 2. At least two items are not already excluded in this fork's environment
                //    (otherwise the conflict constraint is already satisfied and no fork is
                //    needed).
                let mut has_conflicting_dependency = false;
                for item in set.iter() {
                    if fork.contains_conflicting_item(item.as_ref()) {
                        has_conflicting_dependency = true;
                        diverging_packages.insert(item.package().clone());
                        break;
                    }
                }
                if !has_conflicting_dependency {
                    new.push(fork);
                    continue;
                }

                // If fewer than two items in this conflict set are still possible (not already
                // excluded) in this fork, the conflict constraint is already satisfied by prior
                // forking. We can skip the full N+1 fork split if the single remaining non-excluded
                // item doesn't appear in any other conflict set (since it would never need its own
                // "excluded" variant).
                let non_excluded: Vec<_> = set
                    .iter()
                    .filter(|item| fork.env.included_by_group(item.as_ref()))
                    .collect();
                if non_excluded.len() < 2 {
                    // Check if any non-excluded item still has a live conflict in another set —
                    // i.e., another set where this item AND at least one other non-excluded item
                    // both appear. If so, we still need to fork to create the "excluded" variant
                    // for that item.
                    let dominated = non_excluded.iter().all(|item| {
                        !conflicts.iter().any(|other_set| {
                            !std::ptr::eq(set, other_set)
                                && other_set.contains(item.package(), item.kind().as_ref())
                                && other_set
                                    .iter()
                                    .filter(|other_item| {
                                        other_item.package() != item.package()
                                            || other_item.kind() != item.kind()
                                    })
                                    .any(|other_item| {
                                        fork.env.included_by_group(other_item.as_ref())
                                    })
                        })
                    });
                    if dominated {
                        // When dependencies are added to forks, we check `included_by_marker` but
                        // not on whether the dependency's conflict item is included by the fork's
                        // environment so there may be extraneous dependencies and we need to filter
                        // the fork to clean up dependencies gated on already-excluded extras.
                        let rules: Vec<_> = set
                            .iter()
                            .filter(|item| !fork.env.included_by_group(item.as_ref()))
                            .cloned()
                            .map(Err)
                            .collect();
                        if let Some(filtered) = fork.filter(rules) {
                            new.push(filtered);
                        }
                        continue;
                    }
                }

                // Create a fork that excludes ALL conflicts.
                if let Some(fork_none) = fork.clone().filter(set.iter().cloned().map(Err)) {
                    new.push(fork_none);
                }

                // Now create a fork for each conflicting group, where
                // that fork excludes every *other* conflicting group.
                //
                // So if we have conflicting extras foo, bar and baz,
                // then this creates three forks: one that excludes
                // {foo, bar}, one that excludes {foo, baz} and one
                // that excludes {bar, baz}.
                for (i, _) in set.iter().enumerate() {
                    let fork_allows_group = fork.clone().filter(
                        set.iter()
                            .cloned()
                            .enumerate()
                            .map(|(j, group)| if i == j { Ok(group) } else { Err(group) }),
                    );
                    if let Some(fork_allows_group) = fork_allows_group {
                        new.push(fork_allows_group);
                    }
                }
            }
            forks = new;
        }
        Self {
            forks,
            diverging_packages,
        }
    }
}
3942
/// A single fork in a list of dependencies.
///
/// A fork corresponds to the full list of dependencies for a package,
/// but with any conflicting dependency specifications omitted. For
/// example, if we have `a<2 ; sys_platform == 'foo'` and `a>=2 ;
/// sys_platform == 'bar'`, then because the dependency specifications
/// have the same name and because the marker expressions are disjoint,
/// a fork occurs. One fork will contain `a<2` but not `a>=2`, while
/// the other fork will contain `a>=2` but not `a<2`.
#[derive(Clone, Debug)]
struct Fork {
    /// The list of dependencies for this fork, guaranteed to be conflict
    /// free. (i.e., There are no two packages with the same name with
    /// non-overlapping marker expressions.)
    ///
    /// Note that callers shouldn't mutate this sequence directly. Instead,
    /// they should use `add_dependency`. Namely, it should be impossible for
    /// a package with a marker expression that is disjoint from the marker
    /// expression on this fork to be added.
    dependencies: Vec<PubGrubDependency>,
    /// The conflicting groups in this fork.
    ///
    /// This exists to make some access patterns more efficient. Namely,
    /// it makes it easy to check whether there's a dependency with a
    /// particular conflicting group in this fork.
    conflicts: crate::FxHashbrownSet<ConflictItem>,
    /// The resolver environment for this fork.
    ///
    /// Principally, this corresponds to the markers in this fork. So in the
    /// example above, the `a<2` fork would have `sys_platform == 'foo'`, while
    /// the `a>=2` fork would have `sys_platform == 'bar'`.
    ///
    /// If this fork was generated from another fork, then this *includes*
    /// the criteria from its parent. i.e., Its marker expression represents
    /// the intersection of the marker expression from its parent and any
    /// additional marker expression generated by additional forking based on
    /// conflicting dependency specifications.
    env: ResolverEnvironment,
}
3982
3983impl Fork {
3984    /// Create a new fork with no dependencies with the given resolver
3985    /// environment.
3986    fn new(env: ResolverEnvironment) -> Self {
3987        Self {
3988            dependencies: vec![],
3989            conflicts: crate::FxHashbrownSet::default(),
3990            env,
3991        }
3992    }
3993
3994    /// Add a dependency to this fork.
3995    fn add_dependency(&mut self, dep: PubGrubDependency) {
3996        if let Some(conflicting_item) = dep.conflicting_item() {
3997            self.conflicts.insert(conflicting_item.to_owned());
3998        }
3999        self.dependencies.push(dep);
4000    }
4001
4002    /// Sets the resolver environment to the one given.
4003    ///
4004    /// Any dependency in this fork that does not satisfy the given environment
4005    /// is removed.
4006    fn set_env(&mut self, env: ResolverEnvironment) {
4007        self.env = env;
4008        self.dependencies.retain(|dep| {
4009            let marker = dep.package.marker();
4010            if self.env.included_by_marker(marker) {
4011                return true;
4012            }
4013            if let Some(conflicting_item) = dep.conflicting_item() {
4014                self.conflicts.remove(&conflicting_item);
4015            }
4016            false
4017        });
4018    }
4019
4020    /// Returns true if any of the dependencies in this fork contain a
4021    /// dependency with the given package and extra values.
4022    fn contains_conflicting_item(&self, item: ConflictItemRef<'_>) -> bool {
4023        self.conflicts.contains(&item)
4024    }
4025
4026    /// Include or Exclude the given groups from this fork.
4027    ///
4028    /// This removes all dependencies matching the given conflicting groups.
4029    ///
4030    /// If the exclusion rules would result in a fork with an unsatisfiable
4031    /// resolver environment, then this returns `None`.
4032    fn filter(
4033        mut self,
4034        rules: impl IntoIterator<Item = Result<ConflictItem, ConflictItem>>,
4035    ) -> Option<Self> {
4036        self.env = self.env.filter_by_group(rules)?;
4037        self.dependencies.retain(|dep| {
4038            let Some(conflicting_item) = dep.conflicting_item() else {
4039                return true;
4040            };
4041            if self.env.included_by_group(conflicting_item) {
4042                return true;
4043            }
4044            match conflicting_item.kind() {
4045                // We should not filter entire projects unless they're a top-level dependency
4046                // Otherwise, we'll fail to solve for children of the project, like extras
4047                ConflictKindRef::Project => {
4048                    if dep.parent.is_some() {
4049                        return true;
4050                    }
4051                }
4052                ConflictKindRef::Group(_) => {}
4053                ConflictKindRef::Extra(_) => {}
4054            }
4055            self.conflicts.remove(&conflicting_item);
4056            false
4057        });
4058        Some(self)
4059    }
4060
4061    /// Compare forks, preferring forks with g `requires-python` requirements.
4062    fn cmp_requires_python(&self, other: &Self) -> Ordering {
4063        // A higher `requires-python` requirement indicates a _higher-priority_ fork.
4064        //
4065        // This ordering ensures that we prefer choosing the highest version for each fork based on
4066        // its `requires-python` requirement.
4067        //
4068        // The reverse would prefer choosing fewer versions, at the cost of using older package
4069        // versions on newer Python versions. For example, if reversed, we'd prefer to solve `<3.7
4070        // before solving `>=3.7`, since the resolution produced by the former might work for the
4071        // latter, but the inverse is unlikely to be true.
4072        let self_bound = self.env.requires_python().unwrap_or_default();
4073        let other_bound = other.env.requires_python().unwrap_or_default();
4074        self_bound.lower().cmp(other_bound.lower())
4075    }
4076
4077    /// Compare forks, preferring forks with upper bounds.
4078    fn cmp_upper_bounds(&self, other: &Self) -> Ordering {
4079        // We'd prefer to solve `numpy <= 2` before solving `numpy >= 1`, since the resolution
4080        // produced by the former might work for the latter, but the inverse is unlikely to be true
4081        // due to maximum version selection. (Selecting `numpy==2.0.0` would satisfy both forks, but
4082        // selecting the latest `numpy` would not.)
4083        let self_upper_bounds = self
4084            .dependencies
4085            .iter()
4086            .filter(|dep| {
4087                dep.version
4088                    .bounding_range()
4089                    .is_some_and(|(_, upper)| !matches!(upper, Bound::Unbounded))
4090            })
4091            .count();
4092        let other_upper_bounds = other
4093            .dependencies
4094            .iter()
4095            .filter(|dep| {
4096                dep.version
4097                    .bounding_range()
4098                    .is_some_and(|(_, upper)| !matches!(upper, Bound::Unbounded))
4099            })
4100            .count();
4101
4102        self_upper_bounds.cmp(&other_upper_bounds)
4103    }
4104}
4105
// Manual `Eq` to match the manual `PartialEq` below, which compares only
// `dependencies` and `env` (the `conflicts` index is presumably redundant
// for equality since it's maintained from `dependencies` — confirm).
impl Eq for Fork {}
4107
impl PartialEq for Fork {
    fn eq(&self, other: &Self) -> bool {
        // `conflicts` is intentionally omitted: it is maintained as an index
        // over `dependencies` (see `add_dependency`), so comparing it would
        // presumably be redundant — confirm it can never diverge.
        self.dependencies == other.dependencies && self.env == other.env
    }
}
4113
/// The initial state for one fork of the resolution: the environment to solve
/// under, plus the first package (and, optionally, version) to decide in it.
#[derive(Debug, Clone)]
pub(crate) struct VersionFork {
    /// The environment to use in the fork.
    env: ResolverEnvironment,
    /// The initial package to select in the fork.
    id: Id<PubGrubPackage>,
    /// The initial version to set for the selected package in the fork.
    version: Option<Version>,
}
4123
4124/// Enrich a [`ResolveError`] with additional information about why a given package was included.
4125fn enrich_dependency_error(
4126    error: ResolveError,
4127    id: Id<PubGrubPackage>,
4128    version: &Version,
4129    pubgrub: &State<UvDependencyProvider>,
4130) -> ResolveError {
4131    let Some(name) = pubgrub.package_store[id].name_no_root() else {
4132        return error;
4133    };
4134    let chain = DerivationChainBuilder::from_state(id, version, pubgrub).unwrap_or_default();
4135    ResolveError::Dependencies(Box::new(error), name.clone(), version.clone(), chain)
4136}
4137
4138/// Compute the set of markers for which a package is known to be relevant.
4139fn find_environments(id: Id<PubGrubPackage>, state: &State<UvDependencyProvider>) -> MarkerTree {
4140    let package = &state.package_store[id];
4141    if package.is_root() {
4142        return MarkerTree::TRUE;
4143    }
4144
4145    // Retrieve the incompatibilities for the current package.
4146    let Some(incompatibilities) = state.incompatibilities.get(&id) else {
4147        return MarkerTree::FALSE;
4148    };
4149
4150    // Find all dependencies on the current package.
4151    let mut marker = MarkerTree::FALSE;
4152    for index in incompatibilities {
4153        let incompat = &state.incompatibility_store[*index];
4154        if let Kind::FromDependencyOf(id1, _, id2, _) = &incompat.kind {
4155            if id == *id2 {
4156                marker.or({
4157                    let mut marker = package.marker();
4158                    marker.and(find_environments(*id1, state));
4159                    marker
4160                });
4161            }
4162        }
4163    }
4164    marker
4165}
4166
4167#[derive(Debug, Default, Clone)]
4168struct ConflictTracker {
4169    /// How often a decision on the package was discarded due to another package decided earlier.
4170    affected: FxHashMap<Id<PubGrubPackage>, usize>,
4171    /// Package(s) to be prioritized after the next unit propagation
4172    ///
4173    /// Distilled from `affected` for fast checking in the hot loop.
4174    prioritize: Vec<Id<PubGrubPackage>>,
4175    /// How often a package was decided earlier and caused another package to be discarded.
4176    culprit: FxHashMap<Id<PubGrubPackage>, usize>,
4177    /// Package(s) to be de-prioritized after the next unit propagation
4178    ///
4179    /// Distilled from `culprit` for fast checking in the hot loop.
4180    deprioritize: Vec<Id<PubGrubPackage>>,
4181}