Skip to main content

uv_resolver/resolver/
mod.rs

1//! Given a set of requirements, find a set of compatible packages.
2
3use std::borrow::Cow;
4use std::cmp::Ordering;
5use std::collections::{BTreeMap, BTreeSet, VecDeque};
6use std::fmt::{Display, Formatter, Write};
7use std::ops::Bound;
8use std::sync::Arc;
9use std::time::Instant;
10use std::{iter, slice, thread};
11
12use dashmap::DashMap;
13use either::Either;
14use futures::{FutureExt, StreamExt};
15use itertools::Itertools;
16use pubgrub::{Id, IncompId, Incompatibility, Kind, Range, Ranges, State};
17use rustc_hash::{FxHashMap, FxHashSet};
18use tokio::sync::mpsc::{self, Receiver, Sender};
19use tokio::sync::oneshot;
20use tokio_stream::wrappers::ReceiverStream;
21use tracing::{Level, debug, info, instrument, trace, warn};
22
23use uv_configuration::{Constraints, Excludes, Overrides};
24use uv_distribution::{ArchiveMetadata, DistributionDatabase};
25use uv_distribution_types::{
26    BuiltDist, CompatibleDist, DerivationChain, Dist, DistErrorKind, Identifier, IncompatibleDist,
27    IncompatibleSource, IncompatibleWheel, IndexCapabilities, IndexLocations, IndexMetadata,
28    IndexUrl, InstalledDist, Name, PythonRequirementKind, RemoteSource, Requirement, ResolvedDist,
29    ResolvedDistRef, SourceDist, VersionOrUrlRef, implied_markers,
30};
31use uv_git::GitResolver;
32use uv_normalize::{ExtraName, GroupName, PackageName};
33use uv_pep440::{MIN_VERSION, Version, VersionSpecifiers, release_specifiers_to_ranges};
34use uv_pep508::{
35    MarkerEnvironment, MarkerExpression, MarkerOperator, MarkerTree, MarkerValueString,
36};
37use uv_platform_tags::{IncompatibleTag, Tags};
38use uv_pypi_types::{ConflictItem, ConflictItemRef, ConflictKindRef, Conflicts, VerbatimParsedUrl};
39use uv_torch::TorchStrategy;
40use uv_types::{BuildContext, HashStrategy, InstalledPackagesProvider};
41use uv_warnings::warn_user_once;
42
43use crate::candidate_selector::{Candidate, CandidateDist, CandidateSelector};
44use crate::dependency_provider::UvDependencyProvider;
45use crate::error::{NoSolutionError, ResolveError};
46use crate::fork_indexes::ForkIndexes;
47use crate::fork_strategy::ForkStrategy;
48use crate::fork_urls::ForkUrls;
49use crate::manifest::Manifest;
50use crate::pins::FilePins;
51use crate::preferences::{PreferenceSource, Preferences};
52use crate::pubgrub::{
53    DependencySource, PubGrubDependency, PubGrubPackage, PubGrubPackageInner, PubGrubPriorities,
54    PubGrubPython,
55};
56use crate::python_requirement::PythonRequirement;
57use crate::resolution::ResolverOutput;
58use crate::resolution_mode::ResolutionStrategy;
59pub(crate) use crate::resolver::availability::{
60    ResolverVersion, UnavailableErrorChain, UnavailablePackage, UnavailableReason,
61    UnavailableVersion,
62};
63use crate::resolver::batch_prefetch::BatchPrefetcher;
64pub use crate::resolver::derivation::DerivationChainBuilder;
65pub use crate::resolver::environment::ResolverEnvironment;
66use crate::resolver::environment::{
67    ForkingPossibility, fork_version_by_marker, fork_version_by_python_requirement,
68};
69pub(crate) use crate::resolver::fork_map::{ForkMap, ForkSet};
70pub use crate::resolver::index::InMemoryIndex;
71use crate::resolver::indexes::Indexes;
72pub use crate::resolver::provider::{
73    DefaultResolverProvider, MetadataResponse, PackageVersionsResult, ResolverProvider,
74    VersionsResponse, WheelMetadataResult,
75};
76pub use crate::resolver::reporter::{BuildId, Reporter};
77use crate::resolver::system::SystemDependency;
78pub(crate) use crate::resolver::urls::Urls;
79use crate::universal_marker::{ConflictMarker, UniversalMarker};
80use crate::yanks::AllowedYanks;
81use crate::{DependencyMode, Exclusions, FlatIndex, Options, ResolutionMode, VersionMap, marker};
82pub(crate) use provider::MetadataUnavailable;
83
84mod availability;
85mod batch_prefetch;
86mod derivation;
87mod environment;
88mod fork_map;
89mod index;
90mod indexes;
91mod provider;
92mod reporter;
93mod system;
94mod urls;
95
/// The number of conflicts a package may accumulate before we re-prioritize and backtrack.
///
/// NOTE(review): the consumers of this threshold (conflict tracking and
/// re-prioritization, presumably `record_conflict`/`reprioritize_conflicts`)
/// live outside this chunk — confirm against the rest of the module.
const CONFLICT_THRESHOLD: usize = 5;
98
/// A package resolver, pairing the shared [`ResolverState`] with the I/O
/// backend used to fetch package versions and distribution metadata.
pub struct Resolver<Provider: ResolverProvider, InstalledPackages: InstalledPackagesProvider> {
    /// State shared between the prefetcher and the PubGrub solver, across all forks.
    state: ResolverState<InstalledPackages>,
    /// The backend that services metadata requests (see [`ResolverProvider`]).
    provider: Provider,
}
103
/// State that is shared between the prefetcher and the PubGrub solver during
/// resolution, across all forks.
struct ResolverState<InstalledPackages: InstalledPackagesProvider> {
    /// The name of the project being resolved, if any (from the manifest).
    project: Option<PackageName>,
    /// The direct requirements from the manifest.
    requirements: Vec<Requirement>,
    /// Version constraints applied on top of the requirements (from the manifest).
    constraints: Constraints,
    /// Requirement overrides (from the manifest).
    overrides: Overrides,
    /// Excluded requirements (from the manifest).
    excludes: Excludes,
    /// Version preferences (e.g., from a lockfile); also extended with solved
    /// versions between forks during resolution.
    preferences: Preferences,
    /// Resolver for Git references (from the build context).
    git: GitResolver,
    /// Capabilities of the configured package indexes (from the build context).
    capabilities: IndexCapabilities,
    /// The configured index locations (from the build context).
    locations: IndexLocations,
    /// Packages excluded from resolution (from the manifest).
    exclusions: Exclusions,
    /// Per-package URL requirements derived from the manifest.
    urls: Urls,
    /// Per-package index assignments derived from the manifest.
    indexes: Indexes,
    /// Whether to resolve transitive dependencies or only direct ones.
    dependency_mode: DependencyMode,
    /// The strategy for validating distribution hashes.
    hasher: HashStrategy,
    /// The environment(s) to resolve for; drives forking in universal mode.
    env: ResolverEnvironment,
    // The environment of the current Python interpreter.
    current_environment: MarkerEnvironment,
    /// Platform tags for wheel compatibility checks, if resolving for a
    /// specific platform.
    tags: Option<Tags>,
    /// The target (and exact) Python version requirements for the resolution.
    python_requirement: PythonRequirement,
    /// Declared conflicts (e.g., between extras or groups).
    conflicts: Conflicts,
    /// The members of the current workspace (from the manifest).
    workspace_members: BTreeSet<PackageName>,
    /// Selects candidate versions according to the resolution strategy and preferences.
    selector: CandidateSelector,
    /// Shared in-memory cache of fetched package and version metadata.
    index: InMemoryIndex,
    /// Provider for querying the set of already-installed packages.
    installed_packages: InstalledPackages,
    /// Incompatibilities for packages that are entirely unavailable.
    unavailable_packages: DashMap<PackageName, UnavailablePackage>,
    /// Incompatibilities for packages that are unavailable at specific versions.
    incomplete_packages: DashMap<PackageName, DashMap<Version, MetadataUnavailable>>,
    /// The options that were used to configure this resolver.
    options: Options,
    /// The reporter to use for this resolver.
    reporter: Option<Arc<dyn Reporter>>,
}
140
141impl<'a, Context: BuildContext, InstalledPackages: InstalledPackagesProvider>
142    Resolver<DefaultResolverProvider<'a, Context>, InstalledPackages>
143{
144    /// Initialize a new resolver using the default backend doing real requests.
145    ///
146    /// Reads the flat index entries.
147    ///
148    /// # Marker environment
149    ///
150    /// The marker environment is optional.
151    ///
152    /// When a marker environment is not provided, the resolver is said to be
153    /// in "universal" mode. When in universal mode, the resolution produced
154    /// may contain multiple versions of the same package. And thus, in order
155    /// to use the resulting resolution, there must be a "universal"-aware
156    /// reader of the resolution that knows to exclude distributions that can't
157    /// be used in the current environment.
158    ///
159    /// When a marker environment is provided, the resolver is in
160    /// "non-universal" mode, which corresponds to standard `pip` behavior that
161    /// works only for a specific marker environment.
162    pub fn new(
163        manifest: Manifest,
164        options: Options,
165        python_requirement: &'a PythonRequirement,
166        env: ResolverEnvironment,
167        current_environment: &MarkerEnvironment,
168        conflicts: Conflicts,
169        tags: Option<&'a Tags>,
170        flat_index: &'a FlatIndex,
171        index: &'a InMemoryIndex,
172        hasher: &'a HashStrategy,
173        build_context: &'a Context,
174        installed_packages: InstalledPackages,
175        database: DistributionDatabase<'a, Context>,
176    ) -> Result<Self, ResolveError> {
177        let provider = DefaultResolverProvider::new(
178            database,
179            flat_index,
180            tags,
181            python_requirement.target(),
182            AllowedYanks::from_manifest(&manifest, &env, options.dependency_mode),
183            hasher,
184            options.exclude_newer.clone(),
185            build_context.locations(),
186            build_context.build_options(),
187            build_context.capabilities(),
188        );
189
190        Self::new_custom_io(
191            manifest,
192            options,
193            hasher,
194            env,
195            current_environment,
196            tags.cloned(),
197            python_requirement,
198            conflicts,
199            index,
200            build_context.git(),
201            build_context.capabilities(),
202            build_context.locations(),
203            provider,
204            installed_packages,
205        )
206    }
207}
208
209impl<Provider: ResolverProvider, InstalledPackages: InstalledPackagesProvider>
210    Resolver<Provider, InstalledPackages>
211{
212    /// Initialize a new resolver using a user provided backend.
213    pub fn new_custom_io(
214        manifest: Manifest,
215        options: Options,
216        hasher: &HashStrategy,
217        env: ResolverEnvironment,
218        current_environment: &MarkerEnvironment,
219        tags: Option<Tags>,
220        python_requirement: &PythonRequirement,
221        conflicts: Conflicts,
222        index: &InMemoryIndex,
223        git: &GitResolver,
224        capabilities: &IndexCapabilities,
225        locations: &IndexLocations,
226        provider: Provider,
227        installed_packages: InstalledPackages,
228    ) -> Result<Self, ResolveError> {
229        let state = ResolverState {
230            index: index.clone(),
231            git: git.clone(),
232            capabilities: capabilities.clone(),
233            selector: CandidateSelector::for_resolution(&options, &manifest, &env),
234            dependency_mode: options.dependency_mode,
235            urls: Urls::from_manifest(&manifest, &env, git, options.dependency_mode),
236            indexes: Indexes::from_manifest(&manifest, &env, options.dependency_mode),
237            project: manifest.project,
238            workspace_members: manifest.workspace_members,
239            requirements: manifest.requirements,
240            constraints: manifest.constraints,
241            overrides: manifest.overrides,
242            excludes: manifest.excludes,
243            preferences: manifest.preferences,
244            exclusions: manifest.exclusions,
245            hasher: hasher.clone(),
246            locations: locations.clone(),
247            env,
248            current_environment: current_environment.clone(),
249            tags,
250            python_requirement: python_requirement.clone(),
251            conflicts,
252            installed_packages,
253            unavailable_packages: DashMap::default(),
254            incomplete_packages: DashMap::default(),
255            options,
256            reporter: None,
257        };
258        Ok(Self { state, provider })
259    }
260
261    /// Set the [`Reporter`] to use for this installer.
262    #[must_use]
263    pub fn with_reporter(self, reporter: Arc<dyn Reporter>) -> Self {
264        Self {
265            state: ResolverState {
266                reporter: Some(reporter.clone()),
267                ..self.state
268            },
269            provider: self
270                .provider
271                .with_reporter(reporter.into_distribution_reporter()),
272        }
273    }
274
275    /// Resolve a set of requirements into a set of pinned versions.
276    pub async fn resolve(self) -> Result<ResolverOutput, ResolveError> {
277        let state = Arc::new(self.state);
278        let provider = Arc::new(self.provider);
279
280        // A channel to fetch package metadata (e.g., given `flask`, fetch all versions) and version
281        // metadata (e.g., given `flask==1.0.0`, fetch the metadata for that version).
282        // Channel size is set large to accommodate batch prefetching.
283        let (request_sink, request_stream) = mpsc::channel(300);
284
285        // Run the fetcher.
286        let requests_fut = state.clone().fetch(provider.clone(), request_stream).fuse();
287
288        // Spawn the PubGrub solver on a dedicated thread.
289        let solver = state.clone();
290        let (tx, rx) = oneshot::channel();
291        thread::Builder::new()
292            .name("uv-resolver".into())
293            .spawn(move || {
294                let result = solver.solve(&request_sink);
295
296                // This may fail if the main thread returned early due to an error.
297                let _ = tx.send(result);
298            })
299            .unwrap();
300
301        let resolve_fut = async move { rx.await.map_err(|_| ResolveError::ChannelClosed) };
302
303        // Wait for both to complete.
304        let ((), resolution) = tokio::try_join!(requests_fut, resolve_fut)?;
305
306        state.on_complete();
307        resolution
308    }
309}
310
311impl<InstalledPackages: InstalledPackagesProvider> ResolverState<InstalledPackages> {
312    #[instrument(skip_all)]
313    fn solve(
314        self: Arc<Self>,
315        request_sink: &Sender<Request>,
316    ) -> Result<ResolverOutput, ResolveError> {
317        debug!(
318            "Solving with installed Python version: {}",
319            self.python_requirement.exact()
320        );
321        debug!(
322            "Solving with target Python version: {}",
323            self.python_requirement.target()
324        );
325        if !self.options.exclude_newer.is_empty() {
326            debug!("Solving with exclude-newer: {}", self.options.exclude_newer);
327        }
328
329        let mut visited = FxHashSet::default();
330
331        let root = PubGrubPackage::from(PubGrubPackageInner::Root(self.project.clone()));
332        let pubgrub = State::init(root.clone(), MIN_VERSION.clone());
333        let prefetcher = BatchPrefetcher::new(
334            self.capabilities.clone(),
335            self.index.clone(),
336            request_sink.clone(),
337        );
338        let state = ForkState::new(
339            pubgrub,
340            self.env.clone(),
341            self.python_requirement.clone(),
342            prefetcher,
343        );
344        let mut preferences = self.preferences.clone();
345        let mut forked_states = self.env.initial_forked_states(state)?;
346        let mut resolutions = vec![];
347
348        'FORK: while let Some(mut state) = forked_states.pop() {
349            if let Some(split) = state.env.end_user_fork_display() {
350                let requires_python = state.python_requirement.target();
351                debug!("Solving {split} (requires-python: {requires_python:?})");
352            }
353            let start = Instant::now();
354            loop {
355                let highest_priority_pkg =
356                    if let Some(initial) = state.initial_id.take() {
357                        // If we just forked based on `requires-python`, we can skip unit
358                        // propagation, since we already propagated the package that initiated
359                        // the fork.
360                        initial
361                    } else {
362                        // Run unit propagation.
363                        let result = state.pubgrub.unit_propagation(state.next);
364                        match result {
365                            Err(err) => {
366                                // If unit propagation failed, there is no solution.
367                                return Err(self.convert_no_solution_err(
368                                    err,
369                                    state.fork_urls,
370                                    state.fork_indexes,
371                                    state.env,
372                                    self.current_environment.clone(),
373                                    &visited,
374                                ));
375                            }
376                            Ok(conflicts) => {
377                                for (affected, incompatibility) in conflicts {
378                                    // Conflict tracking: If there was a conflict, track affected and
379                                    // culprit for all root cause incompatibilities
380                                    state.record_conflict(affected, None, incompatibility);
381                                }
382                            }
383                        }
384
385                        // Pre-visit all candidate packages, to allow metadata to be fetched in parallel.
386                        if self.dependency_mode.is_transitive() {
387                            Self::pre_visit(
388                                state
389                                    .pubgrub
390                                    .partial_solution
391                                    .prioritized_packages()
392                                    .map(|(id, range)| (&state.pubgrub.package_store[id], range)),
393                                &self.urls,
394                                &self.indexes,
395                                &state.python_requirement,
396                                request_sink,
397                            )?;
398                        }
399
400                        Self::reprioritize_conflicts(&mut state);
401
402                        trace!(
403                            "Assigned packages: {}",
404                            state
405                                .pubgrub
406                                .partial_solution
407                                .extract_solution()
408                                .filter(|(p, _)| !state.pubgrub.package_store[*p].is_proxy())
409                                .map(|(p, v)| format!("{}=={}", state.pubgrub.package_store[p], v))
410                                .join(", ")
411                        );
412                        // Choose a package.
413                        // We aren't allowed to use the term intersection as it would extend the
414                        // mutable borrow of `state`.
415                        let Some((highest_priority_pkg, _)) =
416                            state.pubgrub.partial_solution.pick_highest_priority_pkg(
417                                |id, _range| state.priorities.get(&state.pubgrub.package_store[id]),
418                            )
419                        else {
420                            // All packages have been assigned, the fork has been successfully resolved
421                            if tracing::enabled!(Level::DEBUG) {
422                                state.prefetcher.log_tried_versions();
423                            }
424                            debug!(
425                                "{} resolution took {:.3}s",
426                                state.env,
427                                start.elapsed().as_secs_f32()
428                            );
429
430                            let resolution = state.into_resolution();
431
432                            // Walk over the selected versions, and mark them as preferences. We have to
433                            // add forks back as to not override the preferences from the lockfile for
434                            // the next fork
435                            //
436                            // If we're using a resolution mode that varies based on whether a dependency is
437                            // direct or transitive, skip preferences, as we risk adding a preference from
438                            // one fork (in which it's a transitive dependency) to another fork (in which
439                            // it's direct).
440                            if matches!(
441                                self.options.resolution_mode,
442                                ResolutionMode::Lowest | ResolutionMode::Highest
443                            ) {
444                                for (package, version) in &resolution.nodes {
445                                    preferences.insert(
446                                        package.name.clone(),
447                                        package.index.clone(),
448                                        resolution
449                                            .env
450                                            .try_universal_markers()
451                                            .unwrap_or(UniversalMarker::TRUE),
452                                        version.clone(),
453                                        PreferenceSource::Resolver,
454                                    );
455                                }
456                            }
457
458                            resolutions.push(resolution);
459                            continue 'FORK;
460                        };
461                        trace!(
462                            "Chose package for decision: {}. remaining choices: {}",
463                            state.pubgrub.package_store[highest_priority_pkg],
464                            state
465                                .pubgrub
466                                .partial_solution
467                                .undecided_packages()
468                                .filter(|(p, _)| !state.pubgrub.package_store[**p].is_proxy())
469                                .map(|(p, _)| state.pubgrub.package_store[*p].to_string())
470                                .join(", ")
471                        );
472
473                        highest_priority_pkg
474                    };
475
476                state.next = highest_priority_pkg;
477
478                // TODO(charlie): Remove as many usages of `next_package` as we can.
479                let next_id = state.next;
480                let next_package = &state.pubgrub.package_store[state.next];
481
482                let url = next_package
483                    .name()
484                    .and_then(|name| state.fork_urls.get(name));
485                let index = next_package
486                    .name()
487                    .and_then(|name| state.fork_indexes.get(name));
488
489                // Consider:
490                // ```toml
491                // dependencies = [
492                //   "iniconfig == 1.1.1 ; python_version < '3.12'",
493                //   "iniconfig @ https://files.pythonhosted.org/packages/ef/a6/62565a6e1cf69e10f5727360368e451d4b7f58beeac6173dc9db836a5b46/iniconfig-2.0.0-py3-none-any.whl ; python_version >= '3.12'",
494                // ]
495                // ```
496                // In the `python_version < '3.12'` case, we haven't pre-visited `iniconfig` yet,
497                // since we weren't sure whether it might also be a URL requirement when
498                // transforming the requirements. For that case, we do another request here
499                // (idempotent due to caching).
500                self.request_package(next_package, url, index, request_sink)?;
501
502                let version = if let Some(version) = state.initial_version.take() {
503                    // If we just forked based on platform support, we can skip version selection,
504                    // since the fork operation itself already selected the appropriate version for
505                    // the platform.
506                    version
507                } else {
508                    let term_intersection = state
509                        .pubgrub
510                        .partial_solution
511                        .term_intersection_for_package(next_id)
512                        .expect("a package was chosen but we don't have a term");
513                    let decision = self.choose_version(
514                        next_package,
515                        next_id,
516                        index.map(IndexMetadata::url),
517                        term_intersection.unwrap_positive(),
518                        &mut state.pins,
519                        &preferences,
520                        &state.fork_urls,
521                        &state.env,
522                        &state.python_requirement,
523                        &state.pubgrub,
524                        &mut visited,
525                        request_sink,
526                    )?;
527
528                    // Pick the next compatible version.
529                    let Some(version) = decision else {
530                        debug!("No compatible version found for: {next_package}");
531
532                        let term_intersection = state
533                            .pubgrub
534                            .partial_solution
535                            .term_intersection_for_package(next_id)
536                            .expect("a package was chosen but we don't have a term");
537
538                        if let PubGrubPackageInner::Package { name, .. } = &**next_package {
539                            // Check if the decision was due to the package being unavailable
540                            if let Some(entry) = self.unavailable_packages.get(name) {
541                                state
542                                    .pubgrub
543                                    .add_incompatibility(Incompatibility::custom_term(
544                                        next_id,
545                                        term_intersection.clone(),
546                                        UnavailableReason::Package(entry.clone()),
547                                    ));
548                                continue;
549                            }
550                        }
551
552                        state
553                            .pubgrub
554                            .add_incompatibility(Incompatibility::no_versions(
555                                next_id,
556                                term_intersection.clone(),
557                            ));
558                        continue;
559                    };
560
561                    let version = match version {
562                        ResolverVersion::Unforked(version) => version,
563                        ResolverVersion::Forked(forks) => {
564                            forked_states.extend(self.version_forks_to_fork_states(state, forks));
565                            continue 'FORK;
566                        }
567                        ResolverVersion::Unavailable(version, reason) => {
568                            state.add_unavailable_version(version, reason);
569                            continue;
570                        }
571                    };
572
573                    // Only consider registry packages for prefetch.
574                    if url.is_none() {
575                        state.prefetcher.prefetch_batches(
576                            next_package,
577                            index,
578                            &version,
579                            term_intersection.unwrap_positive(),
580                            state
581                                .pubgrub
582                                .partial_solution
583                                .unchanging_term_for_package(next_id),
584                            &state.python_requirement,
585                            &self.selector,
586                            &state.env,
587                        )?;
588                    }
589
590                    version
591                };
592
593                state.prefetcher.version_tried(next_package, &version);
594
595                self.on_progress(next_package, &version);
596
597                if !state
598                    .added_dependencies
599                    .entry(next_id)
600                    .or_default()
601                    .insert(version.clone())
602                {
603                    // `dep_incompats` are already in `incompatibilities` so we know there are not satisfied
604                    // terms and can add the decision directly.
605                    state
606                        .pubgrub
607                        .partial_solution
608                        .add_decision(next_id, version);
609                    continue;
610                }
611
612                // Retrieve that package dependencies.
613                let forked_deps = self.get_dependencies_forking(
614                    next_id,
615                    next_package,
616                    &version,
617                    &state.pins,
618                    &state.fork_urls,
619                    &state.env,
620                    &state.python_requirement,
621                    &state.pubgrub,
622                )?;
623
624                match forked_deps {
625                    ForkedDependencies::Unavailable(reason) => {
626                        // Then here, if we get a reason that we consider unrecoverable, we should
627                        // show the derivation chain.
628                        state
629                            .pubgrub
630                            .add_incompatibility(Incompatibility::custom_version(
631                                next_id,
632                                version.clone(),
633                                UnavailableReason::Version(reason),
634                            ));
635                    }
636                    ForkedDependencies::Unforked(dependencies) => {
637                        // Enrich the state with any URLs, etc.
638                        state
639                            .visit_package_version_dependencies(
640                                next_id,
641                                &version,
642                                &self.urls,
643                                &self.indexes,
644                                &dependencies,
645                                &self.git,
646                                &self.workspace_members,
647                                self.selector.resolution_strategy(),
648                            )
649                            .map_err(|err| {
650                                enrich_dependency_error(err, next_id, &version, &state.pubgrub)
651                            })?;
652
653                        // Emit a request to fetch the metadata for each registry package.
654                        self.visit_dependencies(&dependencies, &state, request_sink)
655                            .map_err(|err| {
656                                enrich_dependency_error(err, next_id, &version, &state.pubgrub)
657                            })?;
658
659                        // Add the dependencies to the state.
660                        state.add_package_version_dependencies(next_id, &version, dependencies);
661                    }
662                    ForkedDependencies::Forked {
663                        mut forks,
664                        diverging_packages,
665                    } => {
666                        debug!(
667                            "Pre-fork {} took {:.3}s",
668                            state.env,
669                            start.elapsed().as_secs_f32()
670                        );
671
672                        // Prioritize the forks.
673                        match (self.options.fork_strategy, self.options.resolution_mode) {
674                            (ForkStrategy::Fewest, _) | (_, ResolutionMode::Lowest) => {
675                                // Prefer solving forks with lower Python bounds, since they're more
676                                // likely to produce solutions that work for forks with higher
677                                // Python bounds (whereas the inverse is not true).
678                                forks.sort_by(|a, b| {
679                                    a.cmp_requires_python(b)
680                                        .reverse()
681                                        .then_with(|| a.cmp_upper_bounds(b))
682                                });
683                            }
684                            (ForkStrategy::RequiresPython, _) => {
685                                // Otherwise, prefer solving forks with higher Python bounds, since
686                                // we want to prioritize choosing the latest-compatible package
687                                // version for each Python version.
688                                forks.sort_by(|a, b| {
689                                    a.cmp_requires_python(b).then_with(|| a.cmp_upper_bounds(b))
690                                });
691                            }
692                        }
693
694                        for new_fork_state in self.forks_to_fork_states(
695                            state,
696                            &version,
697                            forks,
698                            request_sink,
699                            &diverging_packages,
700                        ) {
701                            forked_states.push(new_fork_state?);
702                        }
703                        continue 'FORK;
704                    }
705                }
706            }
707        }
708        if resolutions.len() > 1 {
709            info!(
710                "Solved your requirements for {} environments",
711                resolutions.len()
712            );
713        }
714        if tracing::enabled!(Level::DEBUG) {
715            for resolution in &resolutions {
716                if let Some(env) = resolution.env.end_user_fork_display() {
717                    let packages: FxHashSet<_> = resolution
718                        .nodes
719                        .keys()
720                        .map(|package| &package.name)
721                        .collect();
722                    debug!(
723                        "Distinct solution for {env} with {} package(s)",
724                        packages.len()
725                    );
726                }
727            }
728        }
729        for resolution in &resolutions {
730            Self::trace_resolution(resolution);
731        }
732        ResolverOutput::from_state(
733            &resolutions,
734            &self.requirements,
735            &self.constraints,
736            &self.overrides,
737            &self.preferences,
738            &self.index,
739            &self.git,
740            &self.python_requirement,
741            &self.conflicts,
742            self.selector.resolution_strategy(),
743            self.options.clone(),
744        )
745    }
746
747    /// Change the priority of often conflicting packages and backtrack.
748    ///
749    /// To be called after unit propagation.
750    fn reprioritize_conflicts(state: &mut ForkState) {
751        for package in state.conflict_tracker.prioritize.drain(..) {
752            let changed = state
753                .priorities
754                .mark_conflict_early(&state.pubgrub.package_store[package]);
755            if changed {
756                debug!(
757                    "Package {} has too many conflicts (affected), prioritizing",
758                    &state.pubgrub.package_store[package]
759                );
760            } else {
761                debug!(
762                    "Package {} has too many conflicts (affected), already {:?}",
763                    state.pubgrub.package_store[package],
764                    state.priorities.get(&state.pubgrub.package_store[package])
765                );
766            }
767        }
768
769        for package in state.conflict_tracker.deprioritize.drain(..) {
770            let changed = state
771                .priorities
772                .mark_conflict_late(&state.pubgrub.package_store[package]);
773            if changed {
774                debug!(
775                    "Package {} has too many conflicts (culprit), deprioritizing and backtracking",
776                    state.pubgrub.package_store[package],
777                );
778                let backtrack_level = state.pubgrub.backtrack_package(package);
779                if let Some(backtrack_level) = backtrack_level {
780                    debug!("Backtracked {backtrack_level} decisions");
781                } else {
782                    debug!(
783                        "Package {} is not decided, cannot backtrack",
784                        state.pubgrub.package_store[package]
785                    );
786                }
787            } else {
788                debug!(
789                    "Package {} has too many conflicts (culprit), already {:?}",
790                    state.pubgrub.package_store[package],
791                    state.priorities.get(&state.pubgrub.package_store[package])
792                );
793            }
794        }
795    }
796
797    /// When trace level logging is enabled, we dump the final
798    /// set of resolutions, including markers, to help with
799    /// debugging. Namely, this tells use precisely the state
800    /// emitted by the resolver before going off to construct a
801    /// resolution graph.
802    fn trace_resolution(combined: &Resolution) {
803        if !tracing::enabled!(Level::TRACE) {
804            return;
805        }
806        trace!("Resolution: {:?}", combined.env);
807        for edge in &combined.edges {
808            trace!(
809                "Resolution edge: {} -> {}",
810                edge.from
811                    .as_ref()
812                    .map(PackageName::as_str)
813                    .unwrap_or("ROOT"),
814                edge.to,
815            );
816            // The unwraps below are OK because `write`ing to
817            // a String can never fail (except for OOM).
818            let mut msg = String::new();
819            write!(msg, "{}", edge.from_version).unwrap();
820            if let Some(ref extra) = edge.from_extra {
821                write!(msg, " (extra: {extra})").unwrap();
822            }
823            if let Some(ref dev) = edge.from_group {
824                write!(msg, " (group: {dev})").unwrap();
825            }
826
827            write!(msg, " -> ").unwrap();
828
829            write!(msg, "{}", edge.to_version).unwrap();
830            if let Some(ref extra) = edge.to_extra {
831                write!(msg, " (extra: {extra})").unwrap();
832            }
833            if let Some(ref dev) = edge.to_group {
834                write!(msg, " (group: {dev})").unwrap();
835            }
836            if let Some(marker) = edge.marker.contents() {
837                write!(msg, " ; {marker}").unwrap();
838            }
839            trace!("Resolution edge:     {msg}");
840        }
841    }
842
843    /// Convert the dependency [`Fork`]s into [`ForkState`]s.
844    fn forks_to_fork_states<'a>(
845        &'a self,
846        current_state: ForkState,
847        version: &'a Version,
848        forks: Vec<Fork>,
849        request_sink: &'a Sender<Request>,
850        diverging_packages: &'a [PackageName],
851    ) -> impl Iterator<Item = Result<ForkState, ResolveError>> + 'a {
852        debug!(
853            "Splitting resolution on {}=={} over {} into {} resolution{} with separate markers",
854            current_state.pubgrub.package_store[current_state.next],
855            version,
856            diverging_packages
857                .iter()
858                .map(ToString::to_string)
859                .join(", "),
860            forks.len(),
861            if forks.len() == 1 { "" } else { "s" }
862        );
863        assert!(forks.len() >= 2);
864        // This is a somewhat tortured technique to ensure
865        // that our resolver state is only cloned as much
866        // as it needs to be. We basically move the state
867        // into `forked_states`, and then only clone it if
868        // there is at least one more fork to visit.
869        let package = current_state.next;
870        let mut cur_state = Some(current_state);
871        let forks_len = forks.len();
872        forks
873            .into_iter()
874            .enumerate()
875            .map(move |(i, fork)| {
876                let is_last = i == forks_len - 1;
877                let forked_state = cur_state.take().unwrap();
878                if !is_last {
879                    cur_state = Some(forked_state.clone());
880                }
881
882                let env = fork.env.clone();
883                (fork, forked_state.with_env(env))
884            })
885            .map(move |(fork, mut forked_state)| {
886                // Enrich the state with any URLs, etc.
887                forked_state
888                    .visit_package_version_dependencies(
889                        package,
890                        version,
891                        &self.urls,
892                        &self.indexes,
893                        &fork.dependencies,
894                        &self.git,
895                        &self.workspace_members,
896                        self.selector.resolution_strategy(),
897                    )
898                    .map_err(|err| {
899                        enrich_dependency_error(err, package, version, &forked_state.pubgrub)
900                    })?;
901
902                // Emit a request to fetch the metadata for each registry package.
903                self.visit_dependencies(&fork.dependencies, &forked_state, request_sink)
904                    .map_err(|err| {
905                        enrich_dependency_error(err, package, version, &forked_state.pubgrub)
906                    })?;
907
908                // Add the dependencies to the state.
909                forked_state.add_package_version_dependencies(package, version, fork.dependencies);
910
911                Ok(forked_state)
912            })
913    }
914
915    /// Convert the dependency [`Fork`]s into [`ForkState`]s.
916    #[expect(clippy::unused_self)]
917    fn version_forks_to_fork_states(
918        &self,
919        current_state: ForkState,
920        forks: Vec<VersionFork>,
921    ) -> impl Iterator<Item = ForkState> + '_ {
922        // This is a somewhat tortured technique to ensure
923        // that our resolver state is only cloned as much
924        // as it needs to be. We basically move the state
925        // into `forked_states`, and then only clone it if
926        // there is at least one more fork to visit.
927        let mut cur_state = Some(current_state);
928        let forks_len = forks.len();
929        forks.into_iter().enumerate().map(move |(i, fork)| {
930            let is_last = i == forks_len - 1;
931            let mut forked_state = cur_state.take().unwrap();
932            if !is_last {
933                cur_state = Some(forked_state.clone());
934            }
935            forked_state.initial_id = Some(fork.id);
936            forked_state.initial_version = fork.version;
937            forked_state.with_env(fork.env)
938        })
939    }
940
941    /// Visit a set of [`PubGrubDependency`] entities prior to selection.
942    fn visit_dependencies(
943        &self,
944        dependencies: &[PubGrubDependency],
945        state: &ForkState,
946        request_sink: &Sender<Request>,
947    ) -> Result<(), ResolveError> {
948        for dependency in dependencies {
949            let PubGrubDependency {
950                package,
951                version: _,
952                parent: _,
953                source: _,
954            } = dependency;
955            let url = package.name().and_then(|name| state.fork_urls.get(name));
956            let index = package.name().and_then(|name| state.fork_indexes.get(name));
957            self.visit_package(package, url, index, request_sink)?;
958        }
959        Ok(())
960    }
961
962    /// Visit a [`PubGrubPackage`] prior to selection. This should be called on a [`PubGrubPackage`]
963    /// before it is selected, to allow metadata to be fetched in parallel.
964    fn visit_package(
965        &self,
966        package: &PubGrubPackage,
967        url: Option<&VerbatimParsedUrl>,
968        index: Option<&IndexMetadata>,
969        request_sink: &Sender<Request>,
970    ) -> Result<(), ResolveError> {
971        // Ignore unresolved URL packages, i.e., packages that use a direct URL in some forks.
972        if url.is_none()
973            && package
974                .name()
975                .map(|name| self.urls.any_url(name))
976                .unwrap_or(true)
977        {
978            return Ok(());
979        }
980
981        self.request_package(package, url, index, request_sink)
982    }
983
984    fn request_package(
985        &self,
986        package: &PubGrubPackage,
987        url: Option<&VerbatimParsedUrl>,
988        index: Option<&IndexMetadata>,
989        request_sink: &Sender<Request>,
990    ) -> Result<(), ResolveError> {
991        // Only request real packages.
992        let Some(name) = package.name_no_root() else {
993            return Ok(());
994        };
995
996        if let Some(url) = url {
997            // Verify that the package is allowed under the hash-checking policy.
998            if !self.hasher.allows_url(&url.verbatim) {
999                return Err(ResolveError::UnhashedPackage(name.clone()));
1000            }
1001
1002            // Emit a request to fetch the metadata for this distribution.
1003            let dist = Dist::from_url(name.clone(), url.clone())?;
1004            if self.index.distributions().register(dist.distribution_id()) {
1005                request_sink.blocking_send(Request::Dist(dist))?;
1006            }
1007        } else if let Some(index) = index {
1008            // Emit a request to fetch the metadata for this package on the index.
1009            if self
1010                .index
1011                .explicit()
1012                .register((name.clone(), index.url().clone()))
1013            {
1014                request_sink.blocking_send(Request::Package(name.clone(), Some(index.clone())))?;
1015            }
1016        } else {
1017            // Emit a request to fetch the metadata for this package.
1018            if self.index.implicit().register(name.clone()) {
1019                request_sink.blocking_send(Request::Package(name.clone(), None))?;
1020            }
1021        }
1022        Ok(())
1023    }
1024
1025    /// Visit the set of [`PubGrubPackage`] candidates prior to selection. This allows us to fetch
1026    /// metadata for all packages in parallel.
1027    fn pre_visit<'data>(
1028        packages: impl Iterator<Item = (&'data PubGrubPackage, &'data Range<Version>)>,
1029        urls: &Urls,
1030        indexes: &Indexes,
1031        python_requirement: &PythonRequirement,
1032        request_sink: &Sender<Request>,
1033    ) -> Result<(), ResolveError> {
1034        // Iterate over the potential packages, and fetch file metadata for any of them. These
1035        // represent our current best guesses for the versions that we _might_ select.
1036        for (package, range) in packages {
1037            let PubGrubPackageInner::Package {
1038                name,
1039                extra: None,
1040                group: None,
1041                marker: MarkerTree::TRUE,
1042            } = &**package
1043            else {
1044                continue;
1045            };
1046            // Avoid pre-visiting packages that have any URLs in any fork. At this point we can't
1047            // tell whether they are registry distributions or which url they use.
1048            if urls.any_url(name) {
1049                continue;
1050            }
1051            // Avoid visiting packages that may use an explicit index.
1052            if indexes.contains_key(name) {
1053                continue;
1054            }
1055            request_sink.blocking_send(Request::Prefetch(
1056                name.clone(),
1057                range.clone(),
1058                python_requirement.clone(),
1059            ))?;
1060        }
1061        Ok(())
1062    }
1063
1064    /// Given a candidate package, choose the next version in range to try.
1065    ///
1066    /// Returns `None` when there are no versions in the given range, rejecting the current partial
1067    /// solution.
1068    // TODO(konsti): re-enable tracing. This trace is crucial to understanding the
1069    // tracing-durations-export diagrams, but it took ~5% resolver thread runtime for apache-airflow
1070    // when I last measured.
1071    #[cfg_attr(feature = "tracing-durations-export", instrument(skip_all, fields(%package)))]
1072    fn choose_version(
1073        &self,
1074        package: &PubGrubPackage,
1075        id: Id<PubGrubPackage>,
1076        index: Option<&IndexUrl>,
1077        range: &Range<Version>,
1078        pins: &mut FilePins,
1079        preferences: &Preferences,
1080        fork_urls: &ForkUrls,
1081        env: &ResolverEnvironment,
1082        python_requirement: &PythonRequirement,
1083        pubgrub: &State<UvDependencyProvider>,
1084        visited: &mut FxHashSet<PackageName>,
1085        request_sink: &Sender<Request>,
1086    ) -> Result<Option<ResolverVersion>, ResolveError> {
1087        match &**package {
1088            PubGrubPackageInner::Root(_) => {
1089                Ok(Some(ResolverVersion::Unforked(MIN_VERSION.clone())))
1090            }
1091
1092            PubGrubPackageInner::Python(_) => {
1093                // Dependencies on Python are only added when a package is incompatible; as such,
1094                // we don't need to do anything here.
1095                Ok(None)
1096            }
1097
1098            PubGrubPackageInner::System(_) => {
1099                // We don't care what the actual version is here, just that it's consistent across
1100                // the dependency graph.
1101                let Some(version) = range.as_singleton() else {
1102                    return Ok(None);
1103                };
1104                Ok(Some(ResolverVersion::Unforked(version.clone())))
1105            }
1106
1107            PubGrubPackageInner::Marker { name, .. }
1108            | PubGrubPackageInner::Extra { name, .. }
1109            | PubGrubPackageInner::Group { name, .. }
1110            | PubGrubPackageInner::Package { name, .. } => {
1111                if let Some(url) = package.name().and_then(|name| fork_urls.get(name)) {
1112                    self.choose_version_url(id, name, range, url, env, python_requirement, pubgrub)
1113                } else {
1114                    self.choose_version_registry(
1115                        package,
1116                        id,
1117                        name,
1118                        index,
1119                        range,
1120                        preferences,
1121                        env,
1122                        python_requirement,
1123                        pubgrub,
1124                        pins,
1125                        visited,
1126                        request_sink,
1127                    )
1128                }
1129            }
1130        }
1131    }
1132
    /// Select a version for a URL requirement. Since there is only one version per URL, we return
    /// that version if it is in range and `None` otherwise.
    ///
    /// May instead return a [`ResolverVersion::Unavailable`] carrying the reason the pinned
    /// distribution can't be used (missing platform coverage, incompatible wheel tag, or an
    /// unsatisfied `Requires-Python`), or an error if the metadata fetch itself failed.
    fn choose_version_url(
        &self,
        id: Id<PubGrubPackage>,
        name: &PackageName,
        range: &Range<Version>,
        url: &VerbatimParsedUrl,
        env: &ResolverEnvironment,
        python_requirement: &PythonRequirement,
        pubgrub: &State<UvDependencyProvider>,
    ) -> Result<Option<ResolverVersion>, ResolveError> {
        debug!(
            "Searching for a compatible version of {name} @ {} ({range})",
            url.verbatim
        );

        // Metadata for URL distributions is requested ahead of time, so block until the
        // in-flight fetch for this distribution completes.
        let dist = Dist::from_url(name.clone(), url.clone())?;
        let distribution_id = dist.distribution_id();
        let response = self
            .index
            .distributions()
            .wait_blocking(&distribution_id)
            .ok_or_else(|| ResolveError::UnregisteredTask(dist.to_string()))?;

        // If we failed to fetch the metadata for a URL, we can't proceed.
        let metadata = match &*response {
            MetadataResponse::Found(archive) => &archive.metadata,
            MetadataResponse::Unavailable(reason) => {
                // Record the reason so it can be surfaced in error reporting later.
                self.unavailable_packages
                    .insert(name.clone(), reason.into());
                return Ok(None);
            }
            // TODO(charlie): Add derivation chain for URL dependencies. In practice, this isn't
            // critical since we fetch URL dependencies _prior_ to invoking the resolver.
            MetadataResponse::Error(dist, err) => {
                return Err(ResolveError::Dist(
                    DistErrorKind::from_requested_dist(dist, &**err),
                    dist.clone(),
                    DerivationChain::default(),
                    err.clone(),
                ));
            }
        };

        let version = &metadata.version;

        // The version is incompatible with the requirement.
        if !range.contains(version) {
            return Ok(None);
        }

        // If the URL points to a pre-built wheel, and the wheel's supported Python versions don't
        // match our `Requires-Python`, mark it as incompatible.
        if let Dist::Built(dist) = &dist {
            // Extract the wheel filename, from which tags and implied markers are derived.
            let filename = match &dist {
                BuiltDist::Registry(dist) => &dist.best_wheel().filename,
                BuiltDist::DirectUrl(dist) => &dist.filename,
                BuiltDist::Path(dist) => &dist.filename,
            };

            // If the wheel does _not_ cover an environment that requires artifact coverage, it's
            // incompatible. This check only applies to universal resolutions (no concrete marker
            // environment) with at least one required artifact environment configured.
            if env.marker_environment().is_none() && !self.options.artifact_environments.is_empty()
            {
                let wheel_marker = implied_markers(filename);
                // If the caller marked an environment as requiring artifact coverage, ensure it
                // has coverage.
                for environment_marker in self.options.artifact_environments.iter().copied() {
                    // If the platform is part of the current environment...
                    if env.included_by_marker(environment_marker)
                        && !find_environments(id, pubgrub).is_disjoint(environment_marker)
                    {
                        // ...but the wheel doesn't support it, it's incompatible.
                        if wheel_marker.is_disjoint(environment_marker) {
                            return Ok(Some(ResolverVersion::Unavailable(
                                version.clone(),
                                UnavailableVersion::IncompatibleDist(IncompatibleDist::Wheel(
                                    IncompatibleWheel::MissingPlatform(environment_marker),
                                )),
                            )));
                        }
                    }
                }
            }

            // If the wheel's Python tag doesn't match the target Python, it's incompatible.
            if !python_requirement.target().matches_wheel_tag(filename) {
                return Ok(Some(ResolverVersion::Unavailable(
                    filename.version.clone(),
                    UnavailableVersion::IncompatibleDist(IncompatibleDist::Wheel(
                        IncompatibleWheel::Tag(IncompatibleTag::AbiPythonVersion),
                    )),
                )));
            }
        }

        // The version is incompatible due to its `Requires-Python` requirement.
        if let Some(requires_python) = metadata.requires_python.as_ref() {
            if !python_requirement.target().is_contained_by(requires_python) {
                // Report whether the installed or the target Python is the one at fault.
                let kind = if python_requirement.installed() == python_requirement.target() {
                    PythonRequirementKind::Installed
                } else {
                    PythonRequirementKind::Target
                };
                return Ok(Some(ResolverVersion::Unavailable(
                    version.clone(),
                    UnavailableVersion::IncompatibleDist(IncompatibleDist::Source(
                        IncompatibleSource::RequiresPython(requires_python.clone(), kind),
                    )),
                )));
            }
        }

        Ok(Some(ResolverVersion::Unforked(version.clone())))
    }
1249
    /// Given a candidate registry requirement, choose the next version in range to try, or `None`
    /// if there is no version in this range.
    ///
    /// Besides a concrete version, this may return an unavailable version (carrying the reason it
    /// was rejected) or a set of forks when a single version can't cover all target environments.
    fn choose_version_registry(
        &self,
        package: &PubGrubPackage,
        id: Id<PubGrubPackage>,
        name: &PackageName,
        index: Option<&IndexUrl>,
        range: &Range<Version>,
        preferences: &Preferences,
        env: &ResolverEnvironment,
        python_requirement: &PythonRequirement,
        pubgrub: &State<UvDependencyProvider>,
        pins: &mut FilePins,
        visited: &mut FxHashSet<PackageName>,
        request_sink: &Sender<Request>,
    ) -> Result<(), ResolveError> {
        // Wait for the metadata to be available.
        let versions_response = if let Some(index) = index {
            self.index
                .explicit()
                .wait_blocking(&(name.clone(), index.clone()))
                .ok_or_else(|| ResolveError::UnregisteredTask(name.to_string()))?
        } else {
            self.index
                .implicit()
                .wait_blocking(name)
                .ok_or_else(|| ResolveError::UnregisteredTask(name.to_string()))?
        };
        // Record that this package was visited, regardless of the outcome below.
        visited.insert(name.clone());

        // An unusable index response yields no version maps; record why the package is
        // unavailable so it can be surfaced in error reporting.
        let version_maps = match *versions_response {
            VersionsResponse::Found(ref version_maps) => version_maps.as_slice(),
            VersionsResponse::NoIndex => {
                self.unavailable_packages
                    .insert(name.clone(), UnavailablePackage::NoIndex);
                &[]
            }
            VersionsResponse::Offline => {
                self.unavailable_packages
                    .insert(name.clone(), UnavailablePackage::Offline);
                &[]
            }
            VersionsResponse::NotFound => {
                self.unavailable_packages
                    .insert(name.clone(), UnavailablePackage::NotFound);
                &[]
            }
        };

        debug!("Searching for a compatible version of {package} ({range})");

        // Find a version.
        let Some(candidate) = self.selector.select(
            name,
            range,
            version_maps,
            preferences,
            &self.installed_packages,
            &self.exclusions,
            index,
            env,
            self.tags.as_ref(),
        ) else {
            // Short circuit: we couldn't find _any_ versions for a package.
            return Ok(None);
        };

        let dist = match candidate.dist() {
            CandidateDist::Compatible(dist) => dist,
            CandidateDist::Incompatible {
                incompatible_dist: incompatibility,
                prioritized_dist: _,
            } => {
                // If the version is incompatible because no distributions are compatible, exit early.
                return Ok(Some(ResolverVersion::Unavailable(
                    candidate.version().clone(),
                    // TODO(charlie): We can avoid this clone; the candidate is dropped here and
                    // owns the incompatibility.
                    UnavailableVersion::IncompatibleDist(incompatibility.clone()),
                )));
            }
        };

        // Check whether the version is incompatible due to its Python requirement.
        if let Some((requires_python, incompatibility)) =
            Self::check_requires_python(dist, python_requirement)
        {
            // Under the `requires-python` fork strategy (and only in universal resolutions),
            // try splitting the Python requirement instead of rejecting the version outright.
            if matches!(self.options.fork_strategy, ForkStrategy::RequiresPython) {
                if env.marker_environment().is_none() {
                    let forks = fork_version_by_python_requirement(
                        requires_python,
                        python_requirement,
                        env,
                    );
                    if !forks.is_empty() {
                        debug!(
                            "Forking Python requirement `{}` on `{}` for {}=={} ({})",
                            python_requirement.target(),
                            requires_python,
                            name,
                            candidate.version(),
                            forks
                                .iter()
                                .map(ToString::to_string)
                                .collect::<Vec<_>>()
                                .join(", ")
                        );
                        let forks = forks
                            .into_iter()
                            .map(|env| VersionFork {
                                env,
                                id,
                                version: None,
                            })
                            .collect();
                        return Ok(Some(ResolverVersion::Forked(forks)));
                    }
                }
            }

            // No fork was possible: report the version as unavailable.
            return Ok(Some(ResolverVersion::Unavailable(
                candidate.version().clone(),
                UnavailableVersion::IncompatibleDist(incompatibility),
            )));
        }

        // Check whether this version covers all supported platforms; and, if not, generate a fork.
        if let Some(forked) = self.fork_version_registry(
            &candidate,
            dist,
            version_maps,
            package,
            id,
            name,
            index,
            range,
            preferences,
            env,
            pubgrub,
            pins,
            request_sink,
        )? {
            return Ok(Some(forked));
        }

        // Derive a human-readable filename for the log line below.
        let filename = match dist.for_installation() {
            ResolvedDistRef::InstallableRegistrySourceDist { sdist, .. } => sdist
                .filename()
                .unwrap_or(Cow::Borrowed("unknown filename")),
            ResolvedDistRef::InstallableRegistryBuiltDist { wheel, .. } => wheel
                .filename()
                .unwrap_or(Cow::Borrowed("unknown filename")),
            ResolvedDistRef::Installed { .. } => Cow::Borrowed("installed"),
        };

        debug!(
            "Selecting: {}=={} [{}] ({})",
            name,
            candidate.version(),
            candidate.choice_kind(),
            filename,
        );
        self.visit_candidate(&candidate, dist, package, name, pins, request_sink)?;

        let version = candidate.version().clone();
        Ok(Some(ResolverVersion::Unforked(version)))
    }
1418
    /// Determine whether a candidate covers all supported platforms; and, if not, generate a fork.
    ///
    /// This only ever applies to versions that lack source distributions. And, for now, we only
    /// apply it in two cases:
    ///
    /// 1. Local versions, where the non-local version has greater platform coverage. The intent is
    ///    such that, if we're resolving PyTorch, and we choose `torch==2.5.2+cpu`, we want to
    ///    fork so that we can select `torch==2.5.2` on macOS (since the `+cpu` variant doesn't
    ///    include any macOS wheels).
    /// 2. Platforms that the user explicitly marks as "required" (opt-in). For example, the user
    ///    might require that the generated resolution always includes wheels for x86 macOS, and
    ///    fails entirely if the platform is unsupported.
    ///
    /// Returns `Ok(None)` when no fork is necessary and the caller should proceed with the
    /// candidate as-is; returns `Ok(Some(..))` when the resolution should fork, report the
    /// version as unavailable, or substitute the non-local version.
    fn fork_version_registry(
        &self,
        candidate: &Candidate,
        dist: &CompatibleDist,
        version_maps: &[VersionMap],
        package: &PubGrubPackage,
        id: Id<PubGrubPackage>,
        name: &PackageName,
        index: Option<&IndexUrl>,
        range: &Range<Version>,
        preferences: &Preferences,
        env: &ResolverEnvironment,
        pubgrub: &State<UvDependencyProvider>,
        pins: &mut FilePins,
        request_sink: &Sender<Request>,
    ) -> Result<Option<ResolverVersion>, ResolveError> {
        // This only applies to universal resolutions.
        if env.marker_environment().is_some() {
            return Ok(None);
        }

        // If the package is already compatible with all environments (as is the case for
        // packages that include a source distribution), we don't need to fork.
        if dist.implied_markers().is_true() {
            return Ok(None);
        }

        // If the caller marked an environment as requiring artifact coverage, ensure it has
        // coverage.
        for marker in self.options.artifact_environments.iter().copied() {
            // If the platform is part of the current environment...
            if env.included_by_marker(marker) {
                // But isn't supported by the distribution...
                if dist.implied_markers().is_disjoint(marker)
                    && !find_environments(id, pubgrub).is_disjoint(marker)
                {
                    // Then we need to fork. If the environment can't be split on this marker,
                    // report the version as unavailable for the required platform instead.
                    let Some((left, right)) = fork_version_by_marker(env, marker) else {
                        return Ok(Some(ResolverVersion::Unavailable(
                            candidate.version().clone(),
                            UnavailableVersion::IncompatibleDist(IncompatibleDist::Wheel(
                                IncompatibleWheel::MissingPlatform(marker),
                            )),
                        )));
                    };

                    debug!(
                        "Forking on required platform `{}` for {}=={} ({})",
                        marker.try_to_string().unwrap_or_else(|| "true".to_string()),
                        name,
                        candidate.version(),
                        [&left, &right]
                            .iter()
                            .map(ToString::to_string)
                            .collect::<Vec<_>>()
                            .join(", ")
                    );
                    // No version is pinned for either side: both forks must re-select a version.
                    let forks = vec![
                        VersionFork {
                            env: left,
                            id,
                            version: None,
                        },
                        VersionFork {
                            env: right,
                            id,
                            version: None,
                        },
                    ];
                    return Ok(Some(ResolverVersion::Forked(forks)));
                }
            }
        }

        // For now, we only apply this to local versions.
        if !candidate.version().is_local() {
            return Ok(None);
        }

        debug!(
            "Looking at local version: {}=={}",
            name,
            candidate.version()
        );

        // If there's a non-local version... (restrict the range to the candidate's own version
        // with the local segment stripped, e.g. `2.5.2+cpu` -> `2.5.2`).
        let range = range.clone().intersection(&Range::singleton(
            candidate.version().clone().without_local(),
        ));

        // ...select the best-matching candidate for that non-local version...
        let Some(base_candidate) = self.selector.select(
            name,
            &range,
            version_maps,
            preferences,
            &self.installed_packages,
            &self.exclusions,
            index,
            env,
            self.tags.as_ref(),
        ) else {
            return Ok(None);
        };
        let CandidateDist::Compatible(base_dist) = base_candidate.dist() else {
            return Ok(None);
        };

        // ...and the non-local version has greater platform support...
        // (`remainder` is the set of environments supported by the base distribution but not by
        // the local one.)
        let mut remainder = {
            let mut remainder = base_dist.implied_markers();
            remainder.and(dist.implied_markers().negate());
            remainder
        };
        if remainder.is_false() {
            return Ok(None);
        }

        // If the remainder isn't relevant to the current environment, there's no need to fork.
        // For example, if we're solving for `sys_platform == 'darwin'` but the remainder is
        // `sys_platform == 'linux'`, we don't need to fork.
        if !env.included_by_marker(remainder) {
            return Ok(None);
        }

        // Similarly, if the local distribution is incompatible with the current environment, then
        // use the base distribution instead (but don't fork).
        if !env.included_by_marker(dist.implied_markers()) {
            let filename = match dist.for_installation() {
                ResolvedDistRef::InstallableRegistrySourceDist { sdist, .. } => sdist
                    .filename()
                    .unwrap_or(Cow::Borrowed("unknown filename")),
                ResolvedDistRef::InstallableRegistryBuiltDist { wheel, .. } => wheel
                    .filename()
                    .unwrap_or(Cow::Borrowed("unknown filename")),
                ResolvedDistRef::Installed { .. } => Cow::Borrowed("installed"),
            };

            debug!(
                "Preferring non-local candidate: {}=={} [{}] ({})",
                name,
                base_candidate.version(),
                base_candidate.choice_kind(),
                filename,
            );
            self.visit_candidate(
                &base_candidate,
                base_dist,
                package,
                name,
                pins,
                request_sink,
            )?;

            return Ok(Some(ResolverVersion::Unforked(
                base_candidate.version().clone(),
            )));
        }

        // If the implied markers includes _some_ macOS environments, but the remainder doesn't,
        // then we can extend the implied markers to include _all_ macOS environments. Same goes for
        // Linux and Windows.
        //
        // The idea here is that the base version could support (e.g.) ARM macOS, but not Intel
        // macOS. But if _neither_ version supports Intel macOS, we'd rather use `sys_platform == 'darwin'`
        // instead of `sys_platform == 'darwin' and platform_machine == 'arm64'`, since it's much
        // simpler, and _neither_ version will succeed with Intel macOS anyway.
        for value in [
            arcstr::literal!("darwin"),
            arcstr::literal!("linux"),
            arcstr::literal!("win32"),
        ] {
            let sys_platform = MarkerTree::expression(MarkerExpression::String {
                key: MarkerValueString::SysPlatform,
                operator: MarkerOperator::Equal,
                value,
            });
            if dist.implied_markers().is_disjoint(sys_platform)
                && !remainder.is_disjoint(sys_platform)
            {
                remainder.or(sys_platform);
            }
        }

        // Otherwise, we need to fork.
        let Some((base_env, local_env)) = fork_version_by_marker(env, remainder) else {
            return Ok(None);
        };

        debug!(
            "Forking platform for {}=={} ({})",
            name,
            candidate.version(),
            [&base_env, &local_env]
                .iter()
                .map(ToString::to_string)
                .collect::<Vec<_>>()
                .join(", ")
        );
        // Pin (and request metadata for) both the local and the base candidates, since each fork
        // will proceed with its respective version.
        self.visit_candidate(candidate, dist, package, name, pins, request_sink)?;
        self.visit_candidate(
            &base_candidate,
            base_dist,
            package,
            name,
            pins,
            request_sink,
        )?;

        // Unlike the required-platform forks above, each side carries a pre-selected version.
        let forks = vec![
            VersionFork {
                env: base_env.clone(),
                id,
                version: Some(base_candidate.version().clone()),
            },
            VersionFork {
                env: local_env.clone(),
                id,
                version: Some(candidate.version().clone()),
            },
        ];
        Ok(Some(ResolverVersion::Forked(forks)))
    }
1653
1654    /// Visit a selected candidate.
1655    fn visit_candidate(
1656        &self,
1657        candidate: &Candidate,
1658        dist: &CompatibleDist,
1659        package: &PubGrubPackage,
1660        name: &PackageName,
1661        pins: &mut FilePins,
1662        request_sink: &Sender<Request>,
1663    ) -> Result<(), ResolveError> {
1664        // We want to return a package pinned to a specific version; but we _also_ want to
1665        // store the exact file that we selected to satisfy that version.
1666        pins.insert(candidate, dist);
1667
1668        // Emit a request to fetch the metadata for this version.
1669        if matches!(&**package, PubGrubPackageInner::Package { .. }) {
1670            if self.dependency_mode.is_transitive() {
1671                let dist = dist.for_resolution();
1672                if self.index.distributions().register(dist.distribution_id()) {
1673                    if name != dist.name() {
1674                        return Err(ResolveError::MismatchedPackageName {
1675                            request: "distribution",
1676                            expected: name.clone(),
1677                            actual: dist.name().clone(),
1678                        });
1679                    }
1680                    // Verify that the package is allowed under the hash-checking policy.
1681                    if !self
1682                        .hasher
1683                        .allows_package(candidate.name(), candidate.version())
1684                    {
1685                        return Err(ResolveError::UnhashedPackage(candidate.name().clone()));
1686                    }
1687
1688                    let request = Request::from(dist);
1689                    request_sink.blocking_send(request)?;
1690                }
1691            }
1692        }
1693
1694        Ok(())
1695    }
1696
1697    /// Check if the distribution is incompatible with the Python requirement, and if so, return
1698    /// the incompatibility.
1699    fn check_requires_python<'dist>(
1700        dist: &'dist CompatibleDist,
1701        python_requirement: &PythonRequirement,
1702    ) -> Option<(&'dist VersionSpecifiers, IncompatibleDist)> {
1703        let requires_python = dist.requires_python()?;
1704        if python_requirement.target().is_contained_by(requires_python) {
1705            None
1706        } else {
1707            let incompatibility = if matches!(dist, CompatibleDist::CompatibleWheel { .. }) {
1708                IncompatibleDist::Wheel(IncompatibleWheel::RequiresPython(
1709                    requires_python.clone(),
1710                    if python_requirement.installed() == python_requirement.target() {
1711                        PythonRequirementKind::Installed
1712                    } else {
1713                        PythonRequirementKind::Target
1714                    },
1715                ))
1716            } else {
1717                IncompatibleDist::Source(IncompatibleSource::RequiresPython(
1718                    requires_python.clone(),
1719                    if python_requirement.installed() == python_requirement.target() {
1720                        PythonRequirementKind::Installed
1721                    } else {
1722                        PythonRequirementKind::Target
1723                    },
1724                ))
1725            };
1726            Some((requires_python, incompatibility))
1727        }
1728    }
1729
1730    /// Given a candidate package and version, return its dependencies.
1731    #[instrument(skip_all, fields(%package, %version))]
1732    fn get_dependencies_forking(
1733        &self,
1734        id: Id<PubGrubPackage>,
1735        package: &PubGrubPackage,
1736        version: &Version,
1737        pins: &FilePins,
1738        fork_urls: &ForkUrls,
1739        env: &ResolverEnvironment,
1740        python_requirement: &PythonRequirement,
1741        pubgrub: &State<UvDependencyProvider>,
1742    ) -> Result<ForkedDependencies, ResolveError> {
1743        let result = self.get_dependencies(
1744            id,
1745            package,
1746            version,
1747            pins,
1748            fork_urls,
1749            env,
1750            python_requirement,
1751            pubgrub,
1752        );
1753        if env.marker_environment().is_some() {
1754            result.map(|deps| match deps {
1755                Dependencies::Available(deps) | Dependencies::Unforkable(deps) => {
1756                    ForkedDependencies::Unforked(deps)
1757                }
1758                Dependencies::Unavailable(err) => ForkedDependencies::Unavailable(err),
1759            })
1760        } else {
1761            Ok(result?.fork(env, python_requirement, &self.conflicts))
1762        }
1763    }
1764
    /// Given a candidate package and version, return its dependencies.
    ///
    /// For concrete packages, this blocks until the pinned distribution's metadata is available,
    /// then flattens its `requires-dist` (plus dependency groups and, for CUDA torch backends,
    /// any index-derived system dependencies) into PubGrub dependencies. Virtual packages
    /// (markers, extras, groups) expand into dependencies on their base package instead, without
    /// fetching any metadata.
    #[instrument(skip_all, fields(%package, %version))]
    fn get_dependencies(
        &self,
        id: Id<PubGrubPackage>,
        package: &PubGrubPackage,
        version: &Version,
        pins: &FilePins,
        fork_urls: &ForkUrls,
        env: &ResolverEnvironment,
        python_requirement: &PythonRequirement,
        pubgrub: &State<UvDependencyProvider>,
    ) -> Result<Dependencies, ResolveError> {
        let dependencies = match &**package {
            // The synthetic root: its dependencies are the user's direct requirements.
            PubGrubPackageInner::Root(_) => {
                let no_dev_deps = BTreeMap::default();
                let requirements = self.flatten_requirements(
                    &self.requirements,
                    &no_dev_deps,
                    None,
                    None,
                    None,
                    env,
                    python_requirement,
                );

                requirements
                    .flat_map(move |requirement| {
                        PubGrubDependency::from_requirement(
                            &self.conflicts,
                            requirement,
                            None,
                            Some(package),
                        )
                    })
                    .collect()
            }

            PubGrubPackageInner::Package {
                name,
                extra,
                group,
                marker: _,
            } => {
                // If we're excluding transitive dependencies, short-circuit.
                if self.dependency_mode.is_direct() {
                    return Ok(Dependencies::Unforkable(Vec::default()));
                }

                // Look up the distribution ID from the pins (common case) or fork URLs.
                // `owned_id` keeps an ID built from a fork URL alive long enough to borrow.
                let owned_id;
                let distribution_id = if let Some((_, metadata_id)) =
                    pins.dist_and_id(name, version)
                {
                    metadata_id
                } else if let Some(url) = fork_urls.get(name) {
                    let dist = Dist::from_url(name.clone(), url.clone())?;
                    owned_id = dist.distribution_id();
                    &owned_id
                } else {
                    debug_assert!(
                        false,
                        "Dependencies were requested for a package without a pinned distribution"
                    );
                    return Err(ResolveError::UnregisteredTask(format!("{name}=={version}")));
                };

                // If the package does not exist in the registry or locally, we cannot fetch its dependencies
                if self.dependency_mode.is_transitive()
                    && self.unavailable_packages.get(name).is_some()
                    && self.installed_packages.get_packages(name).is_empty()
                {
                    debug_assert!(
                        false,
                        "Dependencies were requested for a package that is not available"
                    );
                    return Err(ResolveError::PackageUnavailable(name.clone()));
                }

                // Wait for the metadata to be available.
                let response = self
                    .index
                    .distributions()
                    .wait_blocking(distribution_id)
                    .ok_or_else(|| ResolveError::UnregisteredTask(format!("{name}=={version}")))?;

                let metadata = match &*response {
                    MetadataResponse::Found(archive) => &archive.metadata,
                    // Recoverable: record the failure and report the version as unavailable.
                    MetadataResponse::Unavailable(reason) => {
                        let unavailable_version = UnavailableVersion::from(reason);
                        let message = unavailable_version.singular_message();
                        if let Some(err) = reason.source() {
                            // Show the detailed error for metadata parse errors.
                            warn!("{name} {message}: {err}");
                        } else {
                            warn!("{name} {message}");
                        }
                        self.incomplete_packages
                            .entry(name.clone())
                            .or_default()
                            .insert(version.clone(), reason.clone());
                        return Ok(Dependencies::Unavailable(unavailable_version));
                    }
                    // Unrecoverable: attach a derivation chain for diagnostics and fail.
                    MetadataResponse::Error(dist, err) => {
                        let chain = DerivationChainBuilder::from_state(id, version, pubgrub)
                            .unwrap_or_default();
                        return Err(ResolveError::Dist(
                            DistErrorKind::from_requested_dist(dist, &**err),
                            dist.clone(),
                            chain,
                            err.clone(),
                        ));
                    }
                };

                // If there was no requires-python on the index page, we may have an incompatible
                // distribution.
                if let Some(requires_python) = &metadata.requires_python {
                    if !python_requirement.target().is_contained_by(requires_python) {
                        return Ok(Dependencies::Unavailable(
                            UnavailableVersion::RequiresPython(requires_python.clone()),
                        ));
                    }
                }

                // Identify any system dependencies based on the index URL. This only applies when
                // a CUDA torch backend is configured and the package has a known system dependency.
                let system_dependencies = self
                    .options
                    .torch_backend
                    .as_ref()
                    .filter(|torch_backend| matches!(torch_backend, TorchStrategy::Cuda { .. }))
                    .filter(|torch_backend| torch_backend.has_system_dependency(name))
                    .and_then(|_| pins.get(name, version).and_then(ResolvedDist::index))
                    .map(IndexUrl::url)
                    .and_then(SystemDependency::from_index)
                    .into_iter()
                    .inspect(|system_dependency| {
                        debug!(
                            "Adding system dependency `{}` for `{package}@{version}`",
                            system_dependency
                        );
                    })
                    .map(PubGrubDependency::from);

                let requirements = self.flatten_requirements(
                    &metadata.requires_dist,
                    &metadata.dependency_groups,
                    extra.as_ref(),
                    group.as_ref(),
                    Some(name),
                    env,
                    python_requirement,
                );

                // Drop user-excluded packages, then convert to PubGrub dependencies.
                requirements
                    .filter(|requirement| !self.excludes.contains(&requirement.name))
                    .flat_map(|requirement| {
                        PubGrubDependency::from_requirement(
                            &self.conflicts,
                            requirement,
                            group.as_ref(),
                            Some(package),
                        )
                    })
                    .chain(system_dependencies)
                    .collect()
            }

            PubGrubPackageInner::Python(_) => return Ok(Dependencies::Unforkable(Vec::default())),

            PubGrubPackageInner::System(_) => return Ok(Dependencies::Unforkable(Vec::default())),

            // Add a dependency on both the marker and base package.
            PubGrubPackageInner::Marker { name, marker } => {
                return Ok(Dependencies::Unforkable(
                    [MarkerTree::TRUE, *marker]
                        .into_iter()
                        .map(move |marker| PubGrubDependency {
                            package: PubGrubPackage::from(PubGrubPackageInner::Package {
                                name: name.clone(),
                                extra: None,
                                group: None,
                                marker,
                            }),
                            // Both dependencies are pinned to the exact same version.
                            version: Range::singleton(version.clone()),
                            parent: None,
                            source: DependencySource::Unspecified,
                        })
                        .collect(),
                ));
            }

            // Add a dependency on both the extra and base package, with and without the marker.
            PubGrubPackageInner::Extra {
                name,
                extra,
                marker,
            } => {
                return Ok(Dependencies::Unforkable(
                    // `dedup` collapses the pair when the marker is already `TRUE`.
                    [MarkerTree::TRUE, *marker]
                        .into_iter()
                        .dedup()
                        .flat_map(move |marker| {
                            [None, Some(extra)]
                                .into_iter()
                                .map(move |extra| PubGrubDependency {
                                    package: PubGrubPackage::from(PubGrubPackageInner::Package {
                                        name: name.clone(),
                                        extra: extra.cloned(),
                                        group: None,
                                        marker,
                                    }),
                                    version: Range::singleton(version.clone()),
                                    parent: None,
                                    source: DependencySource::Unspecified,
                                })
                        })
                        .collect(),
                ));
            }

            // Add a dependency on the dependency group, with and without the marker.
            PubGrubPackageInner::Group {
                name,
                group,
                marker,
            } => {
                return Ok(Dependencies::Unforkable(
                    // `dedup` collapses the pair when the marker is already `TRUE`.
                    [MarkerTree::TRUE, *marker]
                        .into_iter()
                        .dedup()
                        .map(|marker| PubGrubDependency {
                            package: PubGrubPackage::from(PubGrubPackageInner::Package {
                                name: name.clone(),
                                extra: None,
                                group: Some(group.clone()),
                                marker,
                            }),
                            version: Range::singleton(version.clone()),
                            parent: None,
                            source: DependencySource::Unspecified,
                        })
                        .collect(),
                ));
            }
        };
        Ok(Dependencies::Available(dependencies))
    }
2013
    /// The regular and dev dependencies filtered by Python version and the markers of this fork,
    /// plus the extras dependencies of the current package (e.g., `black` depending on
    /// `black[colorama]`).
    ///
    /// When a package depends on itself with extras (a "recursive" dependency), the extras are
    /// expanded transitively via a worklist and the self-dependencies are flattened away, while
    /// any version constraints carried by those self-dependencies are retained.
    ///
    /// The nested `Either`s exist solely to unify the three possible iterator types into a
    /// single return type.
    fn flatten_requirements<'a>(
        &'a self,
        dependencies: &'a [Requirement],
        dev_dependencies: &'a BTreeMap<GroupName, Box<[Requirement]>>,
        extra: Option<&'a ExtraName>,
        dev: Option<&'a GroupName>,
        name: Option<&PackageName>,
        env: &'a ResolverEnvironment,
        python_requirement: &'a PythonRequirement,
    ) -> impl Iterator<Item = Cow<'a, Requirement>> {
        let python_marker = python_requirement.to_marker_tree();

        if let Some(dev) = dev {
            // Dependency groups can include the project itself, so no need to flatten recursive
            // dependencies.
            Either::Left(Either::Left(self.requirements_for_extra(
                dev_dependencies.get(dev).into_iter().flatten(),
                extra,
                env,
                python_marker,
                python_requirement,
            )))
        } else if !dependencies
            .iter()
            .any(|req| name == Some(&req.name) && !req.extras.is_empty())
        {
            // If the project doesn't define any recursive dependencies, take the fast path.
            Either::Left(Either::Right(self.requirements_for_extra(
                dependencies.iter(),
                extra,
                env,
                python_marker,
                python_requirement,
            )))
        } else {
            // Slow path: the project depends on itself with extras, so expand them.
            let mut requirements = self
                .requirements_for_extra(
                    dependencies.iter(),
                    extra,
                    env,
                    python_marker,
                    python_requirement,
                )
                .collect::<Vec<_>>();

            // Transitively process all extras that are recursively included, starting with the current
            // extra. The `seen` set keys on (extra, marker) so the same extra can be revisited
            // under a different marker context.
            let mut seen = FxHashSet::<(ExtraName, MarkerTree)>::default();
            let mut queue: VecDeque<_> = requirements
                .iter()
                .filter(|req| name == Some(&req.name))
                .flat_map(|req| req.extras.iter().cloned().map(|extra| (extra, req.marker)))
                .collect();
            while let Some((extra, marker)) = queue.pop_front() {
                if !seen.insert((extra.clone(), marker)) {
                    continue;
                }
                for requirement in self.requirements_for_extra(
                    dependencies,
                    Some(&extra),
                    env,
                    python_marker,
                    python_requirement,
                ) {
                    // Conjoin the requirement's marker with the marker under which the extra was
                    // reached, cloning only when the requirement was borrowed.
                    let requirement = match requirement {
                        Cow::Owned(mut requirement) => {
                            requirement.marker.and(marker);
                            requirement
                        }
                        Cow::Borrowed(requirement) => {
                            let mut marker = marker;
                            marker.and(requirement.marker);
                            Requirement {
                                name: requirement.name.clone(),
                                extras: requirement.extras.clone(),
                                groups: requirement.groups.clone(),
                                source: requirement.source.clone(),
                                origin: requirement.origin.clone(),
                                marker: marker.simplify_extras(slice::from_ref(&extra)),
                            }
                        }
                    };
                    if name == Some(&requirement.name) {
                        // Add each transitively included extra.
                        queue.extend(
                            requirement
                                .extras
                                .iter()
                                .cloned()
                                .map(|extra| (extra, requirement.marker)),
                        );
                    } else {
                        // Add the requirements for that extra.
                        requirements.push(Cow::Owned(requirement));
                    }
                }
            }

            // Retain any self-constraints for that extra, e.g., if `project[foo]` includes
            // `project[bar]>1.0`, as a dependency, we need to propagate `project>1.0`, in addition to
            // transitively expanding `project[bar]`.
            let mut self_constraints = vec![];
            for req in &requirements {
                if name == Some(&req.name) && !req.source.is_empty() {
                    self_constraints.push(Requirement {
                        name: req.name.clone(),
                        extras: Box::new([]),
                        groups: req.groups.clone(),
                        source: req.source.clone(),
                        origin: req.origin.clone(),
                        marker: req.marker,
                    });
                }
            }

            // Drop all the self-requirements now that we flattened them out.
            requirements.retain(|req| name != Some(&req.name) || req.extras.is_empty());
            requirements.extend(self_constraints.into_iter().map(Cow::Owned));

            Either::Right(requirements.into_iter())
        }
    }
2139
2140    /// The set of the regular and dev dependencies, filtered by Python version,
2141    /// the markers of this fork and the requested extra.
2142    fn requirements_for_extra<'data, 'parameters>(
2143        &'data self,
2144        dependencies: impl IntoIterator<Item = &'data Requirement> + 'parameters,
2145        extra: Option<&'parameters ExtraName>,
2146        env: &'parameters ResolverEnvironment,
2147        python_marker: MarkerTree,
2148        python_requirement: &'parameters PythonRequirement,
2149    ) -> impl Iterator<Item = Cow<'data, Requirement>> + 'parameters
2150    where
2151        'data: 'parameters,
2152    {
2153        self.overrides
2154            .apply(dependencies)
2155            .filter(move |requirement| {
2156                Self::is_requirement_applicable(
2157                    requirement,
2158                    extra,
2159                    env,
2160                    python_marker,
2161                    python_requirement,
2162                )
2163            })
2164            .flat_map(move |requirement| {
2165                iter::once(requirement.clone()).chain(self.constraints_for_requirement(
2166                    requirement,
2167                    extra,
2168                    env,
2169                    python_marker,
2170                    python_requirement,
2171                ))
2172            })
2173    }
2174
2175    /// Whether a requirement is applicable for the Python version, the markers of this fork and the
2176    /// requested extra.
2177    fn is_requirement_applicable(
2178        requirement: &Requirement,
2179        extra: Option<&ExtraName>,
2180        env: &ResolverEnvironment,
2181        python_marker: MarkerTree,
2182        python_requirement: &PythonRequirement,
2183    ) -> bool {
2184        // If the requirement isn't relevant for the current platform, skip it.
2185        match extra {
2186            Some(source_extra) => {
2187                // Only include requirements that are relevant for the current extra.
2188                if requirement.evaluate_markers(env.marker_environment(), &[]) {
2189                    return false;
2190                }
2191                if !requirement
2192                    .evaluate_markers(env.marker_environment(), slice::from_ref(source_extra))
2193                {
2194                    return false;
2195                }
2196                if !env.included_by_group(ConflictItemRef::from((&requirement.name, source_extra)))
2197                {
2198                    return false;
2199                }
2200            }
2201            None => {
2202                if !requirement.evaluate_markers(env.marker_environment(), &[]) {
2203                    return false;
2204                }
2205            }
2206        }
2207
2208        // If the requirement would not be selected with any Python version
2209        // supported by the root, skip it.
2210        if python_marker.is_disjoint(requirement.marker) {
2211            trace!(
2212                "Skipping {requirement} because of Requires-Python: {requires_python}",
2213                requires_python = python_requirement.target(),
2214            );
2215            return false;
2216        }
2217
2218        // If we're in a fork in universal mode, ignore any dependency that isn't part of
2219        // this fork (but will be part of another fork).
2220        if !env.included_by_marker(requirement.marker) {
2221            trace!("Skipping {requirement} because of {env}");
2222            return false;
2223        }
2224
2225        true
2226    }
2227
    /// The constraints applicable to the requirement, filtered by Python version, the markers of
    /// this fork and the requested extra.
    ///
    /// Each yielded constraint carries the conjunction of its own marker and the requirement's
    /// marker: given `requests ; sys_platform == 'darwin'` constrained by
    /// `requests ; python_version == '3.6'`, the constraint should only apply when _both_
    /// markers are true. Constraints whose combined marker is unsatisfiable, falls outside
    /// `Requires-Python`, or falls outside this fork are dropped.
    fn constraints_for_requirement<'data, 'parameters>(
        &'data self,
        requirement: Cow<'data, Requirement>,
        extra: Option<&'parameters ExtraName>,
        env: &'parameters ResolverEnvironment,
        python_marker: MarkerTree,
        python_requirement: &'parameters PythonRequirement,
    ) -> impl Iterator<Item = Cow<'data, Requirement>> + 'parameters
    where
        'data: 'parameters,
    {
        // Look up all constraints registered under this package name; a missing entry yields
        // an empty iterator.
        self.constraints
            .get(&requirement.name)
            .into_iter()
            .flatten()
            .filter_map(move |constraint| {
                // Combine the constraint's marker with the requirement's marker, borrowing the
                // constraint unchanged when no rewrite is needed.
                let constraint = if constraint.marker.is_true() {
                    // The constraint is unconditional; it only needs a rewrite if the
                    // requirement itself carries a marker (the constraint should only apply
                    // when the requirement's marker is true).
                    if requirement.marker.is_true() {
                        Cow::Borrowed(constraint)
                    } else {
                        let mut marker = constraint.marker;
                        marker.and(requirement.marker);

                        // NOTE: with `constraint.marker` true, this can only trip if the
                        // requirement's marker is itself unsatisfiable.
                        if marker.is_false() {
                            trace!(
                                "Skipping {constraint} because of disjoint markers: `{}` vs. `{}`",
                                constraint.marker.try_to_string().unwrap(),
                                requirement.marker.try_to_string().unwrap(),
                            );
                            return None;
                        }

                        // Clone the constraint with the combined marker attached.
                        Cow::Owned(Requirement {
                            name: constraint.name.clone(),
                            extras: constraint.extras.clone(),
                            groups: constraint.groups.clone(),
                            source: constraint.source.clone(),
                            origin: constraint.origin.clone(),
                            marker,
                        })
                    }
                } else {
                    let requires_python = python_requirement.target();

                    // The constraint is conditional: conjoin both markers, since the
                    // constraint should only apply when _both_ markers are true.
                    let mut marker = constraint.marker;
                    marker.and(requirement.marker);

                    if marker.is_false() {
                        trace!(
                            "Skipping {constraint} because of disjoint markers: `{}` vs. `{}`",
                            constraint.marker.try_to_string().unwrap(),
                            requirement.marker.try_to_string().unwrap(),
                        );
                        return None;
                    }

                    // If the combined marker can never be selected with any Python version
                    // supported by the root, skip the constraint.
                    if python_marker.is_disjoint(marker) {
                        trace!(
                            "Skipping constraint {requirement} because of Requires-Python: {requires_python}"
                        );
                        return None;
                    }

                    // Only allocate a rewritten constraint when the conjunction actually
                    // changed the marker.
                    if marker == constraint.marker {
                        Cow::Borrowed(constraint)
                    } else {
                        Cow::Owned(Requirement {
                            name: constraint.name.clone(),
                            extras: constraint.extras.clone(),
                            groups: constraint.groups.clone(),
                            source: constraint.source.clone(),
                            origin: constraint.origin.clone(),
                            marker,
                        })
                    }
                };

                // If we're in a fork in universal mode, ignore any dependency that isn't part of
                // this fork (but will be part of another fork).
                if !env.included_by_marker(constraint.marker) {
                    trace!("Skipping {constraint} because of {env}");
                    return None;
                }

                // If the constraint isn't relevant for the current platform, skip it.
                match extra {
                    Some(source_extra) => {
                        if !constraint
                            .evaluate_markers(env.marker_environment(), slice::from_ref(source_extra))
                        {
                            return None;
                        }
                        // Respect conflict-driven exclusions for this (package, extra) pair.
                        if !env.included_by_group(ConflictItemRef::from((&requirement.name, source_extra)))
                        {
                            return None;
                        }
                    }
                    None => {
                        if !constraint.evaluate_markers(env.marker_environment(), &[]) {
                            return None;
                        }
                    }
                }

                Some(constraint)
            })
    }
2345
2346    /// Fetch the metadata for a stream of packages and versions.
2347    async fn fetch<Provider: ResolverProvider>(
2348        self: Arc<Self>,
2349        provider: Arc<Provider>,
2350        request_stream: Receiver<Request>,
2351    ) -> Result<(), ResolveError> {
2352        let mut response_stream = ReceiverStream::new(request_stream)
2353            .map(|request| self.process_request(request, &*provider).boxed_local())
2354            // Allow as many futures as possible to start in the background.
2355            // Backpressure is provided by at a more granular level by `DistributionDatabase`
2356            // and `SourceDispatch`, as well as the bounded request channel.
2357            .buffer_unordered(usize::MAX);
2358
2359        while let Some(response) = response_stream.next().await {
2360            match response? {
2361                Some(Response::Package(name, index, version_map)) => {
2362                    trace!("Received package metadata for: {name}");
2363                    if let Some(index) = index {
2364                        self.index
2365                            .explicit()
2366                            .done((name, index), Arc::new(version_map));
2367                    } else {
2368                        self.index.implicit().done(name, Arc::new(version_map));
2369                    }
2370                }
2371                Some(Response::Installed { dist, metadata }) => {
2372                    trace!("Received installed distribution metadata for: {dist}");
2373                    self.index
2374                        .distributions()
2375                        .done(dist.distribution_id(), Arc::new(metadata));
2376                }
2377                Some(Response::Dist { dist, metadata }) => {
2378                    let dist_kind = match dist {
2379                        Dist::Built(_) => "built",
2380                        Dist::Source(_) => "source",
2381                    };
2382                    trace!("Received {dist_kind} distribution metadata for: {dist}");
2383                    if let MetadataResponse::Unavailable(reason) = &metadata {
2384                        let message = UnavailableVersion::from(reason).singular_message();
2385                        if let Some(err) = reason.source() {
2386                            // Show the detailed error for metadata parse errors.
2387                            warn!("{dist} {message}: {err}");
2388                        } else {
2389                            warn!("{dist} {message}");
2390                        }
2391                    }
2392                    self.index
2393                        .distributions()
2394                        .done(dist.distribution_id(), Arc::new(metadata));
2395                }
2396                None => {}
2397            }
2398        }
2399
2400        Ok::<(), ResolveError>(())
2401    }
2402
    /// Process a single metadata [`Request`], returning the corresponding [`Response`].
    ///
    /// Returns `Ok(None)` when the request short-circuits (e.g., the package is unavailable,
    /// no compatible candidate exists, or the distribution is already registered). Errors are
    /// raised for client failures and for metadata whose package name does not match the
    /// requested name.
    #[instrument(skip_all, fields(%request))]
    async fn process_request<Provider: ResolverProvider>(
        &self,
        request: Request,
        provider: &Provider,
    ) -> Result<Option<Response>, ResolveError> {
        match request {
            // Fetch package metadata from the registry.
            Request::Package(package_name, index) => {
                let package_versions = provider
                    .get_package_versions(&package_name, index.as_ref())
                    .boxed_local()
                    .await
                    .map_err(ResolveError::Client)?;

                Ok(Some(Response::Package(
                    package_name,
                    index.map(IndexMetadata::into_url),
                    package_versions,
                )))
            }

            // Fetch distribution metadata from the distribution database.
            Request::Dist(dist) => {
                // Before hitting the database, see whether the registry's version maps already
                // carry metadata for this exact (name, index, version) — if so, avoid a
                // download/build entirely.
                if let Some(version) = dist.version() {
                    if let Some(index) = dist.index() {
                        // Check the implicit indexes for pre-provided metadata.
                        let versions_response = self.index.implicit().get(dist.name());
                        if let Some(VersionsResponse::Found(version_maps)) =
                            versions_response.as_deref()
                        {
                            for version_map in version_maps {
                                // Only trust metadata that came from the same index as the dist.
                                if version_map.index() == Some(index) {
                                    let Some(metadata) = version_map.get_metadata(version) else {
                                        continue;
                                    };
                                    debug!("Found registry-provided metadata for: {dist}");
                                    return Ok(Some(Response::Dist {
                                        dist,
                                        metadata: MetadataResponse::Found(
                                            ArchiveMetadata::from_metadata23(metadata.clone()),
                                        ),
                                    }));
                                }
                            }
                        }

                        // Check the explicit indexes for pre-provided metadata.
                        let versions_response = self
                            .index
                            .explicit()
                            .get(&(dist.name().clone(), index.clone()));
                        if let Some(VersionsResponse::Found(version_maps)) =
                            versions_response.as_deref()
                        {
                            for version_map in version_maps {
                                let Some(metadata) = version_map.get_metadata(version) else {
                                    continue;
                                };
                                debug!("Found registry-provided metadata for: {dist}");
                                return Ok(Some(Response::Dist {
                                    dist,
                                    metadata: MetadataResponse::Found(
                                        ArchiveMetadata::from_metadata23(metadata.clone()),
                                    ),
                                }));
                            }
                        }
                    }
                }

                // No pre-provided metadata: fetch (or build) the wheel metadata.
                let metadata = provider
                    .get_or_build_wheel_metadata(&dist)
                    .boxed_local()
                    .await?;

                // Guard against indexes or builds that return metadata for the wrong package.
                if let MetadataResponse::Found(metadata) = &metadata {
                    if &metadata.metadata.name != dist.name() {
                        return Err(ResolveError::MismatchedPackageName {
                            request: "distribution metadata",
                            expected: dist.name().clone(),
                            actual: metadata.metadata.name.clone(),
                        });
                    }
                }

                Ok(Some(Response::Dist { dist, metadata }))
            }

            // Read metadata from an already-installed distribution.
            Request::Installed(dist) => {
                let metadata = provider.get_installed_metadata(&dist).boxed_local().await?;

                // Guard against installed metadata naming a different package.
                if let MetadataResponse::Found(metadata) = &metadata {
                    if &metadata.metadata.name != dist.name() {
                        return Err(ResolveError::MismatchedPackageName {
                            request: "installed metadata",
                            expected: dist.name().clone(),
                            actual: metadata.metadata.name.clone(),
                        });
                    }
                }

                Ok(Some(Response::Installed { dist, metadata }))
            }

            // Pre-fetch the package and distribution metadata.
            Request::Prefetch(package_name, range, python_requirement) => {
                // Wait for the package metadata to become available.
                let versions_response = self
                    .index
                    .implicit()
                    .wait(&package_name)
                    .await
                    .ok_or_else(|| ResolveError::UnregisteredTask(package_name.to_string()))?;

                let version_map = match *versions_response {
                    VersionsResponse::Found(ref version_map) => version_map,
                    // Short-circuit if we did not find any versions for the package,
                    // recording the reason so error reporting can surface it later.
                    VersionsResponse::NoIndex => {
                        self.unavailable_packages
                            .insert(package_name.clone(), UnavailablePackage::NoIndex);

                        return Ok(None);
                    }
                    VersionsResponse::Offline => {
                        self.unavailable_packages
                            .insert(package_name.clone(), UnavailablePackage::Offline);

                        return Ok(None);
                    }
                    VersionsResponse::NotFound => {
                        self.unavailable_packages
                            .insert(package_name.clone(), UnavailablePackage::NotFound);

                        return Ok(None);
                    }
                };

                // We don't have access to the fork state when prefetching, so assume that
                // pre-release versions are allowed.
                let env = ResolverEnvironment::universal(vec![]);

                // Try to find a compatible version. If there aren't any compatible versions,
                // short-circuit.
                let Some(candidate) = self.selector.select(
                    &package_name,
                    &range,
                    version_map,
                    &self.preferences,
                    &self.installed_packages,
                    &self.exclusions,
                    None,
                    &env,
                    self.tags.as_ref(),
                ) else {
                    return Ok(None);
                };

                // If there is not a compatible distribution, short-circuit.
                let Some(dist) = candidate.compatible() else {
                    return Ok(None);
                };

                // If the registry provided metadata for this distribution, use it.
                for version_map in version_map {
                    if let Some(metadata) = version_map.get_metadata(candidate.version()) {
                        let dist = dist.for_resolution();
                        // Only trust metadata from the same index as the resolved dist.
                        if version_map.index() == dist.index() {
                            debug!("Found registry-provided metadata for: {dist}");

                            let metadata = MetadataResponse::Found(
                                ArchiveMetadata::from_metadata23(metadata.clone()),
                            );

                            let dist = dist.to_owned();
                            if &package_name != dist.name() {
                                return Err(ResolveError::MismatchedPackageName {
                                    request: "distribution",
                                    expected: package_name,
                                    actual: dist.name().clone(),
                                });
                            }

                            let response = match dist {
                                ResolvedDist::Installable { dist, .. } => Response::Dist {
                                    dist: (*dist).clone(),
                                    metadata,
                                },
                                ResolvedDist::Installed { dist } => Response::Installed {
                                    dist: (*dist).clone(),
                                    metadata,
                                },
                            };

                            return Ok(Some(response));
                        }
                    }
                }

                // Avoid prefetching source distributions with unbounded lower-bound ranges. This
                // often leads to failed attempts to build legacy versions of packages that are
                // incompatible with modern build tools.
                if dist.wheel().is_none() {
                    if !self.selector.use_highest_version(&package_name, &env) {
                        if let Some((lower, _)) = range.iter().next() {
                            if lower == &Bound::Unbounded {
                                debug!(
                                    "Skipping prefetch for unbounded minimum-version range: {package_name} ({range})"
                                );
                                return Ok(None);
                            }
                        }
                    }
                }

                // Validate the Python requirement: skip the prefetch if the candidate's
                // `Requires-Python` doesn't cover the target Python range.
                let requires_python = match dist {
                    CompatibleDist::InstalledDist(_) => None,
                    CompatibleDist::SourceDist { sdist, .. }
                    | CompatibleDist::IncompatibleWheel { sdist, .. } => {
                        sdist.file.requires_python.as_ref()
                    }
                    CompatibleDist::CompatibleWheel { wheel, .. } => {
                        wheel.file.requires_python.as_ref()
                    }
                };
                if let Some(requires_python) = requires_python.as_ref() {
                    if !python_requirement.target().is_contained_by(requires_python) {
                        return Ok(None);
                    }
                }

                // Verify that the package is allowed under the hash-checking policy.
                if !self
                    .hasher
                    .allows_package(candidate.name(), candidate.version())
                {
                    return Ok(None);
                }

                // Emit a request to fetch the metadata for this version. `register` returns
                // `false` when another task already owns the fetch, in which case we skip it.
                let dist = dist.for_resolution();
                if self.index.distributions().register(dist.distribution_id()) {
                    let dist = dist.to_owned();
                    if &package_name != dist.name() {
                        return Err(ResolveError::MismatchedPackageName {
                            request: "distribution",
                            expected: package_name,
                            actual: dist.name().clone(),
                        });
                    }

                    let response = match dist {
                        ResolvedDist::Installable { dist, .. } => {
                            let metadata = provider
                                .get_or_build_wheel_metadata(&dist)
                                .boxed_local()
                                .await?;

                            Response::Dist {
                                dist: (*dist).clone(),
                                metadata,
                            }
                        }
                        ResolvedDist::Installed { dist } => {
                            let metadata =
                                provider.get_installed_metadata(&dist).boxed_local().await?;

                            Response::Installed {
                                dist: (*dist).clone(),
                                metadata,
                            }
                        }
                    };

                    Ok(Some(response))
                } else {
                    Ok(None)
                }
            }
        }
    }
2685
    /// Convert a PubGrub [`NoSolutionError`] into a [`ResolveError`], enriching it with
    /// resolver context (unavailable/incomplete packages, available versions and indexes,
    /// fork information) so the final error message can include actionable hints.
    fn convert_no_solution_err(
        &self,
        mut err: pubgrub::NoSolutionError<UvDependencyProvider>,
        fork_urls: ForkUrls,
        fork_indexes: ForkIndexes,
        env: ResolverEnvironment,
        current_environment: MarkerEnvironment,
        visited: &FxHashSet<PackageName>,
    ) -> ResolveError {
        // Simplify the derivation tree before reporting: drop proxy packages and collapse
        // local-version segments.
        err = NoSolutionError::collapse_local_version_segments(NoSolutionError::collapse_proxies(
            err,
        ));

        // Collect, for every package in the derivation tree, the reason (if any) that it was
        // recorded as unavailable during resolution.
        let mut unavailable_packages = FxHashMap::default();
        for package in err.packages() {
            if let PubGrubPackageInner::Package { name, .. } = &**package {
                if let Some(reason) = self.unavailable_packages.get(name) {
                    unavailable_packages.insert(name.clone(), reason.clone());
                }
            }
        }

        // Collect per-version incompleteness reasons (e.g., metadata that failed to parse).
        let mut incomplete_packages = FxHashMap::default();
        for package in err.packages() {
            if let PubGrubPackageInner::Package { name, .. } = &**package {
                if let Some(versions) = self.incomplete_packages.get(name) {
                    for entry in versions.iter() {
                        let (version, reason) = entry.pair();
                        incomplete_packages
                            .entry(name.clone())
                            .or_insert_with(BTreeMap::default)
                            .insert(version.clone(), reason.clone());
                    }
                }
            }
        }

        // Collect the versions and indexes known for each package involved in the failure.
        let mut available_indexes = FxHashMap::default();
        let mut available_versions = FxHashMap::default();
        for package in err.packages() {
            let Some(name) = package.name() else { continue };
            if !visited.contains(name) {
                // Avoid including available versions for packages that exist in the derivation
                // tree, but were never visited during resolution. We _may_ have metadata for
                // these packages, but it's non-deterministic, and omitting them ensures that
                // we represent the state of the resolver at the time of failure.
                continue;
            }
            // Prefer the fork's pinned index for this package; otherwise, fall back to the
            // implicit index lookup.
            let versions_response = if let Some(index) = fork_indexes.get(name) {
                self.index
                    .explicit()
                    .get(&(name.clone(), index.url().clone()))
            } else {
                self.index.implicit().get(name)
            };
            if let Some(response) = versions_response {
                if let VersionsResponse::Found(ref version_maps) = *response {
                    // Track the available versions, across all indexes.
                    for version_map in version_maps {
                        let package_versions = available_versions
                            .entry(name.clone())
                            .or_insert_with(BTreeSet::new);

                        for (version, dists) in version_map.iter(&Ranges::full()) {
                            // Don't show versions removed by excluded-newer in hints.
                            if let Some(exclude_newer) = version_map.exclude_newer() {
                                let Some(prioritized_dist) = dists.prioritized_dist() else {
                                    continue;
                                };
                                // Skip the version if every file was uploaded at-or-after the
                                // exclude-newer cutoff (files without an upload time are also
                                // treated as excluded here).
                                if prioritized_dist.files().all(|file| {
                                    file.upload_time_utc_ms.is_none_or(|upload_time| {
                                        upload_time >= exclude_newer.timestamp_millis()
                                    })
                                }) {
                                    continue;
                                }
                            }

                            package_versions.insert(version.clone());
                        }
                    }

                    // Track the indexes in which the package is available.
                    available_indexes
                        .entry(name.clone())
                        .or_insert(BTreeSet::new())
                        .extend(
                            version_maps
                                .iter()
                                .filter_map(|version_map| version_map.index().cloned()),
                        );
                }
            }
        }

        // Package everything into the rich NoSolution error used for reporting.
        ResolveError::NoSolution(Box::new(NoSolutionError::new(
            err,
            self.index.clone(),
            available_versions,
            available_indexes,
            self.selector.clone(),
            self.python_requirement.clone(),
            self.locations.clone(),
            self.capabilities.clone(),
            unavailable_packages,
            incomplete_packages,
            fork_urls,
            fork_indexes,
            env,
            current_environment,
            self.tags.clone(),
            self.workspace_members.clone(),
            self.options.clone(),
        )))
    }
2801
2802    fn on_progress(&self, package: &PubGrubPackage, version: &Version) {
2803        if let Some(reporter) = self.reporter.as_ref() {
2804            match &**package {
2805                PubGrubPackageInner::Root(_) => {}
2806                PubGrubPackageInner::Python(_) => {}
2807                PubGrubPackageInner::System(_) => {}
2808                PubGrubPackageInner::Marker { .. } => {}
2809                PubGrubPackageInner::Extra { .. } => {}
2810                PubGrubPackageInner::Group { .. } => {}
2811                PubGrubPackageInner::Package { name, .. } => {
2812                    reporter.on_progress(name, &VersionOrUrlRef::Version(version));
2813                }
2814            }
2815        }
2816    }
2817
2818    fn on_complete(&self) {
2819        if let Some(reporter) = self.reporter.as_ref() {
2820            reporter.on_complete();
2821        }
2822    }
2823}
2824
/// State that is used during unit propagation in the resolver, one instance per fork.
#[derive(Clone)]
pub(crate) struct ForkState {
    /// The internal state used by the resolver.
    ///
    /// Note that not all parts of this state are strictly internal. For
    /// example, the edges in the dependency graph generated as part of the
    /// output of resolution are derived from the "incompatibilities" tracked
    /// in this state. We also ultimately retrieve the final set of version
    /// assignments (to packages) from this state's "partial solution."
    pubgrub: State<UvDependencyProvider>,
    /// The initial package to select. If set, the first iteration over this state will avoid
    /// asking PubGrub for the highest-priority package, and will instead use the provided package.
    initial_id: Option<Id<PubGrubPackage>>,
    /// The initial version to select. If set, the first iteration over this state will avoid
    /// asking PubGrub for the highest-priority version, and will instead use the provided version.
    initial_version: Option<Version>,
    /// The next package on which to run unit propagation.
    next: Id<PubGrubPackage>,
    /// The set of pinned versions we accrue throughout resolution.
    ///
    /// The key of this map is a package name, and each package name maps to
    /// a set of versions for that package. Each version in turn is mapped
    /// to the concrete distribution selected for installation, along with the
    /// concrete distribution whose metadata was used during resolution.
    /// After resolution is finished, this map is consulted to recover both the
    /// locked artifact and the metadata backing the resolved dependency edges.
    pins: FilePins,
    /// Ensure we don't have duplicate URLs in any branch.
    ///
    /// Unlike [`Urls`], we add only the URLs we have seen in this branch, and there can be only
    /// one URL per package. By prioritizing direct URL dependencies over registry dependencies,
    /// this map is populated for all direct URL packages before we look at any registry packages.
    fork_urls: ForkUrls,
    /// Ensure we don't have duplicate indexes in any branch.
    ///
    /// Unlike [`Indexes`], we add only the indexes we have seen in this branch, and there can be
    /// only one index per package.
    fork_indexes: ForkIndexes,
    /// When dependencies for a package are retrieved, this map of priorities
    /// is updated based on how each dependency was specified. Certain types
    /// of dependencies have more "priority" than others (like direct URL
    /// dependencies). These priorities help determine which package to
    /// consider next during resolution.
    priorities: PubGrubPriorities,
    /// This keeps track of the set of versions for each package that we've
    /// already visited during resolution. This avoids doing redundant work.
    added_dependencies: FxHashMap<Id<PubGrubPackage>, FxHashSet<Version>>,
    /// The marker expression that created this state.
    ///
    /// The root state always corresponds to a marker expression that is always
    /// `true` for every `MarkerEnvironment`.
    ///
    /// In non-universal mode, forking never occurs and so this marker
    /// expression is always `true`.
    ///
    /// Whenever dependencies are fetched, all requirement specifications
    /// are checked for disjointness with the marker expression of the fork
    /// in which those dependencies were fetched. If a requirement has a
    /// completely disjoint marker expression (i.e., it can never be true given
    /// that the marker expression that provoked the fork is true), then that
    /// dependency is completely ignored.
    env: ResolverEnvironment,
    /// The Python requirement for this fork. Defaults to the Python requirement for
    /// the resolution, but may be narrowed if a `python_version` marker is present
    /// in a given fork.
    ///
    /// For example, in:
    /// ```text
    /// numpy >=1.26 ; python_version >= "3.9"
    /// numpy <1.26 ; python_version < "3.9"
    /// ```
    ///
    /// The top fork has a narrower Python compatibility range, and thus can find a
    /// solution that omits Python 3.8 support.
    python_requirement: PythonRequirement,
    /// Counts how often each package causes or is affected by conflicts, so that
    /// repeat offenders can be re-prioritized (see [`ForkState::record_conflict`]).
    conflict_tracker: ConflictTracker,
    /// Prefetch package versions for packages with many rejected versions.
    ///
    /// Tracked on the fork state to avoid counting each identical version between forks as new try.
    prefetcher: BatchPrefetcher,
}
2907
impl ForkState {
    /// Create the initial state for a fork, starting unit propagation at the root package.
    fn new(
        pubgrub: State<UvDependencyProvider>,
        env: ResolverEnvironment,
        python_requirement: PythonRequirement,
        prefetcher: BatchPrefetcher,
    ) -> Self {
        Self {
            initial_id: None,
            initial_version: None,
            // Unit propagation starts at the root package.
            next: pubgrub.root_package,
            pubgrub,
            pins: FilePins::default(),
            fork_urls: ForkUrls::default(),
            fork_indexes: ForkIndexes::default(),
            priorities: PubGrubPriorities::default(),
            added_dependencies: FxHashMap::default(),
            env,
            python_requirement,
            conflict_tracker: ConflictTracker::default(),
            prefetcher,
        }
    }

    /// Visit the dependencies for the selected version of the current package, incorporating any
    /// relevant URLs and pinned indexes into the [`ForkState`].
    ///
    /// Also updates the package priorities for each dependency, and warns about unpinned direct
    /// dependencies under `--resolution lowest` / `--resolution lowest-direct`.
    fn visit_package_version_dependencies(
        &mut self,
        for_package: Id<PubGrubPackage>,
        for_version: &Version,
        urls: &Urls,
        indexes: &Indexes,
        dependencies: &[PubGrubDependency],
        git: &GitResolver,
        workspace_members: &BTreeSet<PackageName>,
        resolution_strategy: &ResolutionStrategy,
    ) -> Result<(), ResolveError> {
        for dependency in dependencies {
            let PubGrubDependency {
                package,
                version,
                parent: _,
                source,
            } = dependency;

            let mut has_url = false;
            if let Some(name) = package.name() {
                // From the [`Requirement`] to [`PubGrubDependency`] conversion, we get a URL if the
                // requirement was a URL requirement. `Urls` applies canonicalization to this and
                // override URLs to both URL and registry requirements, which we then check for
                // conflicts using [`ForkUrl`].
                for url in urls.get_url(&self.env, name, source.verbatim_url(), git)? {
                    self.fork_urls.insert(name, url, &self.env)?;
                    has_url = true;
                }

                // An index pinned directly on the requirement takes effect in this fork.
                if let Some(index) = source.explicit_index() {
                    self.fork_indexes.insert(name, index, &self.env)?;
                }

                // If the package is pinned to an exact index, add it to the fork.
                for index in indexes.get(name, &self.env) {
                    self.fork_indexes.insert(name, index, &self.env)?;
                }
            }

            if let Some(name) = self.pubgrub.package_store[for_package]
                .name_no_root()
                .filter(|name| !workspace_members.contains(name))
            {
                debug!(
                    "Adding transitive dependency for {name}=={for_version}: {package}{version}"
                );
            } else {
                // A dependency from the root package or `requirements.txt`.
                debug!("Adding direct dependency: {package}{version}");

                // Warn the user if a direct dependency lacks a lower bound in `--lowest` resolution.
                let missing_lower_bound = version
                    .bounding_range()
                    .map(|(lowest, _highest)| lowest == Bound::Unbounded)
                    .unwrap_or(true);
                let strategy_lowest = matches!(
                    resolution_strategy,
                    ResolutionStrategy::Lowest | ResolutionStrategy::LowestDirect(..)
                );

                if !has_url && missing_lower_bound && strategy_lowest {
                    let name = package.name_no_root().unwrap();
                    // Handle cases where a package is listed both without and with a lower bound.
                    // Example:
                    // ```
                    // "coverage[toml] ; python_version < '3.11'",
                    // "coverage >= 7.10.0",
                    // ```
                    let bound_on_other_package = dependencies.iter().any(|other| {
                        Some(name) == other.package.name()
                            && !other
                                .version
                                .bounding_range()
                                .map(|(lowest, _highest)| lowest == Bound::Unbounded)
                                .unwrap_or(true)
                    });

                    if !bound_on_other_package {
                        warn_user_once!(
                            "The direct dependency `{name}` is unpinned. \
                            Consider setting a lower bound when using `--resolution lowest` \
                            or `--resolution lowest-direct` to avoid using outdated versions.",
                        );
                    }
                }
            }

            // Update the package priorities.
            self.priorities.insert(package, version, &self.fork_urls);
            // As we're adding an incompatibility from the proxy package to the base package,
            // we need to register the base package.
            if let Some(base_package) = package.base_package() {
                self.priorities
                    .insert(&base_package, version, &self.fork_urls);
            }
        }

        Ok(())
    }

    /// Add the dependencies for the selected version of the current package.
    fn add_package_version_dependencies(
        &mut self,
        for_package: Id<PubGrubPackage>,
        for_version: &Version,
        dependencies: Vec<PubGrubDependency>,
    ) {
        // Register a proxy-to-base incompatibility for every dependency that has a base
        // package (e.g., an extra proxy and its underlying package).
        for dependency in &dependencies {
            let PubGrubDependency {
                package,
                version,
                parent: _,
                source: _,
            } = dependency;

            let Some(base_package) = package.base_package() else {
                continue;
            };

            let proxy_package = self.pubgrub.package_store.alloc(package.clone());
            let base_package_id = self.pubgrub.package_store.alloc(base_package.clone());
            self.pubgrub
                .add_proxy_package(proxy_package, base_package_id, version.clone());
        }

        let conflict = self.pubgrub.add_package_version_dependencies(
            self.next,
            for_version.clone(),
            dependencies.into_iter().map(|dependency| {
                let PubGrubDependency {
                    package,
                    version,
                    parent: _,
                    source: _,
                } = dependency;
                (package, version)
            }),
        );

        // Conflict tracking: If the version was rejected due to its dependencies, record culprit
        // and affected.
        if let Some(incompatibility) = conflict {
            self.record_conflict(for_package, Some(for_version), incompatibility);
        }
    }

    /// Record a conflict for the `affected` package, counting every other package in the
    /// incompatibility as a "culprit".
    ///
    /// Once a package's culprit or affected count reaches [`CONFLICT_THRESHOLD`], it is queued
    /// for de-prioritization or prioritization, respectively, on the conflict tracker.
    fn record_conflict(
        &mut self,
        affected: Id<PubGrubPackage>,
        version: Option<&Version>,
        incompatibility: IncompId<PubGrubPackage, Ranges<Version>, UnavailableReason>,
    ) {
        let mut culprit_is_real = false;
        for (incompatible, _term) in self.pubgrub.incompatibility_store[incompatibility].iter() {
            if incompatible == affected {
                continue;
            }
            if self.pubgrub.package_store[affected].name()
                == self.pubgrub.package_store[incompatible].name()
            {
                // Don't track conflicts between a marker package and the main package, when the
                // marker is "copying" the obligations from the main package through conflicts.
                continue;
            }
            culprit_is_real = true;
            let culprit_count = self
                .conflict_tracker
                .culprit
                .entry(incompatible)
                .or_default();
            *culprit_count += 1;
            if *culprit_count == CONFLICT_THRESHOLD {
                self.conflict_tracker.deprioritize.push(incompatible);
            }
        }
        // Don't track conflicts between a marker package and the main package, when the
        // marker is "copying" the obligations from the main package through conflicts.
        if culprit_is_real {
            // The joined package list is only computed when debug logging is active.
            if tracing::enabled!(Level::DEBUG) {
                let incompatibility = self.pubgrub.incompatibility_store[incompatibility]
                    .iter()
                    .map(|(package, _term)| &self.pubgrub.package_store[package])
                    .join(", ");
                if let Some(version) = version {
                    debug!(
                        "Recording dependency conflict of {}=={} from incompatibility of ({})",
                        self.pubgrub.package_store[affected], version, incompatibility
                    );
                } else {
                    debug!(
                        "Recording unit propagation conflict of {} from incompatibility of ({})",
                        self.pubgrub.package_store[affected], incompatibility
                    );
                }
            }

            let affected_count = self.conflict_tracker.affected.entry(self.next).or_default();
            *affected_count += 1;
            if *affected_count == CONFLICT_THRESHOLD {
                self.conflict_tracker.prioritize.push(self.next);
            }
        }
    }

    /// Mark a version of the current package (`self.next`) as unavailable, with the given reason.
    fn add_unavailable_version(&mut self, version: Version, reason: UnavailableVersion) {
        // Incompatible requires-python versions are special in that we track
        // them as incompatible dependencies instead of marking the package version
        // as unavailable directly.
        if let UnavailableVersion::IncompatibleDist(
            IncompatibleDist::Source(IncompatibleSource::RequiresPython(requires_python, kind))
            | IncompatibleDist::Wheel(IncompatibleWheel::RequiresPython(requires_python, kind)),
        ) = reason
        {
            let package = &self.next;
            let python = self.pubgrub.package_store.alloc(PubGrubPackage::from(
                PubGrubPackageInner::Python(match kind {
                    PythonRequirementKind::Installed => PubGrubPython::Installed,
                    PythonRequirementKind::Target => PubGrubPython::Target,
                }),
            ));
            // Model the incompatibility as "this version depends on a matching Python",
            // then immediately decide this version so propagation surfaces the conflict.
            self.pubgrub
                .add_incompatibility(Incompatibility::from_dependency(
                    *package,
                    Range::singleton(version.clone()),
                    (python, release_specifiers_to_ranges(requires_python)),
                ));
            self.pubgrub
                .partial_solution
                .add_decision(self.next, version);
            return;
        }
        self.pubgrub
            .add_incompatibility(Incompatibility::custom_version(
                self.next,
                version.clone(),
                UnavailableReason::Version(reason),
            ));
    }

    /// Subset the current markers with the new markers and update the python requirements fields
    /// accordingly.
    ///
    /// If the fork's environment contains a narrowed Python requirement, it is applied to
    /// this state.
    fn with_env(mut self, env: ResolverEnvironment) -> Self {
        self.env = env;
        // If the fork contains a narrowed Python requirement, apply it.
        if let Some(req) = self.env.narrow_python_requirement(&self.python_requirement) {
            debug!("Narrowed `requires-python` bound to: {}", req.target());
            self.python_requirement = req;
        }
        self
    }

    /// Returns the URL or index for a package and version.
    ///
    /// In practice, exactly one of the returned values will be `Some`.
    fn source(
        &self,
        name: &PackageName,
        version: &Version,
    ) -> (Option<&VerbatimParsedUrl>, Option<&IndexUrl>) {
        let url = self.fork_urls.get(name);
        // Only consult the pins for an index when the package is not a URL dependency.
        let index = url
            .is_none()
            .then(|| {
                self.pins
                    .get(name, version)
                    .expect("Every package should be pinned")
                    .index()
            })
            .flatten();
        (url, index)
    }

    /// Consume this fork's state and convert the PubGrub partial solution into a [`Resolution`],
    /// deriving the dependency edges from the tracked incompatibilities.
    fn into_resolution(self) -> Resolution {
        let solution: FxHashMap<_, _> = self.pubgrub.partial_solution.extract_solution().collect();
        // Upper bound on the number of edges: one per incompatibility of each solved package.
        let edge_count: usize = solution
            .keys()
            .map(|package| self.pubgrub.incompatibilities[package].len())
            .sum();
        let mut edges: Vec<ResolutionDependencyEdge> = Vec::with_capacity(edge_count);
        for (package, self_version) in &solution {
            for id in &self.pubgrub.incompatibilities[package] {
                // Only dependency incompatibilities contribute edges; skip any incompatibility
                // that doesn't apply to the selected versions on both ends.
                let pubgrub::Kind::FromDependencyOf(
                    self_package,
                    ref self_range,
                    dependency_package,
                    ref dependency_range,
                ) = self.pubgrub.incompatibility_store[*id].kind
                else {
                    continue;
                };
                if *package != self_package {
                    continue;
                }
                if !self_range.contains(self_version) {
                    continue;
                }
                let Some(dependency_version) = solution.get(&dependency_package) else {
                    continue;
                };
                if !dependency_range.contains(dependency_version) {
                    continue;
                }

                let self_package = &self.pubgrub.package_store[self_package];
                let dependency_package = &self.pubgrub.package_store[dependency_package];

                let (self_name, self_extra, self_group) = match &**self_package {
                    PubGrubPackageInner::Package {
                        name: self_name,
                        extra: self_extra,
                        group: self_group,
                        marker: _,
                    } => (Some(self_name), self_extra.as_ref(), self_group.as_ref()),

                    PubGrubPackageInner::Root(_) => (None, None, None),

                    _ => continue,
                };

                let (self_url, self_index) = self_name
                    .map(|self_name| self.source(self_name, self_version))
                    .unwrap_or((None, None));

                match **dependency_package {
                    PubGrubPackageInner::Package {
                        name: ref dependency_name,
                        extra: ref dependency_extra,
                        group: ref dependency_dev,
                        marker: ref dependency_marker,
                    } => {
                        debug_assert!(
                            dependency_extra.is_none(),
                            "Packages should depend on an extra proxy"
                        );
                        debug_assert!(
                            dependency_dev.is_none(),
                            "Packages should depend on a group proxy"
                        );

                        // Ignore self-dependencies (e.g., `tensorflow-macos` depends on `tensorflow-macos`),
                        // but allow groups to depend on other groups, or on the package itself.
                        if self_group.is_none() {
                            if self_name == Some(dependency_name) {
                                continue;
                            }
                        }

                        let (to_url, to_index) = self.source(dependency_name, dependency_version);

                        let edge = ResolutionDependencyEdge {
                            from: self_name.cloned(),
                            from_version: self_version.clone(),
                            from_url: self_url.cloned(),
                            from_index: self_index.cloned(),
                            from_extra: self_extra.cloned(),
                            from_group: self_group.cloned(),
                            to: dependency_name.clone(),
                            to_version: dependency_version.clone(),
                            to_url: to_url.cloned(),
                            to_index: to_index.cloned(),
                            to_extra: dependency_extra.clone(),
                            to_group: dependency_dev.clone(),
                            marker: *dependency_marker,
                        };
                        edges.push(edge);
                    }

                    PubGrubPackageInner::Marker {
                        name: ref dependency_name,
                        marker: ref dependency_marker,
                    } => {
                        // Ignore self-dependencies (e.g., `tensorflow-macos` depends on `tensorflow-macos`),
                        // but allow groups to depend on other groups, or on the package itself.
                        if self_group.is_none() {
                            if self_name == Some(dependency_name) {
                                continue;
                            }
                        }

                        let (to_url, to_index) = self.source(dependency_name, dependency_version);

                        let edge = ResolutionDependencyEdge {
                            from: self_name.cloned(),
                            from_version: self_version.clone(),
                            from_url: self_url.cloned(),
                            from_index: self_index.cloned(),
                            from_extra: self_extra.cloned(),
                            from_group: self_group.cloned(),
                            to: dependency_name.clone(),
                            to_version: dependency_version.clone(),
                            to_url: to_url.cloned(),
                            to_index: to_index.cloned(),
                            to_extra: None,
                            to_group: None,
                            marker: *dependency_marker,
                        };
                        edges.push(edge);
                    }

                    PubGrubPackageInner::Extra {
                        name: ref dependency_name,
                        extra: ref dependency_extra,
                        marker: ref dependency_marker,
                    } => {
                        if self_group.is_none() {
                            debug_assert!(
                                self_name != Some(dependency_name),
                                "Extras should be flattened"
                            );
                        }
                        let (to_url, to_index) = self.source(dependency_name, dependency_version);

                        // Insert an edge from the dependent package to the extra package.
                        let edge = ResolutionDependencyEdge {
                            from: self_name.cloned(),
                            from_version: self_version.clone(),
                            from_url: self_url.cloned(),
                            from_index: self_index.cloned(),
                            from_extra: self_extra.cloned(),
                            from_group: self_group.cloned(),
                            to: dependency_name.clone(),
                            to_version: dependency_version.clone(),
                            to_url: to_url.cloned(),
                            to_index: to_index.cloned(),
                            to_extra: Some(dependency_extra.clone()),
                            to_group: None,
                            marker: *dependency_marker,
                        };
                        edges.push(edge);

                        // Insert an edge from the dependent package to the base package.
                        let edge = ResolutionDependencyEdge {
                            from: self_name.cloned(),
                            from_version: self_version.clone(),
                            from_url: self_url.cloned(),
                            from_index: self_index.cloned(),
                            from_extra: self_extra.cloned(),
                            from_group: self_group.cloned(),
                            to: dependency_name.clone(),
                            to_version: dependency_version.clone(),
                            to_url: to_url.cloned(),
                            to_index: to_index.cloned(),
                            to_extra: None,
                            to_group: None,
                            marker: *dependency_marker,
                        };
                        edges.push(edge);
                    }

                    PubGrubPackageInner::Group {
                        name: ref dependency_name,
                        group: ref dependency_group,
                        marker: ref dependency_marker,
                    } => {
                        debug_assert!(
                            self_name != Some(dependency_name),
                            "Groups should be flattened"
                        );

                        let (to_url, to_index) = self.source(dependency_name, dependency_version);

                        // Add an edge from the dependent package to the dev package, but _not_ the
                        // base package.
                        let edge = ResolutionDependencyEdge {
                            from: self_name.cloned(),
                            from_version: self_version.clone(),
                            from_url: self_url.cloned(),
                            from_index: self_index.cloned(),
                            from_extra: self_extra.cloned(),
                            from_group: self_group.cloned(),
                            to: dependency_name.clone(),
                            to_version: dependency_version.clone(),
                            to_url: to_url.cloned(),
                            to_index: to_index.cloned(),
                            to_extra: None,
                            to_group: Some(dependency_group.clone()),
                            marker: *dependency_marker,
                        };
                        edges.push(edge);
                    }

                    _ => {}
                }
            }
        }

        // Only marker-free packages become nodes; marker proxies are excluded by the
        // `marker: MarkerTree::TRUE` pattern below.
        let nodes = solution
            .into_iter()
            .filter_map(|(package, version)| {
                if let PubGrubPackageInner::Package {
                    name,
                    extra,
                    group,
                    marker: MarkerTree::TRUE,
                } = &*self.pubgrub.package_store[package]
                {
                    let (url, index) = self.source(name, &version);
                    Some((
                        ResolutionPackage {
                            name: name.clone(),
                            extra: extra.clone(),
                            dev: group.clone(),
                            url: url.cloned(),
                            index: index.cloned(),
                        },
                        version,
                    ))
                } else {
                    None
                }
            })
            .collect();

        Resolution {
            nodes,
            edges,
            pins: self.pins,
            env: self.env,
        }
    }
}
3459
/// The resolution from a single fork including the virtual packages and the edges between them.
#[derive(Debug)]
pub(crate) struct Resolution {
    /// The virtual packages in the resolution, each mapped to its selected version.
    pub(crate) nodes: FxHashMap<ResolutionPackage, Version>,
    /// The directed connections between the nodes, where the marker is the edge weight. We don't
    /// store the requirement itself, but it can be retrieved from the package metadata.
    pub(crate) edges: Vec<ResolutionDependencyEdge>,
    /// Map each package name, version tuple from `packages` to a distribution.
    pub(crate) pins: FilePins,
    /// The environment setting this resolution was found under.
    pub(crate) env: ResolverEnvironment,
}
3472
/// Package representation we used during resolution where each extra and also the dev-dependencies
/// group are their own package.
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub(crate) struct ResolutionPackage {
    /// The name of the package.
    pub(crate) name: PackageName,
    /// The extra, if this virtual package represents an extra of the base package.
    pub(crate) extra: Option<ExtraName>,
    /// The dependency group, if this virtual package represents a group of the base package.
    pub(crate) dev: Option<GroupName>,
    /// For registry packages, this is `None`; otherwise, the direct URL of the distribution.
    pub(crate) url: Option<VerbatimParsedUrl>,
    /// For URL packages, this is `None`; otherwise, the index URL of the distribution.
    pub(crate) index: Option<IndexUrl>,
}
3485
/// The `from_` fields and the `to_` fields allow mapping to the originating and target
/// [`ResolutionPackage`] respectively. The `marker` is the edge weight.
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub(crate) struct ResolutionDependencyEdge {
    /// This value is `None` if the dependency comes from the root package.
    pub(crate) from: Option<PackageName>,
    pub(crate) from_version: Version,
    pub(crate) from_url: Option<VerbatimParsedUrl>,
    pub(crate) from_index: Option<IndexUrl>,
    pub(crate) from_extra: Option<ExtraName>,
    pub(crate) from_group: Option<GroupName>,
    pub(crate) to: PackageName,
    pub(crate) to_version: Version,
    pub(crate) to_url: Option<VerbatimParsedUrl>,
    pub(crate) to_index: Option<IndexUrl>,
    pub(crate) to_extra: Option<ExtraName>,
    pub(crate) to_group: Option<GroupName>,
    /// The PEP 508 marker under which this dependency edge applies.
    pub(crate) marker: MarkerTree,
}
3505
3506impl ResolutionDependencyEdge {
3507    pub(crate) fn universal_marker(&self) -> UniversalMarker {
3508        // We specifically do not account for conflict
3509        // markers here. Instead, those are computed via
3510        // a traversal on the resolution graph.
3511        UniversalMarker::new(self.marker, ConflictMarker::TRUE)
3512    }
3513}
3514
/// A request to fetch the metadata for an item (package, distribution, or
/// installed distribution).
#[derive(Debug)]
#[expect(clippy::large_enum_variant)]
pub(crate) enum Request {
    /// A request to fetch the metadata for a package, optionally pinned to a
    /// specific index.
    Package(PackageName, Option<IndexMetadata>),
    /// A request to fetch the metadata for a built or source distribution.
    Dist(Dist),
    /// A request to fetch the metadata from an already-installed distribution.
    Installed(InstalledDist),
    /// A request to pre-fetch the metadata for a package and the best-guess distribution.
    Prefetch(PackageName, Range<Version>, PythonRequirement),
}
3528
3529impl<'a> From<ResolvedDistRef<'a>> for Request {
3530    fn from(dist: ResolvedDistRef<'a>) -> Self {
3531        // N.B. This is almost identical to `ResolvedDistRef::to_owned`, but
3532        // creates a `Request` instead of a `ResolvedDist`. There's probably
3533        // some room for DRYing this up a bit. The obvious way would be to
3534        // add a method to create a `Dist`, but a `Dist` cannot be represented
3535        // as an installed dist.
3536        match dist {
3537            ResolvedDistRef::InstallableRegistrySourceDist { sdist, prioritized } => {
3538                // This is okay because we're only here if the prioritized dist
3539                // has an sdist, so this always succeeds.
3540                let source = prioritized.source_dist().expect("a source distribution");
3541                assert_eq!(
3542                    (&sdist.name, &sdist.version),
3543                    (&source.name, &source.version),
3544                    "expected chosen sdist to match prioritized sdist"
3545                );
3546                Self::Dist(Dist::Source(SourceDist::Registry(source)))
3547            }
3548            ResolvedDistRef::InstallableRegistryBuiltDist {
3549                wheel, prioritized, ..
3550            } => {
3551                assert_eq!(
3552                    Some(&wheel.filename),
3553                    prioritized.best_wheel().map(|(wheel, _)| &wheel.filename),
3554                    "expected chosen wheel to match best wheel"
3555                );
3556                // This is okay because we're only here if the prioritized dist
3557                // has at least one wheel, so this always succeeds.
3558                let built = prioritized.built_dist().expect("at least one wheel");
3559                Self::Dist(Dist::Built(BuiltDist::Registry(built)))
3560            }
3561            ResolvedDistRef::Installed { dist } => Self::Installed(dist.clone()),
3562        }
3563    }
3564}
3565
3566impl Display for Request {
3567    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
3568        match self {
3569            Self::Package(package_name, _) => {
3570                write!(f, "Versions {package_name}")
3571            }
3572            Self::Dist(dist) => {
3573                write!(f, "Metadata {dist}")
3574            }
3575            Self::Installed(dist) => {
3576                write!(f, "Installed metadata {dist}")
3577            }
3578            Self::Prefetch(package_name, range, _) => {
3579                write!(f, "Prefetch {package_name} {range}")
3580            }
3581        }
3582    }
3583}
3584
/// The response to a [`Request`], carrying the fetched metadata back to the
/// resolver.
#[derive(Debug)]
#[expect(clippy::large_enum_variant)]
enum Response {
    /// The returned metadata for a package hosted on a registry.
    ///
    /// The `Option<IndexUrl>` identifies the index the versions were fetched
    /// from, when one applies.
    Package(PackageName, Option<IndexUrl>, VersionsResponse),
    /// The returned metadata for a distribution.
    Dist {
        dist: Dist,
        metadata: MetadataResponse,
    },
    /// The returned metadata for an already-installed distribution.
    Installed {
        dist: InstalledDist,
        metadata: MetadataResponse,
    },
}
3601
/// Information about the dependencies for a particular package.
///
/// This effectively distills the dependency metadata of a package down into
/// its pubgrub specific constituent parts: each dependency package has a range
/// of possible versions.
enum Dependencies {
    /// Package dependencies are not available, with the reason captured in the
    /// [`UnavailableVersion`].
    Unavailable(UnavailableVersion),
    /// Container for all available package versions.
    ///
    /// Note that in universal mode, it is possible and allowed for multiple
    /// `PubGrubPackage` values in this list to have the same package name.
    /// These conflicts are resolved via `Dependencies::fork`.
    Available(Vec<PubGrubDependency>),
    /// Dependencies that should never result in a fork.
    ///
    /// For example, the dependencies of a `Marker` package will have the
    /// same name and version, but differ according to marker expressions.
    /// But we never want this to result in a fork.
    Unforkable(Vec<PubGrubDependency>),
}
3623
3624impl Dependencies {
3625    /// Turn this flat list of dependencies into a potential set of forked
3626    /// groups of dependencies.
3627    ///
3628    /// A fork *only* occurs when there are multiple dependencies with the same
3629    /// name *and* those dependency specifications have corresponding marker
3630    /// expressions that are completely disjoint with one another.
3631    fn fork(
3632        self,
3633        env: &ResolverEnvironment,
3634        python_requirement: &PythonRequirement,
3635        conflicts: &Conflicts,
3636    ) -> ForkedDependencies {
3637        let deps = match self {
3638            Self::Available(deps) => deps,
3639            Self::Unforkable(deps) => return ForkedDependencies::Unforked(deps),
3640            Self::Unavailable(err) => return ForkedDependencies::Unavailable(err),
3641        };
3642        let mut name_to_deps: BTreeMap<PackageName, Vec<PubGrubDependency>> = BTreeMap::new();
3643        for dep in deps {
3644            let name = dep
3645                .package
3646                .name()
3647                .expect("dependency always has a name")
3648                .clone();
3649            name_to_deps.entry(name).or_default().push(dep);
3650        }
3651        let Forks {
3652            mut forks,
3653            diverging_packages,
3654        } = Forks::new(name_to_deps, env, python_requirement, conflicts);
3655        if forks.is_empty() {
3656            ForkedDependencies::Unforked(vec![])
3657        } else if forks.len() == 1 {
3658            ForkedDependencies::Unforked(forks.pop().unwrap().dependencies)
3659        } else {
3660            ForkedDependencies::Forked {
3661                forks,
3662                diverging_packages: diverging_packages.into_iter().collect(),
3663            }
3664        }
3665    }
3666}
3667
/// Information about the (possibly forked) dependencies for a particular
/// package.
///
/// This is like `Dependencies` but with an extra variant that only occurs when
/// a `Dependencies` list has multiple dependency specifications with the same
/// name and non-overlapping marker expressions (i.e., a fork occurs).
#[derive(Debug)]
enum ForkedDependencies {
    /// Package dependencies are not available.
    Unavailable(UnavailableVersion),
    /// No forking occurred.
    ///
    /// This is the same as `Dependencies::Available`.
    Unforked(Vec<PubGrubDependency>),
    /// Forked containers for all available package versions.
    ///
    /// Note that there are always at least two forks. If there would
    /// be fewer than 2 forks, then there is no fork at all and the
    /// `Unforked` variant is used instead.
    Forked {
        forks: Vec<Fork>,
        /// The package(s) with different requirements for disjoint markers.
        diverging_packages: Vec<PackageName>,
    },
}
3693
/// A list of forks determined from the dependencies of a single package.
///
/// Any time a marker expression is seen that is not true for all possible
/// marker environments, it is possible for it to introduce a new fork.
#[derive(Debug, Default)]
struct Forks {
    /// The forks discovered among the dependencies.
    forks: Vec<Fork>,
    /// The package(s) that provoked at least one additional fork.
    diverging_packages: BTreeSet<PackageName>,
}
3705
impl Forks {
    /// Partition the given per-package dependency lists into a set of forks.
    ///
    /// Starting from a single fork for `env`, each dependency either joins the
    /// existing forks (when its markers don't require a split) or subdivides
    /// the fork set. Afterwards, the conflicting group/extra sets in
    /// `conflicts` are applied, which may subdivide the forks further.
    fn new(
        name_to_deps: BTreeMap<PackageName, Vec<PubGrubDependency>>,
        env: &ResolverEnvironment,
        python_requirement: &PythonRequirement,
        conflicts: &Conflicts,
    ) -> Self {
        let python_marker = python_requirement.to_marker_tree();

        let mut forks = vec![Fork::new(env.clone())];
        let mut diverging_packages = BTreeSet::new();
        for (name, mut deps) in name_to_deps {
            assert!(!deps.is_empty(), "every name has at least one dependency");
            // We never fork if there's only one dependency
            // specification for a given package name. This particular
            // strategy results in a "conservative" approach to forking
            // that gives up correctness in some cases in exchange for
            // more limited forking. More limited forking results in
            // simpler-and-easier-to-understand lock files and faster
            // resolving. The correctness we give up manifests when
            // two transitive non-sibling dependencies conflict. In
            // that case, we don't detect the fork ahead of time (at
            // present).
            if let [dep] = deps.as_slice() {
                // There's one exception: if the requirement increases the minimum-supported Python
                // version, we also fork in order to respect that minimum in the subsequent
                // resolution.
                //
                // For example, given `requires-python = ">=3.7"` and `uv ; python_version >= "3.8"`,
                // where uv itself only supports Python 3.8 and later, we need to fork to ensure
                // that the resolution can find a solution.
                if marker::requires_python(dep.package.marker())
                    .is_none_or(|bound| !python_requirement.raises(&bound))
                {
                    let dep = deps.pop().unwrap();
                    let marker = dep.package.marker();
                    // Add the sole dependency to every fork whose environment
                    // its marker can apply to.
                    for fork in &mut forks {
                        if fork.env.included_by_marker(marker) {
                            fork.add_dependency(dep.clone());
                        }
                    }
                    continue;
                }
            } else {
                // If all dependencies have the same markers, we should also avoid forking.
                if let Some(dep) = deps.first() {
                    let marker = dep.package.marker();
                    if deps.iter().all(|dep| marker == dep.package.marker()) {
                        // Unless that "same marker" is a Python requirement that is stricter than
                        // the current Python requirement. In that case, we need to fork to respect
                        // the stricter requirement.
                        if marker::requires_python(marker)
                            .is_none_or(|bound| !python_requirement.raises(&bound))
                        {
                            for dep in deps {
                                for fork in &mut forks {
                                    if fork.env.included_by_marker(marker) {
                                        fork.add_dependency(dep.clone());
                                    }
                                }
                            }
                            continue;
                        }
                    }
                }
            }
            // Any dependency that reaches this point may genuinely require
            // splitting the fork set.
            for dep in deps {
                let mut forker = match ForkingPossibility::new(env, &dep) {
                    ForkingPossibility::Possible(forker) => forker,
                    ForkingPossibility::DependencyAlwaysExcluded => {
                        // If the markers can never be satisfied by the parent
                        // fork, then we can drop this dependency unceremoniously.
                        continue;
                    }
                    ForkingPossibility::NoForkingPossible => {
                        // Or, if the markers are always true, then we just
                        // add the dependency to every fork unconditionally.
                        for fork in &mut forks {
                            fork.add_dependency(dep.clone());
                        }
                        continue;
                    }
                };
                // Otherwise, we *should* need to add a new fork...
                diverging_packages.insert(name.clone());

                let mut new = vec![];
                for fork in std::mem::take(&mut forks) {
                    let Some((remaining_forker, envs)) = forker.fork(&fork.env) else {
                        new.push(fork);
                        continue;
                    };
                    forker = remaining_forker;

                    for fork_env in envs {
                        let mut new_fork = fork.clone();
                        new_fork.set_env(fork_env);
                        // We only add the dependency to this fork if it
                        // satisfies the fork's markers. Some forks are
                        // specifically created to exclude this dependency,
                        // so this isn't always true!
                        if forker.included(&new_fork.env) {
                            new_fork.add_dependency(dep.clone());
                        }
                        // Filter out any forks we created that are disjoint with our
                        // Python requirement.
                        if new_fork.env.included_by_marker(python_marker) {
                            new.push(new_fork);
                        }
                    }
                }
                forks = new;
            }
        }
        // When there is a conflicting group configuration, we need
        // to potentially add more forks. Each fork added contains an
        // exclusion list of conflicting groups where dependencies with
        // the corresponding package and extra name are forcefully
        // excluded from that group.
        //
        // We specifically iterate on conflicting groups and
        // potentially re-generate all forks for each one. We do it
        // this way in case there are multiple sets of conflicting
        // groups that impact the forks here.
        //
        // For example, if we have conflicting groups {x1, x2} and {x3,
        // x4}, we need to make sure the forks generated from one set
        // also account for the other set.
        for set in conflicts.iter() {
            let mut new = vec![];
            for fork in std::mem::take(&mut forks) {
                // Check if this conflict set is relevant to this fork. We need two conditions:
                //
                // 1. At least one item has dependencies in this fork (otherwise there's nothing to
                //    fork on).
                // 2. At least two items are not already excluded in this fork's environment
                //    (otherwise the conflict constraint is already satisfied and no fork is
                //    needed).
                let mut has_conflicting_dependency = false;
                for item in set.iter() {
                    if fork.contains_conflicting_item(item.as_ref()) {
                        has_conflicting_dependency = true;
                        diverging_packages.insert(item.package().clone());
                        break;
                    }
                }
                if !has_conflicting_dependency {
                    new.push(fork);
                    continue;
                }

                // If fewer than two items in this conflict set are still possible (not already
                // excluded) in this fork, the conflict constraint is already satisfied by prior
                // forking. We can skip the full N+1 fork split if the single remaining non-excluded
                // item doesn't appear in any other conflict set (since it would never need its own
                // "excluded" variant).
                let non_excluded: Vec<_> = set
                    .iter()
                    .filter(|item| fork.env.included_by_group(item.as_ref()))
                    .collect();
                if non_excluded.len() < 2 {
                    // Check if any non-excluded item still has a live conflict in another set —
                    // i.e., another set where this item AND at least one other non-excluded item
                    // both appear. If so, we still need to fork to create the "excluded" variant
                    // for that item.
                    //
                    // NOTE: `std::ptr::eq` compares set identity here, not
                    // contents, to skip the set currently being processed.
                    let dominated = non_excluded.iter().all(|item| {
                        !conflicts.iter().any(|other_set| {
                            !std::ptr::eq(set, other_set)
                                && other_set.contains(item.package(), item.kind().as_ref())
                                && other_set
                                    .iter()
                                    .filter(|other_item| {
                                        other_item.package() != item.package()
                                            || other_item.kind() != item.kind()
                                    })
                                    .any(|other_item| {
                                        fork.env.included_by_group(other_item.as_ref())
                                    })
                        })
                    });
                    if dominated {
                        // When dependencies are added to forks, we check `included_by_marker` but
                        // not on whether the dependency's conflict item is included by the fork's
                        // environment so there may be extraneous dependencies and we need to filter
                        // the fork to clean up dependencies gated on already-excluded extras.
                        let rules: Vec<_> = set
                            .iter()
                            .filter(|item| !fork.env.included_by_group(item.as_ref()))
                            .cloned()
                            .map(Err)
                            .collect();
                        if let Some(filtered) = fork.filter(rules) {
                            new.push(filtered);
                        }
                        continue;
                    }
                }

                // Create a fork that excludes ALL conflicts.
                if let Some(fork_none) = fork.clone().filter(set.iter().cloned().map(Err)) {
                    new.push(fork_none);
                }

                // Now create a fork for each conflicting group, where
                // that fork excludes every *other* conflicting group.
                //
                // So if we have conflicting extras foo, bar and baz,
                // then this creates three forks: one that excludes
                // {foo, bar}, one that excludes {foo, baz} and one
                // that excludes {bar, baz}.
                for (i, _) in set.iter().enumerate() {
                    let fork_allows_group = fork.clone().filter(
                        set.iter()
                            .cloned()
                            .enumerate()
                            .map(|(j, group)| if i == j { Ok(group) } else { Err(group) }),
                    );
                    if let Some(fork_allows_group) = fork_allows_group {
                        new.push(fork_allows_group);
                    }
                }
            }
            forks = new;
        }
        Self {
            forks,
            diverging_packages,
        }
    }
}
3936
/// A single fork in a list of dependencies.
///
/// A fork corresponds to the full list of dependencies for a package,
/// but with any conflicting dependency specifications omitted. For
/// example, if we have `a<2 ; sys_platform == 'foo'` and `a>=2 ;
/// sys_platform == 'bar'`, then because the dependency specifications
/// have the same name and because the marker expressions are disjoint,
/// a fork occurs. One fork will contain `a<2` but not `a>=2`, while
/// the other fork will contain `a>=2` but not `a<2`.
#[derive(Clone, Debug)]
struct Fork {
    /// The list of dependencies for this fork, guaranteed to be conflict
    /// free. (i.e., There are no two packages with the same name with
    /// non-overlapping marker expressions.)
    ///
    /// Note that callers shouldn't mutate this sequence directly. Instead,
    /// they should use `add_dependency` (and `set_env`/`filter`, which keep
    /// the `conflicts` index in sync). Namely, it should be impossible for a
    /// package with a marker expression that is disjoint from the marker
    /// expression on this fork to be added.
    dependencies: Vec<PubGrubDependency>,
    /// The conflicting groups in this fork.
    ///
    /// This exists to make some access patterns more efficient. Namely,
    /// it makes it easy to check whether there's a dependency with a
    /// particular conflicting group in this fork.
    conflicts: crate::FxHashbrownSet<ConflictItem>,
    /// The resolver environment for this fork.
    ///
    /// Principally, this corresponds to the markers in this fork. So in the
    /// example above, the `a<2` fork would have `sys_platform == 'foo'`, while
    /// the `a>=2` fork would have `sys_platform == 'bar'`.
    ///
    /// If this fork was generated from another fork, then this *includes*
    /// the criteria from its parent. i.e., Its marker expression represents
    /// the intersection of the marker expression from its parent and any
    /// additional marker expression generated by additional forking based on
    /// conflicting dependency specifications.
    env: ResolverEnvironment,
}
3976
3977impl Fork {
3978    /// Create a new fork with no dependencies with the given resolver
3979    /// environment.
3980    fn new(env: ResolverEnvironment) -> Self {
3981        Self {
3982            dependencies: vec![],
3983            conflicts: crate::FxHashbrownSet::default(),
3984            env,
3985        }
3986    }
3987
3988    /// Add a dependency to this fork.
3989    fn add_dependency(&mut self, dep: PubGrubDependency) {
3990        if let Some(conflicting_item) = dep.conflicting_item() {
3991            self.conflicts.insert(conflicting_item.to_owned());
3992        }
3993        self.dependencies.push(dep);
3994    }
3995
3996    /// Sets the resolver environment to the one given.
3997    ///
3998    /// Any dependency in this fork that does not satisfy the given environment
3999    /// is removed.
4000    fn set_env(&mut self, env: ResolverEnvironment) {
4001        self.env = env;
4002        self.dependencies.retain(|dep| {
4003            let marker = dep.package.marker();
4004            if self.env.included_by_marker(marker) {
4005                return true;
4006            }
4007            if let Some(conflicting_item) = dep.conflicting_item() {
4008                self.conflicts.remove(&conflicting_item);
4009            }
4010            false
4011        });
4012    }
4013
4014    /// Returns true if any of the dependencies in this fork contain a
4015    /// dependency with the given package and extra values.
4016    fn contains_conflicting_item(&self, item: ConflictItemRef<'_>) -> bool {
4017        self.conflicts.contains(&item)
4018    }
4019
4020    /// Include or Exclude the given groups from this fork.
4021    ///
4022    /// This removes all dependencies matching the given conflicting groups.
4023    ///
4024    /// If the exclusion rules would result in a fork with an unsatisfiable
4025    /// resolver environment, then this returns `None`.
4026    fn filter(
4027        mut self,
4028        rules: impl IntoIterator<Item = Result<ConflictItem, ConflictItem>>,
4029    ) -> Option<Self> {
4030        self.env = self.env.filter_by_group(rules)?;
4031        self.dependencies.retain(|dep| {
4032            let Some(conflicting_item) = dep.conflicting_item() else {
4033                return true;
4034            };
4035            if self.env.included_by_group(conflicting_item) {
4036                return true;
4037            }
4038            match conflicting_item.kind() {
4039                // We should not filter entire projects unless they're a top-level dependency
4040                // Otherwise, we'll fail to solve for children of the project, like extras
4041                ConflictKindRef::Project => {
4042                    if dep.parent.is_some() {
4043                        return true;
4044                    }
4045                }
4046                ConflictKindRef::Group(_) => {}
4047                ConflictKindRef::Extra(_) => {}
4048            }
4049            self.conflicts.remove(&conflicting_item);
4050            false
4051        });
4052        Some(self)
4053    }
4054
4055    /// Compare forks, preferring forks with g `requires-python` requirements.
4056    fn cmp_requires_python(&self, other: &Self) -> Ordering {
4057        // A higher `requires-python` requirement indicates a _higher-priority_ fork.
4058        //
4059        // This ordering ensures that we prefer choosing the highest version for each fork based on
4060        // its `requires-python` requirement.
4061        //
4062        // The reverse would prefer choosing fewer versions, at the cost of using older package
4063        // versions on newer Python versions. For example, if reversed, we'd prefer to solve `<3.7
4064        // before solving `>=3.7`, since the resolution produced by the former might work for the
4065        // latter, but the inverse is unlikely to be true.
4066        let self_bound = self.env.requires_python().unwrap_or_default();
4067        let other_bound = other.env.requires_python().unwrap_or_default();
4068        self_bound.lower().cmp(other_bound.lower())
4069    }
4070
4071    /// Compare forks, preferring forks with upper bounds.
4072    fn cmp_upper_bounds(&self, other: &Self) -> Ordering {
4073        // We'd prefer to solve `numpy <= 2` before solving `numpy >= 1`, since the resolution
4074        // produced by the former might work for the latter, but the inverse is unlikely to be true
4075        // due to maximum version selection. (Selecting `numpy==2.0.0` would satisfy both forks, but
4076        // selecting the latest `numpy` would not.)
4077        let self_upper_bounds = self
4078            .dependencies
4079            .iter()
4080            .filter(|dep| {
4081                dep.version
4082                    .bounding_range()
4083                    .is_some_and(|(_, upper)| !matches!(upper, Bound::Unbounded))
4084            })
4085            .count();
4086        let other_upper_bounds = other
4087            .dependencies
4088            .iter()
4089            .filter(|dep| {
4090                dep.version
4091                    .bounding_range()
4092                    .is_some_and(|(_, upper)| !matches!(upper, Bound::Unbounded))
4093            })
4094            .count();
4095
4096        self_upper_bounds.cmp(&other_upper_bounds)
4097    }
4098}
4099
impl Eq for Fork {}

impl PartialEq for Fork {
    // `conflicts` is omitted from equality: it is an index derived from
    // `dependencies` (see `add_dependency`), so comparing it would be
    // redundant.
    fn eq(&self, other: &Self) -> bool {
        self.dependencies == other.dependencies && self.env == other.env
    }
}
4107
/// A fork of the resolver environment, seeded with an initial package (and,
/// optionally, a version) for the forked resolution to try first.
#[derive(Debug, Clone)]
pub(crate) struct VersionFork {
    /// The environment to use in the fork.
    env: ResolverEnvironment,
    /// The initial package to select in the fork.
    id: Id<PubGrubPackage>,
    /// The initial version to set for the selected package in the fork.
    version: Option<Version>,
}
4117
4118/// Enrich a [`ResolveError`] with additional information about why a given package was included.
4119fn enrich_dependency_error(
4120    error: ResolveError,
4121    id: Id<PubGrubPackage>,
4122    version: &Version,
4123    pubgrub: &State<UvDependencyProvider>,
4124) -> ResolveError {
4125    let Some(name) = pubgrub.package_store[id].name_no_root() else {
4126        return error;
4127    };
4128    let chain = DerivationChainBuilder::from_state(id, version, pubgrub).unwrap_or_default();
4129    ResolveError::Dependencies(Box::new(error), name.clone(), version.clone(), chain)
4130}
4131
/// Compute the set of markers for which a package is known to be relevant.
///
/// Works in two phases over the dependency edges recorded in the PubGrub
/// state: first collect the reverse-dependency closure of `id` (its
/// ancestors, up to the root), then propagate each package's own markers
/// forward from the root through that subgraph until a fixpoint is reached.
fn find_environments(id: Id<PubGrubPackage>, state: &State<UvDependencyProvider>) -> MarkerTree {
    let package = &state.package_store[id];
    // The root is relevant in every environment.
    if package.is_root() {
        return MarkerTree::TRUE;
    }

    // First, collect the reverse-dependency closure for the package. We limit the propagation
    // below to this subgraph so cycles in unrelated packages don't matter here.
    let mut ancestors = FxHashSet::default();
    let mut stack = vec![id];
    let mut root = None;
    ancestors.insert(id);

    while let Some(current) = stack.pop() {
        let Some(incompatibilities) = state.incompatibilities.get(&current) else {
            continue;
        };

        for index in incompatibilities {
            let incompat = &state.incompatibility_store[*index];
            if let Kind::FromDependencyOf(parent, _, child, _) = &incompat.kind {
                // Only follow edges pointing *at* the current package.
                if current != *child {
                    continue;
                }
                // `insert` returns `true` only on first sight, so each parent
                // is pushed at most once even in the presence of cycles.
                if ancestors.insert(*parent) {
                    if state.package_store[*parent].is_root() {
                        root = Some(*parent);
                    }
                    stack.push(*parent);
                }
            }
        }
    }

    // If the package isn't reachable from the root, it's relevant nowhere.
    let Some(root) = root else {
        return MarkerTree::FALSE;
    };

    // Propagate markers forward from the root through the collected subgraph. This reaches a
    // fixpoint even in the presence of cycles, unlike the recursive reverse walk above.
    let mut environments = FxHashMap::default();
    let mut queue = VecDeque::from([root]);
    environments.insert(root, MarkerTree::TRUE);

    while let Some(current) = queue.pop_front() {
        let Some(current_environment) = environments.get(&current).copied() else {
            continue;
        };
        let Some(incompatibilities) = state.incompatibilities.get(&current) else {
            continue;
        };

        for index in incompatibilities {
            let incompat = &state.incompatibility_store[*index];
            let Kind::FromDependencyOf(parent, _, child, _) = &incompat.kind else {
                continue;
            };
            // Only follow forward edges out of `current`, and stay within the
            // ancestor subgraph collected above.
            if current != *parent || !ancestors.contains(child) {
                continue;
            }

            // The child is reachable under its own marker AND'd with whatever
            // environment reaches the parent.
            let mut next_environment = state.package_store[*child].marker();
            next_environment.and(current_environment);

            // OR the new contribution into the child's accumulated markers;
            // only re-enqueue the child if this actually widened them — that
            // is what guarantees termination at a fixpoint.
            let entry = environments.entry(*child).or_insert(MarkerTree::FALSE);
            let mut combined = *entry;
            combined.or(next_environment);
            if combined != *entry {
                *entry = combined;
                queue.push_back(*child);
            }
        }
    }

    // The accumulated markers for `id` itself; `FALSE` if propagation never
    // reached it.
    environments.remove(&id).unwrap_or(MarkerTree::FALSE)
}
4209
/// Bookkeeping about packages involved in version conflicts, used to adjust
/// package priorities after unit propagation (see `prioritize` /
/// `deprioritize`).
#[derive(Debug, Default, Clone)]
struct ConflictTracker {
    /// How often a decision on this package was discarded because another
    /// package was decided earlier.
    affected: FxHashMap<Id<PubGrubPackage>, usize>,
    /// Package(s) to be prioritized after the next unit propagation.
    ///
    /// Distilled from `affected` for fast checking in the hot loop.
    prioritize: Vec<Id<PubGrubPackage>>,
    /// How often this package, having been decided earlier, caused a decision
    /// on another package to be discarded.
    culprit: FxHashMap<Id<PubGrubPackage>, usize>,
    /// Package(s) to be de-prioritized after the next unit propagation.
    ///
    /// Distilled from `culprit` for fast checking in the hot loop.
    deprioritize: Vec<Id<PubGrubPackage>>,
}