//! Given a set of requirements, find a set of compatible packages.
2
3use std::borrow::Cow;
4use std::cmp::Ordering;
5use std::collections::{BTreeMap, BTreeSet, VecDeque};
6use std::fmt::{Display, Formatter, Write};
7use std::ops::Bound;
8use std::sync::Arc;
9use std::time::Instant;
10use std::{iter, slice, thread};
11
12use dashmap::DashMap;
13use either::Either;
14use futures::{FutureExt, StreamExt};
15use itertools::Itertools;
16use pubgrub::{Id, IncompId, Incompatibility, Kind, Range, Ranges, State};
17use rustc_hash::{FxHashMap, FxHashSet};
18use tokio::sync::mpsc::{self, Receiver, Sender};
19use tokio::sync::oneshot;
20use tokio_stream::wrappers::ReceiverStream;
21use tracing::{Level, debug, info, instrument, trace, warn};
22
23use uv_configuration::{Constraints, Excludes, Overrides};
24use uv_distribution::{ArchiveMetadata, DistributionDatabase};
25use uv_distribution_types::{
26    BuiltDist, CompatibleDist, DerivationChain, Dist, DistErrorKind, Identifier, IncompatibleDist,
27    IncompatibleSource, IncompatibleWheel, IndexCapabilities, IndexLocations, IndexMetadata,
28    IndexUrl, InstalledDist, Name, PythonRequirementKind, RemoteSource, Requirement, ResolvedDist,
29    ResolvedDistRef, SourceDist, VersionOrUrlRef, implied_markers,
30};
31use uv_git::GitResolver;
32use uv_normalize::{ExtraName, GroupName, PackageName};
33use uv_pep440::{MIN_VERSION, Version, VersionSpecifiers, release_specifiers_to_ranges};
34use uv_pep508::{
35    MarkerEnvironment, MarkerExpression, MarkerOperator, MarkerTree, MarkerValueString,
36};
37use uv_platform_tags::{IncompatibleTag, Tags};
38use uv_pypi_types::{ConflictItem, ConflictItemRef, ConflictKindRef, Conflicts, VerbatimParsedUrl};
39use uv_static::EnvVars;
40use uv_torch::TorchStrategy;
41use uv_types::{BuildContext, HashStrategy, InstalledPackagesProvider};
42use uv_warnings::warn_user_once;
43
44use crate::candidate_selector::{Candidate, CandidateDist, CandidateSelector};
45use crate::dependency_provider::UvDependencyProvider;
46use crate::error::{NoSolutionError, ResolveError};
47use crate::fork_indexes::ForkIndexes;
48use crate::fork_strategy::ForkStrategy;
49use crate::fork_urls::ForkUrls;
50use crate::manifest::Manifest;
51use crate::pins::FilePins;
52use crate::preferences::{PreferenceSource, Preferences};
53use crate::pubgrub::{
54    DependencySource, PubGrubDependency, PubGrubPackage, PubGrubPackageInner, PubGrubPriorities,
55    PubGrubPython,
56};
57use crate::python_requirement::PythonRequirement;
58use crate::resolution::ResolverOutput;
59use crate::resolution_mode::ResolutionStrategy;
60pub(crate) use crate::resolver::availability::{
61    ResolverVersion, UnavailableErrorChain, UnavailablePackage, UnavailableReason,
62    UnavailableVersion,
63};
64use crate::resolver::batch_prefetch::BatchPrefetcher;
65pub use crate::resolver::derivation::DerivationChainBuilder;
66pub use crate::resolver::environment::ResolverEnvironment;
67use crate::resolver::environment::{
68    ForkingPossibility, fork_version_by_marker, fork_version_by_python_requirement,
69};
70pub(crate) use crate::resolver::fork_map::{ForkMap, ForkSet};
71pub use crate::resolver::index::InMemoryIndex;
72use crate::resolver::indexes::Indexes;
73pub use crate::resolver::provider::{
74    DefaultResolverProvider, MetadataResponse, PackageVersionsResult, ResolverProvider,
75    VersionsResponse, WheelMetadataResult,
76};
77pub use crate::resolver::reporter::{BuildId, Reporter};
78use crate::resolver::system::SystemDependency;
79pub(crate) use crate::resolver::urls::Urls;
80use crate::universal_marker::{ConflictMarker, UniversalMarker};
81use crate::yanks::AllowedYanks;
82use crate::{DependencyMode, Exclusions, FlatIndex, Options, ResolutionMode, VersionMap, marker};
83pub(crate) use provider::MetadataUnavailable;
84
85mod availability;
86mod batch_prefetch;
87mod derivation;
88mod environment;
89mod fork_map;
90mod index;
91mod indexes;
92mod provider;
93mod reporter;
94mod system;
95mod urls;
96
/// The number of conflicts a package may accumulate before we re-prioritize and backtrack.
///
/// NOTE(review): presumably consumed by `reprioritize_conflicts` / the conflict
/// tracking in `solve` (see `record_conflict`); the consumer is outside this view — confirm.
const CONFLICT_THRESHOLD: usize = 5;
99
/// A package resolver, pairing the shared resolution state with the backend
/// used to fetch package and version metadata.
pub struct Resolver<Provider: ResolverProvider, InstalledPackages: InstalledPackagesProvider> {
    /// State shared between the prefetcher and the PubGrub solver, across all forks.
    state: ResolverState<InstalledPackages>,
    /// The metadata backend (e.g., [`DefaultResolverProvider`] for real requests).
    provider: Provider,
}
104
/// State that is shared between the prefetcher and the PubGrub solver during
/// resolution, across all forks.
struct ResolverState<InstalledPackages: InstalledPackagesProvider> {
    /// The root project name, if any (becomes the PubGrub root package; see `solve`).
    project: Option<PackageName>,
    /// The direct requirements to resolve.
    requirements: Vec<Requirement>,
    /// Constraints applied on top of the requirements.
    constraints: Constraints,
    /// Overrides that replace matching requirements.
    overrides: Overrides,
    /// Packages excluded from resolution.
    excludes: Excludes,
    /// Version preferences; extended with resolver decisions during `solve`
    /// (see `PreferenceSource::Resolver` usage).
    preferences: Preferences,
    /// The Git resolver, for Git-based requirements.
    git: GitResolver,
    /// Capabilities of the configured indexes (also passed to the batch prefetcher).
    capabilities: IndexCapabilities,
    /// The locations of the package indexes.
    locations: IndexLocations,
    // NOTE(review): presumably the set of packages exempt from resolution
    // (e.g., already-installed); semantics not visible here — confirm.
    exclusions: Exclusions,
    /// URL requirements, derived from the manifest (`Urls::from_manifest`).
    urls: Urls,
    /// Per-package index assignments, derived from the manifest (`Indexes::from_manifest`).
    indexes: Indexes,
    /// Whether to resolve direct dependencies only, or transitively.
    dependency_mode: DependencyMode,
    /// The strategy for validating distribution hashes.
    hasher: HashStrategy,
    /// The environment (concrete or universal) to resolve for; drives forking.
    env: ResolverEnvironment,
    // The environment of the current Python interpreter.
    current_environment: MarkerEnvironment,
    /// Platform tags, if resolving for a specific platform.
    tags: Option<Tags>,
    /// The target (and exact) Python requirement; logged at the start of `solve`.
    python_requirement: PythonRequirement,
    /// Declared conflicts (e.g., between extras and groups).
    conflicts: Conflicts,
    /// The members of the current workspace, if any.
    workspace_members: BTreeSet<PackageName>,
    /// Selects candidate versions according to the configured resolution strategy.
    selector: CandidateSelector,
    /// The in-memory index of fetched package and version metadata.
    index: InMemoryIndex,
    /// The provider of already-installed packages.
    installed_packages: InstalledPackages,
    /// Incompatibilities for packages that are entirely unavailable.
    unavailable_packages: DashMap<PackageName, UnavailablePackage>,
    /// Incompatibilities for packages that are unavailable at specific versions.
    incomplete_packages: DashMap<PackageName, DashMap<Version, MetadataUnavailable>>,
    /// The options that were used to configure this resolver.
    options: Options,
    /// The reporter to use for this resolver.
    reporter: Option<Arc<dyn Reporter>>,
}
141
impl<'a, Context: BuildContext, InstalledPackages: InstalledPackagesProvider>
    Resolver<DefaultResolverProvider<'a, Context>, InstalledPackages>
{
    /// Initialize a new resolver using the default backend doing real requests.
    ///
    /// Reads the flat index entries.
    ///
    /// # Marker environment
    ///
    /// The marker environment is optional.
    ///
    /// When a marker environment is not provided, the resolver is said to be
    /// in "universal" mode. When in universal mode, the resolution produced
    /// may contain multiple versions of the same package. And thus, in order
    /// to use the resulting resolution, there must be a "universal"-aware
    /// reader of the resolution that knows to exclude distributions that can't
    /// be used in the current environment.
    ///
    /// When a marker environment is provided, the resolver is in
    /// "non-universal" mode, which corresponds to standard `pip` behavior that
    /// works only for a specific marker environment.
    pub fn new(
        manifest: Manifest,
        options: Options,
        python_requirement: &'a PythonRequirement,
        env: ResolverEnvironment,
        current_environment: &MarkerEnvironment,
        conflicts: Conflicts,
        tags: Option<&'a Tags>,
        flat_index: &'a FlatIndex,
        index: &'a InMemoryIndex,
        hasher: &'a HashStrategy,
        build_context: &'a Context,
        installed_packages: InstalledPackages,
        database: DistributionDatabase<'a, Context>,
    ) -> Result<Self, ResolveError> {
        // Construct the default provider, which issues real requests via the
        // distribution database and reads the flat index entries.
        let provider = DefaultResolverProvider::new(
            database,
            flat_index,
            tags,
            python_requirement.target(),
            AllowedYanks::from_manifest(&manifest, &env, options.dependency_mode),
            hasher,
            options.exclude_newer.clone(),
            build_context.locations(),
            build_context.build_options(),
            build_context.capabilities(),
        );

        // Delegate to the generic constructor, threading through the build
        // context's shared services (Git resolver, index capabilities, and
        // index locations).
        Self::new_custom_io(
            manifest,
            options,
            hasher,
            env,
            current_environment,
            tags.cloned(),
            python_requirement,
            conflicts,
            index,
            build_context.git(),
            build_context.capabilities(),
            build_context.locations(),
            provider,
            installed_packages,
        )
    }
}
209
210impl<Provider: ResolverProvider, InstalledPackages: InstalledPackagesProvider>
211    Resolver<Provider, InstalledPackages>
212{
213    /// Initialize a new resolver using a user provided backend.
214    pub fn new_custom_io(
215        manifest: Manifest,
216        options: Options,
217        hasher: &HashStrategy,
218        env: ResolverEnvironment,
219        current_environment: &MarkerEnvironment,
220        tags: Option<Tags>,
221        python_requirement: &PythonRequirement,
222        conflicts: Conflicts,
223        index: &InMemoryIndex,
224        git: &GitResolver,
225        capabilities: &IndexCapabilities,
226        locations: &IndexLocations,
227        provider: Provider,
228        installed_packages: InstalledPackages,
229    ) -> Result<Self, ResolveError> {
230        let state = ResolverState {
231            index: index.clone(),
232            git: git.clone(),
233            capabilities: capabilities.clone(),
234            selector: CandidateSelector::for_resolution(&options, &manifest, &env),
235            dependency_mode: options.dependency_mode,
236            urls: Urls::from_manifest(&manifest, &env, git, options.dependency_mode),
237            indexes: Indexes::from_manifest(&manifest, &env, options.dependency_mode),
238            project: manifest.project,
239            workspace_members: manifest.workspace_members,
240            requirements: manifest.requirements,
241            constraints: manifest.constraints,
242            overrides: manifest.overrides,
243            excludes: manifest.excludes,
244            preferences: manifest.preferences,
245            exclusions: manifest.exclusions,
246            hasher: hasher.clone(),
247            locations: locations.clone(),
248            env,
249            current_environment: current_environment.clone(),
250            tags,
251            python_requirement: python_requirement.clone(),
252            conflicts,
253            installed_packages,
254            unavailable_packages: DashMap::default(),
255            incomplete_packages: DashMap::default(),
256            options,
257            reporter: None,
258        };
259        Ok(Self { state, provider })
260    }
261
262    /// Set the [`Reporter`] to use for this installer.
263    #[must_use]
264    pub fn with_reporter(self, reporter: Arc<dyn Reporter>) -> Self {
265        Self {
266            state: ResolverState {
267                reporter: Some(reporter.clone()),
268                ..self.state
269            },
270            provider: self
271                .provider
272                .with_reporter(reporter.into_distribution_reporter()),
273        }
274    }
275
276    /// Resolve a set of requirements into a set of pinned versions.
277    pub async fn resolve(self) -> Result<ResolverOutput, ResolveError> {
278        let state = Arc::new(self.state);
279        let provider = Arc::new(self.provider);
280
281        // A channel to fetch package metadata (e.g., given `flask`, fetch all versions) and version
282        // metadata (e.g., given `flask==1.0.0`, fetch the metadata for that version).
283        // Channel size is set large to accommodate batch prefetching.
284        let (request_sink, request_stream) = mpsc::channel(300);
285
286        // Run the fetcher.
287        let requests_fut = state.clone().fetch(provider.clone(), request_stream).fuse();
288
289        // Spawn the PubGrub solver on a dedicated thread.
290        let solver = state.clone();
291        let (tx, rx) = oneshot::channel();
292        thread::Builder::new()
293            .name("uv-resolver".into())
294            .spawn(move || {
295                let result = solver.solve(&request_sink);
296
297                // This may fail if the main thread returned early due to an error.
298                let _ = tx.send(result);
299            })
300            .unwrap();
301
302        let resolve_fut = async move { rx.await.map_err(|_| ResolveError::ChannelClosed) };
303
304        // Wait for both to complete.
305        let ((), resolution) = tokio::try_join!(requests_fut, resolve_fut)?;
306
307        state.on_complete();
308        resolution
309    }
310}
311
312impl<InstalledPackages: InstalledPackagesProvider> ResolverState<InstalledPackages> {
313    #[instrument(skip_all)]
314    fn solve(
315        self: Arc<Self>,
316        request_sink: &Sender<Request>,
317    ) -> Result<ResolverOutput, ResolveError> {
318        debug!(
319            "Solving with installed Python version: {}",
320            self.python_requirement.exact()
321        );
322        debug!(
323            "Solving with target Python version: {}",
324            self.python_requirement.target()
325        );
326        if !self.options.exclude_newer.is_empty() {
327            debug!("Solving with exclude-newer: {}", self.options.exclude_newer);
328        }
329
330        let mut visited = FxHashSet::default();
331
332        let root = PubGrubPackage::from(PubGrubPackageInner::Root(self.project.clone()));
333        let pubgrub = State::init(root.clone(), MIN_VERSION.clone());
334        let prefetcher = BatchPrefetcher::new(
335            self.capabilities.clone(),
336            self.index.clone(),
337            request_sink.clone(),
338        );
339        let state = ForkState::new(
340            pubgrub,
341            self.env.clone(),
342            self.python_requirement.clone(),
343            prefetcher,
344        );
345        let mut preferences = self.preferences.clone();
346        let mut forked_states = self.env.initial_forked_states(state)?;
347        let mut resolutions = vec![];
348
349        'FORK: while let Some(mut state) = forked_states.pop() {
350            if let Some(split) = state.env.end_user_fork_display() {
351                let requires_python = state.python_requirement.target();
352                debug!("Solving {split} (requires-python: {requires_python:?})");
353            }
354            let start = Instant::now();
355            loop {
356                let highest_priority_pkg =
357                    if let Some(initial) = state.initial_id.take() {
358                        // If we just forked based on `requires-python`, we can skip unit
359                        // propagation, since we already propagated the package that initiated
360                        // the fork.
361                        initial
362                    } else {
363                        // Run unit propagation.
364                        let result = state.pubgrub.unit_propagation(state.next);
365                        match result {
366                            Err(err) => {
367                                // If unit propagation failed, there is no solution.
368                                return Err(self.convert_no_solution_err(
369                                    err,
370                                    state.fork_urls,
371                                    state.fork_indexes,
372                                    state.env,
373                                    self.current_environment.clone(),
374                                    &visited,
375                                ));
376                            }
377                            Ok(conflicts) => {
378                                for (affected, incompatibility) in conflicts {
379                                    // Conflict tracking: If there was a conflict, track affected and
380                                    // culprit for all root cause incompatibilities
381                                    state.record_conflict(affected, None, incompatibility);
382                                }
383                            }
384                        }
385
386                        // Pre-visit all candidate packages, to allow metadata to be fetched in parallel.
387                        if self.dependency_mode.is_transitive() {
388                            Self::pre_visit(
389                                state
390                                    .pubgrub
391                                    .partial_solution
392                                    .prioritized_packages()
393                                    .map(|(id, range)| (&state.pubgrub.package_store[id], range)),
394                                &self.urls,
395                                &self.indexes,
396                                &state.python_requirement,
397                                request_sink,
398                            )?;
399                        }
400
401                        Self::reprioritize_conflicts(&mut state);
402
403                        trace!(
404                            "Assigned packages: {}",
405                            state
406                                .pubgrub
407                                .partial_solution
408                                .extract_solution()
409                                .filter(|(p, _)| !state.pubgrub.package_store[*p].is_proxy())
410                                .map(|(p, v)| format!("{}=={}", state.pubgrub.package_store[p], v))
411                                .join(", ")
412                        );
413                        // Choose a package.
414                        // We aren't allowed to use the term intersection as it would extend the
415                        // mutable borrow of `state`.
416                        let Some((highest_priority_pkg, _)) =
417                            state.pubgrub.partial_solution.pick_highest_priority_pkg(
418                                |id, _range| state.priorities.get(&state.pubgrub.package_store[id]),
419                            )
420                        else {
421                            // All packages have been assigned, the fork has been successfully resolved
422                            if tracing::enabled!(Level::DEBUG) {
423                                state.prefetcher.log_tried_versions();
424                            }
425                            debug!(
426                                "{} resolution took {:.3}s",
427                                state.env,
428                                start.elapsed().as_secs_f32()
429                            );
430
431                            let resolution = state.into_resolution();
432
433                            // Walk over the selected versions, and mark them as preferences. We have to
434                            // add forks back as to not override the preferences from the lockfile for
435                            // the next fork
436                            //
437                            // If we're using a resolution mode that varies based on whether a dependency is
438                            // direct or transitive, skip preferences, as we risk adding a preference from
439                            // one fork (in which it's a transitive dependency) to another fork (in which
440                            // it's direct).
441                            if matches!(
442                                self.options.resolution_mode,
443                                ResolutionMode::Lowest | ResolutionMode::Highest
444                            ) {
445                                for (package, version) in &resolution.nodes {
446                                    preferences.insert(
447                                        package.name.clone(),
448                                        package.index.clone(),
449                                        resolution
450                                            .env
451                                            .try_universal_markers()
452                                            .unwrap_or(UniversalMarker::TRUE),
453                                        version.clone(),
454                                        PreferenceSource::Resolver,
455                                    );
456                                }
457                            }
458
459                            resolutions.push(resolution);
460                            continue 'FORK;
461                        };
462                        trace!(
463                            "Chose package for decision: {}. remaining choices: {}",
464                            state.pubgrub.package_store[highest_priority_pkg],
465                            state
466                                .pubgrub
467                                .partial_solution
468                                .undecided_packages()
469                                .filter(|(p, _)| !state.pubgrub.package_store[**p].is_proxy())
470                                .map(|(p, _)| state.pubgrub.package_store[*p].to_string())
471                                .join(", ")
472                        );
473
474                        highest_priority_pkg
475                    };
476
477                state.next = highest_priority_pkg;
478
479                // TODO(charlie): Remove as many usages of `next_package` as we can.
480                let next_id = state.next;
481                let next_package = &state.pubgrub.package_store[state.next];
482
483                let url = next_package
484                    .name()
485                    .and_then(|name| state.fork_urls.get(name));
486                let index = next_package
487                    .name()
488                    .and_then(|name| state.fork_indexes.get(name));
489
490                // Consider:
491                // ```toml
492                // dependencies = [
493                //   "iniconfig == 1.1.1 ; python_version < '3.12'",
494                //   "iniconfig @ https://files.pythonhosted.org/packages/ef/a6/62565a6e1cf69e10f5727360368e451d4b7f58beeac6173dc9db836a5b46/iniconfig-2.0.0-py3-none-any.whl ; python_version >= '3.12'",
495                // ]
496                // ```
497                // In the `python_version < '3.12'` case, we haven't pre-visited `iniconfig` yet,
498                // since we weren't sure whether it might also be a URL requirement when
499                // transforming the requirements. For that case, we do another request here
500                // (idempotent due to caching).
501                self.request_package(next_package, url, index, request_sink)?;
502
503                let version = if let Some(version) = state.initial_version.take() {
504                    // If we just forked based on platform support, we can skip version selection,
505                    // since the fork operation itself already selected the appropriate version for
506                    // the platform.
507                    version
508                } else {
509                    let term_intersection = state
510                        .pubgrub
511                        .partial_solution
512                        .term_intersection_for_package(next_id)
513                        .expect("a package was chosen but we don't have a term");
514                    let decision = self.choose_version(
515                        next_package,
516                        next_id,
517                        index.map(IndexMetadata::url),
518                        term_intersection.unwrap_positive(),
519                        &mut state.pins,
520                        &preferences,
521                        &state.fork_urls,
522                        &state.env,
523                        &state.python_requirement,
524                        &state.pubgrub,
525                        &mut visited,
526                        request_sink,
527                    )?;
528
529                    // Pick the next compatible version.
530                    let Some(version) = decision else {
531                        debug!("No compatible version found for: {next_package}");
532
533                        let term_intersection = state
534                            .pubgrub
535                            .partial_solution
536                            .term_intersection_for_package(next_id)
537                            .expect("a package was chosen but we don't have a term");
538
539                        if let PubGrubPackageInner::Package { name, .. } = &**next_package {
540                            // Check if the decision was due to the package being unavailable
541                            if let Some(entry) = self.unavailable_packages.get(name) {
542                                state
543                                    .pubgrub
544                                    .add_incompatibility(Incompatibility::custom_term(
545                                        next_id,
546                                        term_intersection.clone(),
547                                        UnavailableReason::Package(entry.clone()),
548                                    ));
549                                continue;
550                            }
551                        }
552
553                        state
554                            .pubgrub
555                            .add_incompatibility(Incompatibility::no_versions(
556                                next_id,
557                                term_intersection.clone(),
558                            ));
559                        continue;
560                    };
561
562                    let version = match version {
563                        ResolverVersion::Unforked(version) => version,
564                        ResolverVersion::Forked(forks) => {
565                            forked_states.extend(self.version_forks_to_fork_states(state, forks));
566                            continue 'FORK;
567                        }
568                        ResolverVersion::Unavailable(version, reason) => {
569                            state.add_unavailable_version(version, reason);
570                            continue;
571                        }
572                    };
573
574                    // Only consider registry packages for prefetch.
575                    if url.is_none() {
576                        state.prefetcher.prefetch_batches(
577                            next_package,
578                            index,
579                            &version,
580                            term_intersection.unwrap_positive(),
581                            state
582                                .pubgrub
583                                .partial_solution
584                                .unchanging_term_for_package(next_id),
585                            &state.python_requirement,
586                            &self.selector,
587                            &state.env,
588                        )?;
589                    }
590
591                    version
592                };
593
594                state.prefetcher.version_tried(next_package, &version);
595
596                self.on_progress(next_package, &version);
597
598                if !state
599                    .added_dependencies
600                    .entry(next_id)
601                    .or_default()
602                    .insert(version.clone())
603                {
604                    // `dep_incompats` are already in `incompatibilities` so we know there are not satisfied
605                    // terms and can add the decision directly.
606                    state
607                        .pubgrub
608                        .partial_solution
609                        .add_decision(next_id, version);
610                    continue;
611                }
612
613                // Retrieve that package dependencies.
614                let forked_deps = self.get_dependencies_forking(
615                    next_id,
616                    next_package,
617                    &version,
618                    &state.pins,
619                    &state.fork_urls,
620                    &state.env,
621                    &state.python_requirement,
622                    &state.pubgrub,
623                )?;
624
625                match forked_deps {
626                    ForkedDependencies::Unavailable(reason) => {
627                        // Then here, if we get a reason that we consider unrecoverable, we should
628                        // show the derivation chain.
629                        state
630                            .pubgrub
631                            .add_incompatibility(Incompatibility::custom_version(
632                                next_id,
633                                version.clone(),
634                                UnavailableReason::Version(reason),
635                            ));
636                    }
637                    ForkedDependencies::Unforked(dependencies) => {
638                        // Enrich the state with any URLs, etc.
639                        state
640                            .visit_package_version_dependencies(
641                                next_id,
642                                &version,
643                                &self.urls,
644                                &self.indexes,
645                                &dependencies,
646                                &self.git,
647                                &self.workspace_members,
648                                self.selector.resolution_strategy(),
649                            )
650                            .map_err(|err| {
651                                enrich_dependency_error(err, next_id, &version, &state.pubgrub)
652                            })?;
653
654                        // Emit a request to fetch the metadata for each registry package.
655                        self.visit_dependencies(&dependencies, &state, request_sink)
656                            .map_err(|err| {
657                                enrich_dependency_error(err, next_id, &version, &state.pubgrub)
658                            })?;
659
660                        // Add the dependencies to the state.
661                        state.add_package_version_dependencies(next_id, &version, dependencies);
662                    }
663                    ForkedDependencies::Forked {
664                        mut forks,
665                        diverging_packages,
666                    } => {
667                        debug!(
668                            "Pre-fork {} took {:.3}s",
669                            state.env,
670                            start.elapsed().as_secs_f32()
671                        );
672
673                        // Prioritize the forks.
674                        match (self.options.fork_strategy, self.options.resolution_mode) {
675                            (ForkStrategy::Fewest, _) | (_, ResolutionMode::Lowest) => {
676                                // Prefer solving forks with lower Python bounds, since they're more
677                                // likely to produce solutions that work for forks with higher
678                                // Python bounds (whereas the inverse is not true).
679                                forks.sort_by(|a, b| {
680                                    a.cmp_requires_python(b)
681                                        .reverse()
682                                        .then_with(|| a.cmp_upper_bounds(b))
683                                });
684                            }
685                            (ForkStrategy::RequiresPython, _) => {
686                                // Otherwise, prefer solving forks with higher Python bounds, since
687                                // we want to prioritize choosing the latest-compatible package
688                                // version for each Python version.
689                                forks.sort_by(|a, b| {
690                                    a.cmp_requires_python(b).then_with(|| a.cmp_upper_bounds(b))
691                                });
692                            }
693                        }
694
695                        for new_fork_state in self.forks_to_fork_states(
696                            state,
697                            &version,
698                            forks,
699                            request_sink,
700                            &diverging_packages,
701                        ) {
702                            forked_states.push(new_fork_state?);
703                        }
704                        continue 'FORK;
705                    }
706                }
707            }
708        }
709        if resolutions.len() > 1 {
710            info!(
711                "Solved your requirements for {} environments",
712                resolutions.len()
713            );
714        }
715        if tracing::enabled!(Level::DEBUG) {
716            for resolution in &resolutions {
717                if let Some(env) = resolution.env.end_user_fork_display() {
718                    let packages: FxHashSet<_> = resolution
719                        .nodes
720                        .keys()
721                        .map(|package| &package.name)
722                        .collect();
723                    debug!(
724                        "Distinct solution for {env} with {} package(s)",
725                        packages.len()
726                    );
727                }
728            }
729        }
730        for resolution in &resolutions {
731            Self::trace_resolution(resolution);
732        }
733        ResolverOutput::from_state(
734            &resolutions,
735            &self.requirements,
736            &self.constraints,
737            &self.overrides,
738            &self.preferences,
739            &self.index,
740            &self.git,
741            &self.python_requirement,
742            &self.conflicts,
743            self.selector.resolution_strategy(),
744            self.options.clone(),
745        )
746    }
747
748    /// Change the priority of often conflicting packages and backtrack.
749    ///
750    /// To be called after unit propagation.
751    fn reprioritize_conflicts(state: &mut ForkState) {
752        for package in state.conflict_tracker.prioritize.drain(..) {
753            let changed = state
754                .priorities
755                .mark_conflict_early(&state.pubgrub.package_store[package]);
756            if changed {
757                debug!(
758                    "Package {} has too many conflicts (affected), prioritizing",
759                    &state.pubgrub.package_store[package]
760                );
761            } else {
762                debug!(
763                    "Package {} has too many conflicts (affected), already {:?}",
764                    state.pubgrub.package_store[package],
765                    state.priorities.get(&state.pubgrub.package_store[package])
766                );
767            }
768        }
769
770        for package in state.conflict_tracker.deprioritize.drain(..) {
771            let changed = state
772                .priorities
773                .mark_conflict_late(&state.pubgrub.package_store[package]);
774            if changed {
775                debug!(
776                    "Package {} has too many conflicts (culprit), deprioritizing and backtracking",
777                    state.pubgrub.package_store[package],
778                );
779                let backtrack_level = state.pubgrub.backtrack_package(package);
780                if let Some(backtrack_level) = backtrack_level {
781                    debug!("Backtracked {backtrack_level} decisions");
782                } else {
783                    debug!(
784                        "Package {} is not decided, cannot backtrack",
785                        state.pubgrub.package_store[package]
786                    );
787                }
788            } else {
789                debug!(
790                    "Package {} has too many conflicts (culprit), already {:?}",
791                    state.pubgrub.package_store[package],
792                    state.priorities.get(&state.pubgrub.package_store[package])
793                );
794            }
795        }
796    }
797
798    /// When trace level logging is enabled, we dump the final
799    /// set of resolutions, including markers, to help with
800    /// debugging. Namely, this tells use precisely the state
801    /// emitted by the resolver before going off to construct a
802    /// resolution graph.
803    fn trace_resolution(combined: &Resolution) {
804        if !tracing::enabled!(Level::TRACE) {
805            return;
806        }
807        trace!("Resolution: {:?}", combined.env);
808        for edge in &combined.edges {
809            trace!(
810                "Resolution edge: {} -> {}",
811                edge.from
812                    .as_ref()
813                    .map(PackageName::as_str)
814                    .unwrap_or("ROOT"),
815                edge.to,
816            );
817            // The unwraps below are OK because `write`ing to
818            // a String can never fail (except for OOM).
819            let mut msg = String::new();
820            write!(msg, "{}", edge.from_version).unwrap();
821            if let Some(ref extra) = edge.from_extra {
822                write!(msg, " (extra: {extra})").unwrap();
823            }
824            if let Some(ref dev) = edge.from_group {
825                write!(msg, " (group: {dev})").unwrap();
826            }
827
828            write!(msg, " -> ").unwrap();
829
830            write!(msg, "{}", edge.to_version).unwrap();
831            if let Some(ref extra) = edge.to_extra {
832                write!(msg, " (extra: {extra})").unwrap();
833            }
834            if let Some(ref dev) = edge.to_group {
835                write!(msg, " (group: {dev})").unwrap();
836            }
837            if let Some(marker) = edge.marker.contents() {
838                write!(msg, " ; {marker}").unwrap();
839            }
840            trace!("Resolution edge:     {msg}");
841        }
842    }
843
    /// Convert the dependency [`Fork`]s into [`ForkState`]s.
    ///
    /// Each fork receives its own copy of the resolver state, narrowed to the fork's
    /// environment, with the fork's dependencies applied on top of `version` of the
    /// package currently being decided (`current_state.next`).
    fn forks_to_fork_states<'a>(
        &'a self,
        current_state: ForkState,
        version: &'a Version,
        forks: Vec<Fork>,
        request_sink: &'a Sender<Request>,
        diverging_packages: &'a [PackageName],
    ) -> impl Iterator<Item = Result<ForkState, ResolveError>> + 'a {
        debug!(
            "Splitting resolution on {}=={} over {} into {} resolution{} with separate markers",
            current_state.pubgrub.package_store[current_state.next],
            version,
            diverging_packages
                .iter()
                .map(ToString::to_string)
                .join(", "),
            forks.len(),
            if forks.len() == 1 { "" } else { "s" }
        );
        // A fork only makes sense if the dependencies actually diverged into at
        // least two environments.
        assert!(forks.len() >= 2);
        // This is a somewhat tortured technique to ensure
        // that our resolver state is only cloned as much
        // as it needs to be. We basically move the state
        // into `forked_states`, and then only clone it if
        // there is at least one more fork to visit.
        let package = current_state.next;
        let mut cur_state = Some(current_state);
        let forks_len = forks.len();
        forks
            .into_iter()
            .enumerate()
            .map(move |(i, fork)| {
                let is_last = i == forks_len - 1;
                // The final fork takes ownership of the state; every earlier fork
                // leaves a clone behind for the next iteration.
                let forked_state = cur_state.take().unwrap();
                if !is_last {
                    cur_state = Some(forked_state.clone());
                }

                let env = fork.env.clone();
                (fork, forked_state.with_env(env))
            })
            .map(move |(fork, mut forked_state)| {
                // Enrich the state with any URLs, etc.
                forked_state
                    .visit_package_version_dependencies(
                        package,
                        version,
                        &self.urls,
                        &self.indexes,
                        &fork.dependencies,
                        &self.git,
                        &self.workspace_members,
                        self.selector.resolution_strategy(),
                    )
                    .map_err(|err| {
                        enrich_dependency_error(err, package, version, &forked_state.pubgrub)
                    })?;

                // Emit a request to fetch the metadata for each registry package.
                self.visit_dependencies(&fork.dependencies, &forked_state, request_sink)
                    .map_err(|err| {
                        enrich_dependency_error(err, package, version, &forked_state.pubgrub)
                    })?;

                // Add the dependencies to the state.
                forked_state.add_package_version_dependencies(package, version, fork.dependencies);

                Ok(forked_state)
            })
    }
915
916    /// Convert the dependency [`Fork`]s into [`ForkState`]s.
917    #[expect(clippy::unused_self)]
918    fn version_forks_to_fork_states(
919        &self,
920        current_state: ForkState,
921        forks: Vec<VersionFork>,
922    ) -> impl Iterator<Item = ForkState> + '_ {
923        // This is a somewhat tortured technique to ensure
924        // that our resolver state is only cloned as much
925        // as it needs to be. We basically move the state
926        // into `forked_states`, and then only clone it if
927        // there is at least one more fork to visit.
928        let mut cur_state = Some(current_state);
929        let forks_len = forks.len();
930        forks.into_iter().enumerate().map(move |(i, fork)| {
931            let is_last = i == forks_len - 1;
932            let mut forked_state = cur_state.take().unwrap();
933            if !is_last {
934                cur_state = Some(forked_state.clone());
935            }
936            forked_state.initial_id = Some(fork.id);
937            forked_state.initial_version = fork.version;
938            forked_state.with_env(fork.env)
939        })
940    }
941
942    /// Visit a set of [`PubGrubDependency`] entities prior to selection.
943    fn visit_dependencies(
944        &self,
945        dependencies: &[PubGrubDependency],
946        state: &ForkState,
947        request_sink: &Sender<Request>,
948    ) -> Result<(), ResolveError> {
949        for dependency in dependencies {
950            let PubGrubDependency {
951                package,
952                version: _,
953                parent: _,
954                source: _,
955            } = dependency;
956            let url = package.name().and_then(|name| state.fork_urls.get(name));
957            let index = package.name().and_then(|name| state.fork_indexes.get(name));
958            self.visit_package(package, url, index, request_sink)?;
959        }
960        Ok(())
961    }
962
963    /// Visit a [`PubGrubPackage`] prior to selection. This should be called on a [`PubGrubPackage`]
964    /// before it is selected, to allow metadata to be fetched in parallel.
965    fn visit_package(
966        &self,
967        package: &PubGrubPackage,
968        url: Option<&VerbatimParsedUrl>,
969        index: Option<&IndexMetadata>,
970        request_sink: &Sender<Request>,
971    ) -> Result<(), ResolveError> {
972        // Ignore unresolved URL packages, i.e., packages that use a direct URL in some forks.
973        if url.is_none()
974            && package
975                .name()
976                .map(|name| self.urls.any_url(name))
977                .unwrap_or(true)
978        {
979            return Ok(());
980        }
981
982        self.request_package(package, url, index, request_sink)
983    }
984
985    fn request_package(
986        &self,
987        package: &PubGrubPackage,
988        url: Option<&VerbatimParsedUrl>,
989        index: Option<&IndexMetadata>,
990        request_sink: &Sender<Request>,
991    ) -> Result<(), ResolveError> {
992        // Only request real packages.
993        let Some(name) = package.name_no_root() else {
994            return Ok(());
995        };
996
997        if let Some(url) = url {
998            // Verify that the package is allowed under the hash-checking policy.
999            if !self.hasher.allows_url(&url.verbatim) {
1000                return Err(ResolveError::UnhashedPackage(name.clone()));
1001            }
1002
1003            // Emit a request to fetch the metadata for this distribution.
1004            let dist = Dist::from_url(name.clone(), url.clone())?;
1005            if self.index.distributions().register(dist.distribution_id()) {
1006                request_sink.blocking_send(Request::Dist(dist))?;
1007            }
1008        } else if let Some(index) = index {
1009            // Emit a request to fetch the metadata for this package on the index.
1010            if self
1011                .index
1012                .explicit()
1013                .register((name.clone(), index.url().clone()))
1014            {
1015                request_sink.blocking_send(Request::Package(name.clone(), Some(index.clone())))?;
1016            }
1017        } else {
1018            // Emit a request to fetch the metadata for this package.
1019            if self.index.implicit().register(name.clone()) {
1020                request_sink.blocking_send(Request::Package(name.clone(), None))?;
1021            }
1022        }
1023        Ok(())
1024    }
1025
1026    /// Visit the set of [`PubGrubPackage`] candidates prior to selection. This allows us to fetch
1027    /// metadata for all packages in parallel.
1028    fn pre_visit<'data>(
1029        packages: impl Iterator<Item = (&'data PubGrubPackage, &'data Range<Version>)>,
1030        urls: &Urls,
1031        indexes: &Indexes,
1032        python_requirement: &PythonRequirement,
1033        request_sink: &Sender<Request>,
1034    ) -> Result<(), ResolveError> {
1035        // Iterate over the potential packages, and fetch file metadata for any of them. These
1036        // represent our current best guesses for the versions that we _might_ select.
1037        for (package, range) in packages {
1038            let PubGrubPackageInner::Package {
1039                name,
1040                extra: None,
1041                group: None,
1042                marker: MarkerTree::TRUE,
1043            } = &**package
1044            else {
1045                continue;
1046            };
1047            // Avoid pre-visiting packages that have any URLs in any fork. At this point we can't
1048            // tell whether they are registry distributions or which url they use.
1049            if urls.any_url(name) {
1050                continue;
1051            }
1052            // Avoid visiting packages that may use an explicit index.
1053            if indexes.contains_key(name) {
1054                continue;
1055            }
1056            request_sink.blocking_send(Request::Prefetch(
1057                name.clone(),
1058                range.clone(),
1059                python_requirement.clone(),
1060            ))?;
1061        }
1062        Ok(())
1063    }
1064
    /// Given a candidate package, choose the next version in range to try.
    ///
    /// Returns `None` when there are no versions in the given range, rejecting the current partial
    /// solution.
    ///
    /// Depending on the package kind, the version is either fixed (root, system), skipped
    /// entirely (Python, which is only constrained via incompatibilities), or selected from a
    /// pinned URL or the registry.
    // TODO(konsti): re-enable tracing. This trace is crucial to understanding the
    // tracing-durations-export diagrams, but it took ~5% resolver thread runtime for apache-airflow
    // when I last measured.
    #[cfg_attr(feature = "tracing-durations-export", instrument(skip_all, fields(%package)))]
    fn choose_version(
        &self,
        package: &PubGrubPackage,
        id: Id<PubGrubPackage>,
        index: Option<&IndexUrl>,
        range: &Range<Version>,
        pins: &mut FilePins,
        preferences: &Preferences,
        fork_urls: &ForkUrls,
        env: &ResolverEnvironment,
        python_requirement: &PythonRequirement,
        pubgrub: &State<UvDependencyProvider>,
        visited: &mut FxHashSet<PackageName>,
        request_sink: &Sender<Request>,
    ) -> Result<Option<ResolverVersion>, ResolveError> {
        match &**package {
            // The root is always "available" at a single placeholder version.
            PubGrubPackageInner::Root(_) => {
                Ok(Some(ResolverVersion::Unforked(MIN_VERSION.clone())))
            }

            PubGrubPackageInner::Python(_) => {
                // Dependencies on Python are only added when a package is incompatible; as such,
                // we don't need to do anything here.
                Ok(None)
            }

            PubGrubPackageInner::System(_) => {
                // We don't care what the actual version is here, just that it's consistent across
                // the dependency graph.
                let Some(version) = range.as_singleton() else {
                    return Ok(None);
                };
                Ok(Some(ResolverVersion::Unforked(version.clone())))
            }

            PubGrubPackageInner::Marker { name, .. }
            | PubGrubPackageInner::Extra { name, .. }
            | PubGrubPackageInner::Group { name, .. }
            | PubGrubPackageInner::Package { name, .. } => {
                // A URL pinned in this fork takes precedence over any registry lookup.
                if let Some(url) = package.name().and_then(|name| fork_urls.get(name)) {
                    self.choose_version_url(id, name, range, url, env, python_requirement, pubgrub)
                } else {
                    self.choose_version_registry(
                        package,
                        id,
                        name,
                        index,
                        range,
                        preferences,
                        env,
                        python_requirement,
                        pubgrub,
                        pins,
                        visited,
                        request_sink,
                    )
                }
            }
        }
    }
1133
    /// Select a version for a URL requirement. Since there is only one version per URL, we return
    /// that version if it is in range and `None` otherwise.
    ///
    /// A returned [`ResolverVersion::Unavailable`] signals that the single version exists but is
    /// incompatible (e.g., wheel tags or `Requires-Python`).
    fn choose_version_url(
        &self,
        id: Id<PubGrubPackage>,
        name: &PackageName,
        range: &Range<Version>,
        url: &VerbatimParsedUrl,
        env: &ResolverEnvironment,
        python_requirement: &PythonRequirement,
        pubgrub: &State<UvDependencyProvider>,
    ) -> Result<Option<ResolverVersion>, ResolveError> {
        debug!(
            "Searching for a compatible version of {name} @ {} ({range})",
            url.verbatim
        );

        // Wait for the metadata to be available.
        let dist = Dist::from_url(name.clone(), url.clone())?;
        let distribution_id = dist.distribution_id();
        let response = self
            .index
            .distributions()
            .wait_blocking(&distribution_id)
            .ok_or_else(|| ResolveError::UnregisteredTask(dist.to_string()))?;

        // If we failed to fetch the metadata for a URL, we can't proceed.
        let metadata = match &*response {
            MetadataResponse::Found(archive) => &archive.metadata,
            MetadataResponse::Unavailable(reason) => {
                // Record the reason so error reporting can surface it later.
                self.unavailable_packages
                    .insert(name.clone(), reason.into());
                return Ok(None);
            }
            // TODO(charlie): Add derivation chain for URL dependencies. In practice, this isn't
            // critical since we fetch URL dependencies _prior_ to invoking the resolver.
            MetadataResponse::Error(dist, err) => {
                return Err(ResolveError::Dist(
                    DistErrorKind::from_requested_dist(dist, &**err),
                    dist.clone(),
                    DerivationChain::default(),
                    err.clone(),
                ));
            }
        };

        let version = &metadata.version;

        // The version is incompatible with the requirement.
        if !range.contains(version) {
            return Ok(None);
        }

        // If the URL points to a pre-built wheel, and the wheel's supported Python versions don't
        // match our `Requires-Python`, mark it as incompatible.
        if let Dist::Built(dist) = &dist {
            let filename = match &dist {
                BuiltDist::Registry(dist) => &dist.best_wheel().filename,
                BuiltDist::DirectUrl(dist) => &dist.filename,
                BuiltDist::Path(dist) => &dist.filename,
            };

            // If the wheel does _not_ cover an environment that requires artifact coverage, it's
            // incompatible. This check only applies to universal resolutions (no concrete marker
            // environment) with user-required artifact environments.
            if env.marker_environment().is_none() && !self.options.artifact_environments.is_empty()
            {
                let wheel_marker = implied_markers(filename);
                // If the caller marked an environment as requiring artifact coverage, ensure it
                // has coverage.
                for environment_marker in self.options.artifact_environments.iter().copied() {
                    // If the platform is part of the current environment...
                    if env.included_by_marker(environment_marker)
                        && !find_environments(id, pubgrub).is_disjoint(environment_marker)
                    {
                        // ...but the wheel doesn't support it, it's incompatible.
                        if wheel_marker.is_disjoint(environment_marker) {
                            return Ok(Some(ResolverVersion::Unavailable(
                                version.clone(),
                                UnavailableVersion::IncompatibleDist(IncompatibleDist::Wheel(
                                    IncompatibleWheel::MissingPlatform(environment_marker),
                                )),
                            )));
                        }
                    }
                }
            }

            // If the wheel's Python tag doesn't match the target Python, it's incompatible.
            if !python_requirement.target().matches_wheel_tag(filename) {
                return Ok(Some(ResolverVersion::Unavailable(
                    filename.version.clone(),
                    UnavailableVersion::IncompatibleDist(IncompatibleDist::Wheel(
                        IncompatibleWheel::Tag(IncompatibleTag::AbiPythonVersion),
                    )),
                )));
            }
        }

        // The version is incompatible due to its `Requires-Python` requirement.
        if let Some(requires_python) = metadata.requires_python.as_ref() {
            if !python_requirement.target().is_contained_by(requires_python) {
                // Distinguish whether the installed or the targeted Python is at fault,
                // for more precise error reporting.
                let kind = if python_requirement.installed() == python_requirement.target() {
                    PythonRequirementKind::Installed
                } else {
                    PythonRequirementKind::Target
                };
                return Ok(Some(ResolverVersion::Unavailable(
                    version.clone(),
                    UnavailableVersion::IncompatibleDist(IncompatibleDist::Source(
                        IncompatibleSource::RequiresPython(requires_python.clone(), kind),
                    )),
                )));
            }
        }

        Ok(Some(ResolverVersion::Unforked(version.clone())))
    }
1250
    /// Given a candidate registry requirement, choose the next version in range to try, or `None`
    /// if there is no version in this range.
    ///
    /// May instead return a [`ResolverVersion::Unavailable`] (version exists but no usable
    /// distribution) or a [`ResolverVersion::Forked`] (the candidate requires splitting the
    /// resolution, e.g., by `Requires-Python` or platform coverage).
    fn choose_version_registry(
        &self,
        package: &PubGrubPackage,
        id: Id<PubGrubPackage>,
        name: &PackageName,
        index: Option<&IndexUrl>,
        range: &Range<Version>,
        preferences: &Preferences,
        env: &ResolverEnvironment,
        python_requirement: &PythonRequirement,
        pubgrub: &State<UvDependencyProvider>,
        pins: &mut FilePins,
        visited: &mut FxHashSet<PackageName>,
        request_sink: &Sender<Request>,
    ) -> Result<Option<ResolverVersion>, ResolveError> {
        // Wait for the metadata to be available. An explicit index and the implicit
        // (default) index are tracked in separate caches.
        let versions_response = if let Some(index) = index {
            self.index
                .explicit()
                .wait_blocking(&(name.clone(), index.clone()))
                .ok_or_else(|| ResolveError::UnregisteredTask(name.to_string()))?
        } else {
            self.index
                .implicit()
                .wait_blocking(name)
                .ok_or_else(|| ResolveError::UnregisteredTask(name.to_string()))?
        };
        visited.insert(name.clone());

        // Record why a package has no versions, so error reporting can surface it later.
        let version_maps = match *versions_response {
            VersionsResponse::Found(ref version_maps) => version_maps.as_slice(),
            VersionsResponse::NoIndex => {
                self.unavailable_packages
                    .insert(name.clone(), UnavailablePackage::NoIndex);
                &[]
            }
            VersionsResponse::Offline => {
                self.unavailable_packages
                    .insert(name.clone(), UnavailablePackage::Offline);
                &[]
            }
            VersionsResponse::NotFound => {
                self.unavailable_packages
                    .insert(name.clone(), UnavailablePackage::NotFound);
                &[]
            }
        };

        debug!("Searching for a compatible version of {package} ({range})");

        // Find a version.
        let Some(candidate) = self.selector.select(
            name,
            range,
            version_maps,
            preferences,
            &self.installed_packages,
            &self.exclusions,
            index,
            env,
            self.tags.as_ref(),
        ) else {
            // Short circuit: we couldn't find _any_ versions for a package.
            return Ok(None);
        };

        let dist = match candidate.dist() {
            CandidateDist::Compatible(dist) => dist,
            CandidateDist::Incompatible {
                incompatible_dist: incompatibility,
                prioritized_dist: _,
            } => {
                // If the version is incompatible because no distributions are compatible, exit early.
                return Ok(Some(ResolverVersion::Unavailable(
                    candidate.version().clone(),
                    // TODO(charlie): We can avoid this clone; the candidate is dropped here and
                    // owns the incompatibility.
                    UnavailableVersion::IncompatibleDist(incompatibility.clone()),
                )));
            }
        };

        // Check whether the version is incompatible due to its Python requirement.
        if let Some((requires_python, incompatibility)) =
            Self::check_requires_python(dist, python_requirement)
        {
            // Under the `requires-python` fork strategy, in a universal resolution, try to
            // split the environment by Python version instead of rejecting the candidate.
            if matches!(self.options.fork_strategy, ForkStrategy::RequiresPython) {
                if env.marker_environment().is_none() {
                    let forks = fork_version_by_python_requirement(
                        requires_python,
                        python_requirement,
                        env,
                    );
                    if !forks.is_empty() {
                        debug!(
                            "Forking Python requirement `{}` on `{}` for {}=={} ({})",
                            python_requirement.target(),
                            requires_python,
                            name,
                            candidate.version(),
                            forks
                                .iter()
                                .map(ToString::to_string)
                                .collect::<Vec<_>>()
                                .join(", ")
                        );
                        let forks = forks
                            .into_iter()
                            .map(|env| VersionFork {
                                env,
                                id,
                                version: None,
                            })
                            .collect();
                        return Ok(Some(ResolverVersion::Forked(forks)));
                    }
                }
            }

            // No fork was possible: report the version as unavailable.
            return Ok(Some(ResolverVersion::Unavailable(
                candidate.version().clone(),
                UnavailableVersion::IncompatibleDist(incompatibility),
            )));
        }

        // Check whether this version covers all supported platforms; and, if not, generate a fork.
        if let Some(forked) = self.fork_version_registry(
            &candidate,
            dist,
            version_maps,
            package,
            id,
            name,
            index,
            range,
            preferences,
            env,
            pubgrub,
            pins,
            request_sink,
        )? {
            return Ok(Some(forked));
        }

        // Resolve a display name for the selected file, for logging only.
        let filename = match dist.for_installation() {
            ResolvedDistRef::InstallableRegistrySourceDist { sdist, .. } => sdist
                .filename()
                .unwrap_or(Cow::Borrowed("unknown filename")),
            ResolvedDistRef::InstallableRegistryBuiltDist { wheel, .. } => wheel
                .filename()
                .unwrap_or(Cow::Borrowed("unknown filename")),
            ResolvedDistRef::Installed { .. } => Cow::Borrowed("installed"),
        };

        debug!(
            "Selecting: {}=={} [{}] ({})",
            name,
            candidate.version(),
            candidate.choice_kind(),
            filename,
        );
        self.visit_candidate(&candidate, dist, package, name, pins, request_sink)?;

        let version = candidate.version().clone();
        Ok(Some(ResolverVersion::Unforked(version)))
    }
1419
1420    /// Determine whether a candidate covers all supported platforms; and, if not, generate a fork.
1421    ///
1422    /// This only ever applies to versions that lack source distributions And, for now, we only
1423    /// apply it in two cases:
1424    ///
1425    /// 1. Local versions, where the non-local version has greater platform coverage. The intent is
1426    ///    such that, if we're resolving PyTorch, and we choose `torch==2.5.2+cpu`, we want to
1427    ///    fork so that we can select `torch==2.5.2` on macOS (since the `+cpu` variant doesn't
1428    ///    include any macOS wheels).
1429    /// 2. Platforms that the user explicitly marks as "required" (opt-in). For example, the user
1430    ///    might require that the generated resolution always includes wheels for x86 macOS, and
1431    ///    fails entirely if the platform is unsupported.
1432    fn fork_version_registry(
1433        &self,
1434        candidate: &Candidate,
1435        dist: &CompatibleDist,
1436        version_maps: &[VersionMap],
1437        package: &PubGrubPackage,
1438        id: Id<PubGrubPackage>,
1439        name: &PackageName,
1440        index: Option<&IndexUrl>,
1441        range: &Range<Version>,
1442        preferences: &Preferences,
1443        env: &ResolverEnvironment,
1444        pubgrub: &State<UvDependencyProvider>,
1445        pins: &mut FilePins,
1446        request_sink: &Sender<Request>,
1447    ) -> Result<Option<ResolverVersion>, ResolveError> {
1448        // This only applies to universal resolutions.
1449        if env.marker_environment().is_some() {
1450            return Ok(None);
1451        }
1452
1453        // If the package is already compatible with all environments (as is the case for
1454        // packages that include a source distribution), we don't need to fork.
1455        if dist.implied_markers().is_true() {
1456            return Ok(None);
1457        }
1458
1459        // If the caller marked an environment as requiring artifact coverage, ensure it has
1460        // coverage.
1461        for marker in self.options.artifact_environments.iter().copied() {
1462            // If the platform is part of the current environment...
1463            if env.included_by_marker(marker) {
1464                // But isn't supported by the distribution...
1465                if dist.implied_markers().is_disjoint(marker)
1466                    && !find_environments(id, pubgrub).is_disjoint(marker)
1467                {
1468                    // Then we need to fork.
1469                    let Some((left, right)) = fork_version_by_marker(env, marker) else {
1470                        return Ok(Some(ResolverVersion::Unavailable(
1471                            candidate.version().clone(),
1472                            UnavailableVersion::IncompatibleDist(IncompatibleDist::Wheel(
1473                                IncompatibleWheel::MissingPlatform(marker),
1474                            )),
1475                        )));
1476                    };
1477
1478                    debug!(
1479                        "Forking on required platform `{}` for {}=={} ({})",
1480                        marker.try_to_string().unwrap_or_else(|| "true".to_string()),
1481                        name,
1482                        candidate.version(),
1483                        [&left, &right]
1484                            .iter()
1485                            .map(ToString::to_string)
1486                            .collect::<Vec<_>>()
1487                            .join(", ")
1488                    );
1489                    let forks = vec![
1490                        VersionFork {
1491                            env: left,
1492                            id,
1493                            version: None,
1494                        },
1495                        VersionFork {
1496                            env: right,
1497                            id,
1498                            version: None,
1499                        },
1500                    ];
1501                    return Ok(Some(ResolverVersion::Forked(forks)));
1502                }
1503            }
1504        }
1505
1506        // For now, we only apply this to local versions.
1507        if !candidate.version().is_local() {
1508            return Ok(None);
1509        }
1510
1511        debug!(
1512            "Looking at local version: {}=={}",
1513            name,
1514            candidate.version()
1515        );
1516
1517        // If there's a non-local version...
1518        let range = range.clone().intersection(&Range::singleton(
1519            candidate.version().clone().without_local(),
1520        ));
1521
1522        let Some(base_candidate) = self.selector.select(
1523            name,
1524            &range,
1525            version_maps,
1526            preferences,
1527            &self.installed_packages,
1528            &self.exclusions,
1529            index,
1530            env,
1531            self.tags.as_ref(),
1532        ) else {
1533            return Ok(None);
1534        };
1535        let CandidateDist::Compatible(base_dist) = base_candidate.dist() else {
1536            return Ok(None);
1537        };
1538
1539        // ...and the non-local version has greater platform support...
1540        let mut remainder = {
1541            let mut remainder = base_dist.implied_markers();
1542            remainder.and(dist.implied_markers().negate());
1543            remainder
1544        };
1545        if remainder.is_false() {
1546            return Ok(None);
1547        }
1548
1549        // If the remainder isn't relevant to the current environment, there's no need to fork.
1550        // For example, if we're solving for `sys_platform == 'darwin'` but the remainder is
1551        // `sys_platform == 'linux'`, we don't need to fork.
1552        if !env.included_by_marker(remainder) {
1553            return Ok(None);
1554        }
1555
1556        // Similarly, if the local distribution is incompatible with the current environment, then
1557        // use the base distribution instead (but don't fork).
1558        if !env.included_by_marker(dist.implied_markers()) {
1559            let filename = match dist.for_installation() {
1560                ResolvedDistRef::InstallableRegistrySourceDist { sdist, .. } => sdist
1561                    .filename()
1562                    .unwrap_or(Cow::Borrowed("unknown filename")),
1563                ResolvedDistRef::InstallableRegistryBuiltDist { wheel, .. } => wheel
1564                    .filename()
1565                    .unwrap_or(Cow::Borrowed("unknown filename")),
1566                ResolvedDistRef::Installed { .. } => Cow::Borrowed("installed"),
1567            };
1568
1569            debug!(
1570                "Preferring non-local candidate: {}=={} [{}] ({})",
1571                name,
1572                base_candidate.version(),
1573                base_candidate.choice_kind(),
1574                filename,
1575            );
1576            self.visit_candidate(
1577                &base_candidate,
1578                base_dist,
1579                package,
1580                name,
1581                pins,
1582                request_sink,
1583            )?;
1584
1585            return Ok(Some(ResolverVersion::Unforked(
1586                base_candidate.version().clone(),
1587            )));
1588        }
1589
1590        // If the implied markers includes _some_ macOS environments, but the remainder doesn't,
1591        // then we can extend the implied markers to include _all_ macOS environments. Same goes for
1592        // Linux and Windows.
1593        //
1594        // The idea here is that the base version could support (e.g.) ARM macOS, but not Intel
1595        // macOS. But if _neither_ version supports Intel macOS, we'd rather use `sys_platform == 'darwin'`
1596        // instead of `sys_platform == 'darwin' and platform_machine == 'arm64'`, since it's much
1597        // simpler, and _neither_ version will succeed with Intel macOS anyway.
1598        for value in [
1599            arcstr::literal!("darwin"),
1600            arcstr::literal!("linux"),
1601            arcstr::literal!("win32"),
1602        ] {
1603            let sys_platform = MarkerTree::expression(MarkerExpression::String {
1604                key: MarkerValueString::SysPlatform,
1605                operator: MarkerOperator::Equal,
1606                value,
1607            });
1608            if dist.implied_markers().is_disjoint(sys_platform)
1609                && !remainder.is_disjoint(sys_platform)
1610            {
1611                remainder.or(sys_platform);
1612            }
1613        }
1614
1615        // Otherwise, we need to fork.
1616        let Some((base_env, local_env)) = fork_version_by_marker(env, remainder) else {
1617            return Ok(None);
1618        };
1619
1620        debug!(
1621            "Forking platform for {}=={} ({})",
1622            name,
1623            candidate.version(),
1624            [&base_env, &local_env]
1625                .iter()
1626                .map(ToString::to_string)
1627                .collect::<Vec<_>>()
1628                .join(", ")
1629        );
1630        self.visit_candidate(candidate, dist, package, name, pins, request_sink)?;
1631        self.visit_candidate(
1632            &base_candidate,
1633            base_dist,
1634            package,
1635            name,
1636            pins,
1637            request_sink,
1638        )?;
1639
1640        let forks = vec![
1641            VersionFork {
1642                env: base_env.clone(),
1643                id,
1644                version: Some(base_candidate.version().clone()),
1645            },
1646            VersionFork {
1647                env: local_env.clone(),
1648                id,
1649                version: Some(candidate.version().clone()),
1650            },
1651        ];
1652        Ok(Some(ResolverVersion::Forked(forks)))
1653    }
1654
1655    /// Visit a selected candidate.
1656    fn visit_candidate(
1657        &self,
1658        candidate: &Candidate,
1659        dist: &CompatibleDist,
1660        package: &PubGrubPackage,
1661        name: &PackageName,
1662        pins: &mut FilePins,
1663        request_sink: &Sender<Request>,
1664    ) -> Result<(), ResolveError> {
1665        // We want to return a package pinned to a specific version; but we _also_ want to
1666        // store the exact file that we selected to satisfy that version.
1667        pins.insert(candidate, dist);
1668
1669        // Emit a request to fetch the metadata for this version.
1670        if matches!(&**package, PubGrubPackageInner::Package { .. }) {
1671            if self.dependency_mode.is_transitive() {
1672                let dist = dist.for_resolution();
1673                if self.index.distributions().register(dist.distribution_id()) {
1674                    if name != dist.name() {
1675                        return Err(ResolveError::MismatchedPackageName {
1676                            request: "distribution",
1677                            expected: name.clone(),
1678                            actual: dist.name().clone(),
1679                        });
1680                    }
1681                    // Verify that the package is allowed under the hash-checking policy.
1682                    if !self
1683                        .hasher
1684                        .allows_package(candidate.name(), candidate.version())
1685                    {
1686                        return Err(ResolveError::UnhashedPackage(candidate.name().clone()));
1687                    }
1688
1689                    let request = Request::from(dist);
1690                    request_sink.blocking_send(request)?;
1691                }
1692            }
1693        }
1694
1695        Ok(())
1696    }
1697
1698    /// Check if the distribution is incompatible with the Python requirement, and if so, return
1699    /// the incompatibility.
1700    fn check_requires_python<'dist>(
1701        dist: &'dist CompatibleDist,
1702        python_requirement: &PythonRequirement,
1703    ) -> Option<(&'dist VersionSpecifiers, IncompatibleDist)> {
1704        let requires_python = dist.requires_python()?;
1705        if python_requirement.target().is_contained_by(requires_python) {
1706            None
1707        } else {
1708            let incompatibility = if matches!(dist, CompatibleDist::CompatibleWheel { .. }) {
1709                IncompatibleDist::Wheel(IncompatibleWheel::RequiresPython(
1710                    requires_python.clone(),
1711                    if python_requirement.installed() == python_requirement.target() {
1712                        PythonRequirementKind::Installed
1713                    } else {
1714                        PythonRequirementKind::Target
1715                    },
1716                ))
1717            } else {
1718                IncompatibleDist::Source(IncompatibleSource::RequiresPython(
1719                    requires_python.clone(),
1720                    if python_requirement.installed() == python_requirement.target() {
1721                        PythonRequirementKind::Installed
1722                    } else {
1723                        PythonRequirementKind::Target
1724                    },
1725                ))
1726            };
1727            Some((requires_python, incompatibility))
1728        }
1729    }
1730
1731    /// Given a candidate package and version, return its dependencies.
1732    #[instrument(skip_all, fields(%package, %version))]
1733    fn get_dependencies_forking(
1734        &self,
1735        id: Id<PubGrubPackage>,
1736        package: &PubGrubPackage,
1737        version: &Version,
1738        pins: &FilePins,
1739        fork_urls: &ForkUrls,
1740        env: &ResolverEnvironment,
1741        python_requirement: &PythonRequirement,
1742        pubgrub: &State<UvDependencyProvider>,
1743    ) -> Result<ForkedDependencies, ResolveError> {
1744        let result = self.get_dependencies(
1745            id,
1746            package,
1747            version,
1748            pins,
1749            fork_urls,
1750            env,
1751            python_requirement,
1752            pubgrub,
1753        );
1754        if env.marker_environment().is_some() {
1755            result.map(|deps| match deps {
1756                Dependencies::Available(deps) | Dependencies::Unforkable(deps) => {
1757                    ForkedDependencies::Unforked(deps)
1758                }
1759                Dependencies::Unavailable(err) => ForkedDependencies::Unavailable(err),
1760            })
1761        } else {
1762            Ok(result?.fork(env, python_requirement, &self.conflicts))
1763        }
1764    }
1765
    /// Given a candidate package and version, return its dependencies.
    ///
    /// Returns [`Dependencies::Unavailable`] when the version's metadata is unusable or its
    /// `Requires-Python` excludes the target Python range. Errors when no metadata request was
    /// ever registered for the pinned distribution, or when the fetched metadata reports a
    /// package name that doesn't match the requested one.
    #[instrument(skip_all, fields(%package, %version))]
    fn get_dependencies(
        &self,
        id: Id<PubGrubPackage>,
        package: &PubGrubPackage,
        version: &Version,
        pins: &FilePins,
        fork_urls: &ForkUrls,
        env: &ResolverEnvironment,
        python_requirement: &PythonRequirement,
        pubgrub: &State<UvDependencyProvider>,
    ) -> Result<Dependencies, ResolveError> {
        let dependencies = match &**package {
            // The virtual root depends on the user's direct requirements.
            PubGrubPackageInner::Root(_) => {
                let no_dev_deps = BTreeMap::default();
                let requirements = self.flatten_requirements(
                    &self.requirements,
                    &no_dev_deps,
                    None,
                    None,
                    None,
                    env,
                    python_requirement,
                );

                requirements
                    .flat_map(move |requirement| {
                        PubGrubDependency::from_requirement(
                            &self.conflicts,
                            requirement,
                            None,
                            Some(package),
                        )
                    })
                    .collect()
            }

            // A concrete package: fetch its metadata and flatten its declared dependencies.
            PubGrubPackageInner::Package {
                name,
                extra,
                group,
                marker: _,
            } => {
                // If we're excluding transitive dependencies, short-circuit.
                if self.dependency_mode.is_direct() {
                    return Ok(Dependencies::Unforkable(Vec::default()));
                }

                // Look up the distribution ID from the pins (common case) or fork URLs.
                let owned_id;
                let distribution_id = if let Some((_, metadata_id)) =
                    pins.dist_and_id(name, version)
                {
                    metadata_id
                } else if let Some(url) = fork_urls.get(name) {
                    let dist = Dist::from_url(name.clone(), url.clone())?;
                    owned_id = dist.distribution_id();
                    &owned_id
                } else {
                    // Reaching this branch indicates a resolver invariant violation: a version
                    // was chosen without ever pinning a distribution for it.
                    debug_assert!(
                        false,
                        "Dependencies were requested for a package without a pinned distribution"
                    );
                    return Err(ResolveError::UnregisteredTask(format!("{name}=={version}")));
                };

                // If the package does not exist in the registry or locally, we cannot fetch its dependencies
                if self.dependency_mode.is_transitive()
                    && self.unavailable_packages.get(name).is_some()
                    && self.installed_packages.get_packages(name).is_empty()
                {
                    debug_assert!(
                        false,
                        "Dependencies were requested for a package that is not available"
                    );
                    return Err(ResolveError::PackageUnavailable(name.clone()));
                }

                // Wait for the metadata to be available.
                // NOTE(review): this blocks the solver thread until the in-flight metadata
                // request (emitted in `visit_candidate`) completes.
                let response = self
                    .index
                    .distributions()
                    .wait_blocking(distribution_id)
                    .ok_or_else(|| ResolveError::UnregisteredTask(format!("{name}=={version}")))?;

                let metadata = match &*response {
                    MetadataResponse::Found(archive) => &archive.metadata,
                    // The metadata was unusable (e.g., it failed to parse): record the version as
                    // incomplete, and report it as unavailable rather than failing the resolve.
                    MetadataResponse::Unavailable(reason) => {
                        let unavailable_version = UnavailableVersion::from(reason);
                        let message = unavailable_version.singular_message();
                        if let Some(err) = reason.source() {
                            // Show the detailed error for metadata parse errors.
                            warn!("{name} {message}: {err}");
                        } else {
                            warn!("{name} {message}");
                        }
                        self.incomplete_packages
                            .entry(name.clone())
                            .or_default()
                            .insert(version.clone(), reason.clone());
                        return Ok(Dependencies::Unavailable(unavailable_version));
                    }
                    // A hard error (e.g., a failed build): attach a derivation chain explaining
                    // how the resolver arrived at this distribution.
                    MetadataResponse::Error(dist, err) => {
                        let chain = DerivationChainBuilder::from_state(id, version, pubgrub)
                            .unwrap_or_default();
                        return Err(ResolveError::Dist(
                            DistErrorKind::from_requested_dist(dist, &**err),
                            dist.clone(),
                            chain,
                            err.clone(),
                        ));
                    }
                };

                // If there was no requires-python on the index page, we may have an incompatible
                // distribution.
                if let Some(requires_python) = &metadata.requires_python {
                    if !python_requirement.target().is_contained_by(requires_python) {
                        return Ok(Dependencies::Unavailable(
                            UnavailableVersion::RequiresPython(requires_python.clone()),
                        ));
                    }
                }

                // Identify any system dependencies based on the index URL.
                // (Only applies when the PyTorch backend strategy is CUDA-based and the package
                // is known to carry a system dependency.)
                let system_dependencies = self
                    .options
                    .torch_backend
                    .as_ref()
                    .filter(|torch_backend| matches!(torch_backend, TorchStrategy::Cuda { .. }))
                    .filter(|torch_backend| torch_backend.has_system_dependency(name))
                    .and_then(|_| pins.get(name, version).and_then(ResolvedDist::index))
                    .map(IndexUrl::url)
                    .and_then(SystemDependency::from_index)
                    .into_iter()
                    .inspect(|system_dependency| {
                        debug!(
                            "Adding system dependency `{}` for `{package}@{version}`",
                            system_dependency
                        );
                    })
                    .map(PubGrubDependency::from);

                let requirements = self.flatten_requirements(
                    &metadata.requires_dist,
                    &metadata.dependency_groups,
                    extra.as_ref(),
                    group.as_ref(),
                    Some(name),
                    env,
                    python_requirement,
                );

                // Drop user-excluded packages, then convert each requirement into PubGrub terms.
                requirements
                    .filter(|requirement| !self.excludes.contains(&requirement.name))
                    .flat_map(|requirement| {
                        PubGrubDependency::from_requirement(
                            &self.conflicts,
                            requirement,
                            group.as_ref(),
                            Some(package),
                        )
                    })
                    .chain(system_dependencies)
                    .collect()
            }

            // Interpreter packages have no dependencies of their own.
            PubGrubPackageInner::Python(_) => return Ok(Dependencies::Unforkable(Vec::default())),

            // System packages have no dependencies of their own.
            PubGrubPackageInner::System(_) => return Ok(Dependencies::Unforkable(Vec::default())),

            // Add a dependency on both the marker and base package.
            PubGrubPackageInner::Marker { name, marker } => {
                return Ok(Dependencies::Unforkable(
                    [MarkerTree::TRUE, *marker]
                        .into_iter()
                        .map(move |marker| PubGrubDependency {
                            package: PubGrubPackage::from(PubGrubPackageInner::Package {
                                name: name.clone(),
                                extra: None,
                                group: None,
                                marker,
                            }),
                            version: Range::singleton(version.clone()),
                            parent: None,
                            source: DependencySource::Unspecified,
                        })
                        .collect(),
                ));
            }

            // Add a dependency on both the extra and base package, with and without the marker.
            PubGrubPackageInner::Extra {
                name,
                extra,
                marker,
            } => {
                return Ok(Dependencies::Unforkable(
                    [MarkerTree::TRUE, *marker]
                        .into_iter()
                        .dedup()
                        .flat_map(move |marker| {
                            [None, Some(extra)]
                                .into_iter()
                                .map(move |extra| PubGrubDependency {
                                    package: PubGrubPackage::from(PubGrubPackageInner::Package {
                                        name: name.clone(),
                                        extra: extra.cloned(),
                                        group: None,
                                        marker,
                                    }),
                                    version: Range::singleton(version.clone()),
                                    parent: None,
                                    source: DependencySource::Unspecified,
                                })
                        })
                        .collect(),
                ));
            }

            // Add a dependency on the dependency group, with and without the marker.
            PubGrubPackageInner::Group {
                name,
                group,
                marker,
            } => {
                return Ok(Dependencies::Unforkable(
                    [MarkerTree::TRUE, *marker]
                        .into_iter()
                        .dedup()
                        .map(|marker| PubGrubDependency {
                            package: PubGrubPackage::from(PubGrubPackageInner::Package {
                                name: name.clone(),
                                extra: None,
                                group: Some(group.clone()),
                                marker,
                            }),
                            version: Range::singleton(version.clone()),
                            parent: None,
                            source: DependencySource::Unspecified,
                        })
                        .collect(),
                ));
            }
        };
        Ok(Dependencies::Available(dependencies))
    }
2014
    /// The regular and dev dependencies filtered by Python version and the markers of this fork,
    /// plus the extras dependencies of the current package (e.g., `black` depending on
    /// `black[colorama]`).
    ///
    /// `name` is the current package's own name (when known); it is used to detect and expand
    /// self-referential requirements like `project[extra]`.
    fn flatten_requirements<'a>(
        &'a self,
        dependencies: &'a [Requirement],
        dev_dependencies: &'a BTreeMap<GroupName, Box<[Requirement]>>,
        extra: Option<&'a ExtraName>,
        dev: Option<&'a GroupName>,
        name: Option<&PackageName>,
        env: &'a ResolverEnvironment,
        python_requirement: &'a PythonRequirement,
    ) -> impl Iterator<Item = Cow<'a, Requirement>> {
        let python_marker = python_requirement.to_marker_tree();

        if let Some(dev) = dev {
            // Dependency groups can include the project itself, so no need to flatten recursive
            // dependencies.
            Either::Left(Either::Left(self.requirements_for_extra(
                dev_dependencies.get(dev).into_iter().flatten(),
                extra,
                env,
                python_marker,
                python_requirement,
            )))
        } else if !dependencies
            .iter()
            .any(|req| name == Some(&req.name) && !req.extras.is_empty())
        {
            // If the project doesn't define any recursive dependencies, take the fast path.
            Either::Left(Either::Right(self.requirements_for_extra(
                dependencies.iter(),
                extra,
                env,
                python_marker,
                python_requirement,
            )))
        } else {
            // Slow path: the package depends on itself with extras (e.g., `black` depending on
            // `black[colorama]`), so expand those extras transitively.
            let mut requirements = self
                .requirements_for_extra(
                    dependencies.iter(),
                    extra,
                    env,
                    python_marker,
                    python_requirement,
                )
                .collect::<Vec<_>>();

            // Transitively process all extras that are recursively included, starting with the current
            // extra.
            //
            // Each queue entry pairs an extra with the marker under which it was requested, so
            // that expanded requirements inherit the requesting marker.
            let mut seen = FxHashSet::<(ExtraName, MarkerTree)>::default();
            let mut queue: VecDeque<_> = requirements
                .iter()
                .filter(|req| name == Some(&req.name))
                .flat_map(|req| req.extras.iter().cloned().map(|extra| (extra, req.marker)))
                .collect();
            while let Some((extra, marker)) = queue.pop_front() {
                // `seen` keys on (extra, marker) pairs, so the same extra can be revisited
                // under a different marker, but each combination is expanded only once.
                if !seen.insert((extra.clone(), marker)) {
                    continue;
                }
                for requirement in self.requirements_for_extra(
                    dependencies,
                    Some(&extra),
                    env,
                    python_marker,
                    python_requirement,
                ) {
                    // Conjoin the requesting marker onto the requirement's own marker, cloning
                    // only when the requirement was borrowed.
                    let requirement = match requirement {
                        Cow::Owned(mut requirement) => {
                            requirement.marker.and(marker);
                            requirement
                        }
                        Cow::Borrowed(requirement) => {
                            let mut marker = marker;
                            marker.and(requirement.marker);
                            Requirement {
                                name: requirement.name.clone(),
                                extras: requirement.extras.clone(),
                                groups: requirement.groups.clone(),
                                source: requirement.source.clone(),
                                origin: requirement.origin.clone(),
                                marker: marker.simplify_extras(slice::from_ref(&extra)),
                            }
                        }
                    };
                    if name == Some(&requirement.name) {
                        // Add each transitively included extra.
                        queue.extend(
                            requirement
                                .extras
                                .iter()
                                .cloned()
                                .map(|extra| (extra, requirement.marker)),
                        );
                    } else {
                        // Add the requirements for that extra.
                        requirements.push(Cow::Owned(requirement));
                    }
                }
            }

            // Retain any self-constraints for that extra, e.g., if `project[foo]` includes
            // `project[bar]>1.0`, as a dependency, we need to propagate `project>1.0`, in addition to
            // transitively expanding `project[bar]`.
            let mut self_constraints = vec![];
            for req in &requirements {
                if name == Some(&req.name) && !req.source.is_empty() {
                    self_constraints.push(Requirement {
                        name: req.name.clone(),
                        extras: Box::new([]),
                        groups: req.groups.clone(),
                        source: req.source.clone(),
                        origin: req.origin.clone(),
                        marker: req.marker,
                    });
                }
            }

            // Drop all the self-requirements now that we flattened them out.
            requirements.retain(|req| name != Some(&req.name) || req.extras.is_empty());
            requirements.extend(self_constraints.into_iter().map(Cow::Owned));

            Either::Right(requirements.into_iter())
        }
    }
2140
2141    /// The set of the regular and dev dependencies, filtered by Python version,
2142    /// the markers of this fork and the requested extra.
2143    fn requirements_for_extra<'data, 'parameters>(
2144        &'data self,
2145        dependencies: impl IntoIterator<Item = &'data Requirement> + 'parameters,
2146        extra: Option<&'parameters ExtraName>,
2147        env: &'parameters ResolverEnvironment,
2148        python_marker: MarkerTree,
2149        python_requirement: &'parameters PythonRequirement,
2150    ) -> impl Iterator<Item = Cow<'data, Requirement>> + 'parameters
2151    where
2152        'data: 'parameters,
2153    {
2154        self.overrides
2155            .apply(dependencies)
2156            .filter(move |requirement| {
2157                Self::is_requirement_applicable(
2158                    requirement,
2159                    extra,
2160                    env,
2161                    python_marker,
2162                    python_requirement,
2163                )
2164            })
2165            .flat_map(move |requirement| {
2166                iter::once(requirement.clone()).chain(self.constraints_for_requirement(
2167                    requirement,
2168                    extra,
2169                    env,
2170                    python_marker,
2171                    python_requirement,
2172                ))
2173            })
2174    }
2175
2176    /// Whether a requirement is applicable for the Python version, the markers of this fork and the
2177    /// requested extra.
2178    fn is_requirement_applicable(
2179        requirement: &Requirement,
2180        extra: Option<&ExtraName>,
2181        env: &ResolverEnvironment,
2182        python_marker: MarkerTree,
2183        python_requirement: &PythonRequirement,
2184    ) -> bool {
2185        // If the requirement isn't relevant for the current platform, skip it.
2186        match extra {
2187            Some(source_extra) => {
2188                // Only include requirements that are relevant for the current extra.
2189                if requirement.evaluate_markers(env.marker_environment(), &[]) {
2190                    return false;
2191                }
2192                if !requirement
2193                    .evaluate_markers(env.marker_environment(), slice::from_ref(source_extra))
2194                {
2195                    return false;
2196                }
2197                if !env.included_by_group(ConflictItemRef::from((&requirement.name, source_extra)))
2198                {
2199                    return false;
2200                }
2201            }
2202            None => {
2203                if !requirement.evaluate_markers(env.marker_environment(), &[]) {
2204                    return false;
2205                }
2206            }
2207        }
2208
2209        // If the requirement would not be selected with any Python version
2210        // supported by the root, skip it.
2211        if python_marker.is_disjoint(requirement.marker) {
2212            trace!(
2213                "Skipping {requirement} because of Requires-Python: {requires_python}",
2214                requires_python = python_requirement.target(),
2215            );
2216            return false;
2217        }
2218
2219        // If we're in a fork in universal mode, ignore any dependency that isn't part of
2220        // this fork (but will be part of another fork).
2221        if !env.included_by_marker(requirement.marker) {
2222            trace!("Skipping {requirement} because of {env}");
2223            return false;
2224        }
2225
2226        true
2227    }
2228
    /// The constraints applicable to the requirement, filtered by Python version, the markers of
    /// this fork and the requested extra.
    ///
    /// Each yielded constraint carries the intersection of its own marker and the requirement's
    /// marker, since a constraint should only apply when _both_ markers are true.
    fn constraints_for_requirement<'data, 'parameters>(
        &'data self,
        requirement: Cow<'data, Requirement>,
        extra: Option<&'parameters ExtraName>,
        env: &'parameters ResolverEnvironment,
        python_marker: MarkerTree,
        python_requirement: &'parameters PythonRequirement,
    ) -> impl Iterator<Item = Cow<'data, Requirement>> + 'parameters
    where
        'data: 'parameters,
    {
        self.constraints
            .get(&requirement.name)
            .into_iter()
            .flatten()
            .filter_map(move |constraint| {
                // Combine the constraint's marker with the requirement's marker, skipping the
                // constraint entirely if the two can never hold at once.
                let constraint = if constraint.marker.is_true() {
                    // Additionally, if the requirement is `requests ; sys_platform == 'darwin'`
                    // and the constraint is `requests ; python_version == '3.6'`, the
                    // constraint should only apply when _both_ markers are true.
                    if requirement.marker.is_true() {
                        // Both markers are trivially true: the constraint applies as-is.
                        Cow::Borrowed(constraint)
                    } else {
                        let mut marker = constraint.marker;
                        marker.and(requirement.marker);

                        if marker.is_false() {
                            trace!(
                                "Skipping {constraint} because of disjoint markers: `{}` vs. `{}`",
                                constraint.marker.try_to_string().unwrap(),
                                requirement.marker.try_to_string().unwrap(),
                            );
                            return None;
                        }

                        // Rebuild the constraint with the combined marker attached.
                        Cow::Owned(Requirement {
                            name: constraint.name.clone(),
                            extras: constraint.extras.clone(),
                            groups: constraint.groups.clone(),
                            source: constraint.source.clone(),
                            origin: constraint.origin.clone(),
                            marker,
                        })
                    }
                } else {
                    let requires_python = python_requirement.target();

                    let mut marker = constraint.marker;
                    marker.and(requirement.marker);

                    if marker.is_false() {
                        trace!(
                            "Skipping {constraint} because of disjoint markers: `{}` vs. `{}`",
                            constraint.marker.try_to_string().unwrap(),
                            requirement.marker.try_to_string().unwrap(),
                        );
                        return None;
                    }

                    // Additionally, if the requirement is `requests ; sys_platform == 'darwin'`
                    // and the constraint is `requests ; python_version == '3.6'`, the
                    // constraint should only apply when _both_ markers are true.
                    //
                    // If the combined marker would not be selected with any Python version
                    // supported by the root, skip the constraint.
                    if python_marker.is_disjoint(marker) {
                        trace!(
                            "Skipping constraint {requirement} because of Requires-Python: {requires_python}"
                        );
                        return None;
                    }

                    if marker == constraint.marker {
                        // Intersecting with the requirement's marker changed nothing; borrow.
                        Cow::Borrowed(constraint)
                    } else {
                        Cow::Owned(Requirement {
                            name: constraint.name.clone(),
                            extras: constraint.extras.clone(),
                            groups: constraint.groups.clone(),
                            source: constraint.source.clone(),
                            origin: constraint.origin.clone(),
                            marker,
                        })
                    }
                };

                // If we're in a fork in universal mode, ignore any dependency that isn't part of
                // this fork (but will be part of another fork).
                //
                // Note: `constraint` was shadowed above, so this checks the *combined* marker.
                if !env.included_by_marker(constraint.marker) {
                    trace!("Skipping {constraint} because of {env}");
                    return None;
                }

                // If the constraint isn't relevant for the current platform, skip it.
                match extra {
                    Some(source_extra) => {
                        if !constraint
                            .evaluate_markers(env.marker_environment(), slice::from_ref(source_extra))
                        {
                            return None;
                        }
                        // The constraint shares the requirement's name (it was looked up by that
                        // name), so the requirement's (name, extra) conflict item covers it too.
                        if !env.included_by_group(ConflictItemRef::from((&requirement.name, source_extra)))
                        {
                            return None;
                        }
                    }
                    None => {
                        if !constraint.evaluate_markers(env.marker_environment(), &[]) {
                            return None;
                        }
                    }
                }

                Some(constraint)
            })
    }
2346
2347    /// Fetch the metadata for a stream of packages and versions.
2348    async fn fetch<Provider: ResolverProvider>(
2349        self: Arc<Self>,
2350        provider: Arc<Provider>,
2351        request_stream: Receiver<Request>,
2352    ) -> Result<(), ResolveError> {
2353        let mut response_stream = ReceiverStream::new(request_stream)
2354            .map(|request| self.process_request(request, &*provider).boxed_local())
2355            // Allow as many futures as possible to start in the background.
2356            // Backpressure is provided by at a more granular level by `DistributionDatabase`
2357            // and `SourceDispatch`, as well as the bounded request channel.
2358            .buffer_unordered(usize::MAX);
2359
2360        while let Some(response) = response_stream.next().await {
2361            match response? {
2362                Some(Response::Package(name, index, version_map)) => {
2363                    trace!("Received package metadata for: {name}");
2364                    if let Some(index) = index {
2365                        self.index
2366                            .explicit()
2367                            .done((name, index), Arc::new(version_map));
2368                    } else {
2369                        self.index.implicit().done(name, Arc::new(version_map));
2370                    }
2371                }
2372                Some(Response::Installed { dist, metadata }) => {
2373                    trace!("Received installed distribution metadata for: {dist}");
2374                    self.index
2375                        .distributions()
2376                        .done(dist.distribution_id(), Arc::new(metadata));
2377                }
2378                Some(Response::Dist { dist, metadata }) => {
2379                    let dist_kind = match dist {
2380                        Dist::Built(_) => "built",
2381                        Dist::Source(_) => "source",
2382                    };
2383                    trace!("Received {dist_kind} distribution metadata for: {dist}");
2384                    if let MetadataResponse::Unavailable(reason) = &metadata {
2385                        let message = UnavailableVersion::from(reason).singular_message();
2386                        if let Some(err) = reason.source() {
2387                            // Show the detailed error for metadata parse errors.
2388                            warn!("{dist} {message}: {err}");
2389                        } else {
2390                            warn!("{dist} {message}");
2391                        }
2392                    }
2393                    self.index
2394                        .distributions()
2395                        .done(dist.distribution_id(), Arc::new(metadata));
2396                }
2397                None => {}
2398            }
2399        }
2400
2401        Ok::<(), ResolveError>(())
2402    }
2403
    /// Process a single metadata [`Request`], returning the corresponding [`Response`] (if any).
    ///
    /// Returns `Ok(None)` when the request can be answered without emitting a response, e.g.,
    /// when a prefetch short-circuits because no compatible version or distribution exists.
    #[instrument(skip_all, fields(%request))]
    async fn process_request<Provider: ResolverProvider>(
        &self,
        request: Request,
        provider: &Provider,
    ) -> Result<Option<Response>, ResolveError> {
        match request {
            // Fetch package metadata from the registry.
            Request::Package(package_name, index) => {
                let package_versions = provider
                    .get_package_versions(&package_name, index.as_ref())
                    .boxed_local()
                    .await
                    .map_err(ResolveError::Client)?;

                Ok(Some(Response::Package(
                    package_name,
                    index.map(IndexMetadata::into_url),
                    package_versions,
                )))
            }

            // Fetch distribution metadata from the distribution database.
            Request::Dist(dist) => {
                // Before building or downloading, see whether a registry already
                // supplied the metadata alongside the version listing.
                if let Some(version) = dist.version() {
                    if let Some(index) = dist.index() {
                        // Check the implicit indexes for pre-provided metadata.
                        let versions_response = self.index.implicit().get(dist.name());
                        if let Some(VersionsResponse::Found(version_maps)) =
                            versions_response.as_deref()
                        {
                            for version_map in version_maps {
                                // Only trust metadata that came from the same index.
                                if version_map.index() == Some(index) {
                                    let Some(metadata) = version_map.get_metadata(version) else {
                                        continue;
                                    };
                                    debug!("Found registry-provided metadata for: {dist}");
                                    return Ok(Some(Response::Dist {
                                        dist,
                                        metadata: MetadataResponse::Found(
                                            ArchiveMetadata::from_metadata23(metadata.clone()),
                                        ),
                                    }));
                                }
                            }
                        }

                        // Check the explicit indexes for pre-provided metadata.
                        let versions_response = self
                            .index
                            .explicit()
                            .get(&(dist.name().clone(), index.clone()));
                        if let Some(VersionsResponse::Found(version_maps)) =
                            versions_response.as_deref()
                        {
                            for version_map in version_maps {
                                let Some(metadata) = version_map.get_metadata(version) else {
                                    continue;
                                };
                                debug!("Found registry-provided metadata for: {dist}");
                                return Ok(Some(Response::Dist {
                                    dist,
                                    metadata: MetadataResponse::Found(
                                        ArchiveMetadata::from_metadata23(metadata.clone()),
                                    ),
                                }));
                            }
                        }
                    }
                }

                // No pre-provided metadata; fetch or build it via the provider.
                let metadata = provider
                    .get_or_build_wheel_metadata(&dist)
                    .boxed_local()
                    .await?;

                // Guard against an index or build that reports metadata for a
                // different package than the one that was requested.
                if let MetadataResponse::Found(metadata) = &metadata {
                    if &metadata.metadata.name != dist.name() {
                        return Err(ResolveError::MismatchedPackageName {
                            request: "distribution metadata",
                            expected: dist.name().clone(),
                            actual: metadata.metadata.name.clone(),
                        });
                    }
                }

                Ok(Some(Response::Dist { dist, metadata }))
            }

            // Fetch metadata for an already-installed distribution.
            Request::Installed(dist) => {
                let metadata = provider.get_installed_metadata(&dist).boxed_local().await?;

                // Same name-consistency guard as for remote distributions.
                if let MetadataResponse::Found(metadata) = &metadata {
                    if &metadata.metadata.name != dist.name() {
                        return Err(ResolveError::MismatchedPackageName {
                            request: "installed metadata",
                            expected: dist.name().clone(),
                            actual: metadata.metadata.name.clone(),
                        });
                    }
                }

                Ok(Some(Response::Installed { dist, metadata }))
            }

            // Pre-fetch the package and distribution metadata.
            Request::Prefetch(package_name, range, python_requirement) => {
                // Wait for the package metadata to become available.
                let versions_response = self
                    .index
                    .implicit()
                    .wait(&package_name)
                    .await
                    .ok_or_else(|| ResolveError::UnregisteredTask(package_name.to_string()))?;

                let version_map = match *versions_response {
                    VersionsResponse::Found(ref version_map) => version_map,
                    // Short-circuit if we did not find any versions for the package,
                    // recording the reason so the resolver can report it later.
                    VersionsResponse::NoIndex => {
                        self.unavailable_packages
                            .insert(package_name.clone(), UnavailablePackage::NoIndex);

                        return Ok(None);
                    }
                    VersionsResponse::Offline => {
                        self.unavailable_packages
                            .insert(package_name.clone(), UnavailablePackage::Offline);

                        return Ok(None);
                    }
                    VersionsResponse::NotFound => {
                        self.unavailable_packages
                            .insert(package_name.clone(), UnavailablePackage::NotFound);

                        return Ok(None);
                    }
                };

                // We don't have access to the fork state when prefetching, so assume that
                // pre-release versions are allowed.
                let env = ResolverEnvironment::universal(vec![]);

                // Try to find a compatible version. If there aren't any compatible versions,
                // short-circuit.
                let Some(candidate) = self.selector.select(
                    &package_name,
                    &range,
                    version_map,
                    &self.preferences,
                    &self.installed_packages,
                    &self.exclusions,
                    None,
                    &env,
                    self.tags.as_ref(),
                ) else {
                    return Ok(None);
                };

                // If there is not a compatible distribution, short-circuit.
                let Some(dist) = candidate.compatible() else {
                    return Ok(None);
                };

                // If the registry provided metadata for this distribution, use it.
                for version_map in version_map {
                    if let Some(metadata) = version_map.get_metadata(candidate.version()) {
                        let dist = dist.for_resolution();
                        // Only trust metadata that came from the same index as the
                        // selected distribution.
                        if version_map.index() == dist.index() {
                            debug!("Found registry-provided metadata for: {dist}");

                            let metadata = MetadataResponse::Found(
                                ArchiveMetadata::from_metadata23(metadata.clone()),
                            );

                            let dist = dist.to_owned();
                            // Guard against a distribution whose name doesn't match the request.
                            if &package_name != dist.name() {
                                return Err(ResolveError::MismatchedPackageName {
                                    request: "distribution",
                                    expected: package_name,
                                    actual: dist.name().clone(),
                                });
                            }

                            let response = match dist {
                                ResolvedDist::Installable { dist, .. } => Response::Dist {
                                    dist: (*dist).clone(),
                                    metadata,
                                },
                                ResolvedDist::Installed { dist } => Response::Installed {
                                    dist: (*dist).clone(),
                                    metadata,
                                },
                            };

                            return Ok(Some(response));
                        }
                    }
                }

                // Avoid prefetching source distributions with unbounded lower-bound ranges. This
                // often leads to failed attempts to build legacy versions of packages that are
                // incompatible with modern build tools.
                if dist.wheel().is_none() {
                    if !self.selector.use_highest_version(&package_name, &env) {
                        if let Some((lower, _)) = range.iter().next() {
                            if lower == &Bound::Unbounded {
                                debug!(
                                    "Skipping prefetch for unbounded minimum-version range: {package_name} ({range})"
                                );
                                return Ok(None);
                            }
                        }
                    }
                }

                // Validate the Python requirement: skip the prefetch if the file's
                // `Requires-Python` doesn't cover the target Python range.
                let requires_python = match dist {
                    CompatibleDist::InstalledDist(_) => None,
                    CompatibleDist::SourceDist { sdist, .. }
                    | CompatibleDist::IncompatibleWheel { sdist, .. } => {
                        sdist.file.requires_python.as_ref()
                    }
                    CompatibleDist::CompatibleWheel { wheel, .. } => {
                        wheel.file.requires_python.as_ref()
                    }
                };
                if let Some(requires_python) = requires_python.as_ref() {
                    if !python_requirement.target().is_contained_by(requires_python) {
                        return Ok(None);
                    }
                }

                // Verify that the package is allowed under the hash-checking policy.
                if !self
                    .hasher
                    .allows_package(candidate.name(), candidate.version())
                {
                    return Ok(None);
                }

                // Emit a request to fetch the metadata for this version.
                //
                // Only proceed if `register` returns `true` — presumably indicating that no
                // other task has claimed this distribution yet (TODO confirm against the
                // `distributions` index implementation).
                let dist = dist.for_resolution();
                if self.index.distributions().register(dist.distribution_id()) {
                    let dist = dist.to_owned();
                    // Guard against a distribution whose name doesn't match the request.
                    if &package_name != dist.name() {
                        return Err(ResolveError::MismatchedPackageName {
                            request: "distribution",
                            expected: package_name,
                            actual: dist.name().clone(),
                        });
                    }

                    let response = match dist {
                        ResolvedDist::Installable { dist, .. } => {
                            let metadata = provider
                                .get_or_build_wheel_metadata(&dist)
                                .boxed_local()
                                .await?;

                            Response::Dist {
                                dist: (*dist).clone(),
                                metadata,
                            }
                        }
                        ResolvedDist::Installed { dist } => {
                            let metadata =
                                provider.get_installed_metadata(&dist).boxed_local().await?;

                            Response::Installed {
                                dist: (*dist).clone(),
                                metadata,
                            }
                        }
                    };

                    Ok(Some(response))
                } else {
                    Ok(None)
                }
            }
        }
    }
2686
2687    fn convert_no_solution_err(
2688        &self,
2689        mut err: pubgrub::NoSolutionError<UvDependencyProvider>,
2690        fork_urls: ForkUrls,
2691        fork_indexes: ForkIndexes,
2692        env: ResolverEnvironment,
2693        current_environment: MarkerEnvironment,
2694        visited: &FxHashSet<PackageName>,
2695    ) -> ResolveError {
2696        err = NoSolutionError::collapse_local_version_segments(NoSolutionError::collapse_proxies(
2697            err,
2698        ));
2699
2700        let mut unavailable_packages = FxHashMap::default();
2701        for package in err.packages() {
2702            if let PubGrubPackageInner::Package { name, .. } = &**package {
2703                if let Some(reason) = self.unavailable_packages.get(name) {
2704                    unavailable_packages.insert(name.clone(), reason.clone());
2705                }
2706            }
2707        }
2708
2709        let mut incomplete_packages = FxHashMap::default();
2710        for package in err.packages() {
2711            if let PubGrubPackageInner::Package { name, .. } = &**package {
2712                if let Some(versions) = self.incomplete_packages.get(name) {
2713                    for entry in versions.iter() {
2714                        let (version, reason) = entry.pair();
2715                        incomplete_packages
2716                            .entry(name.clone())
2717                            .or_insert_with(BTreeMap::default)
2718                            .insert(version.clone(), reason.clone());
2719                    }
2720                }
2721            }
2722        }
2723
2724        let mut available_indexes = FxHashMap::default();
2725        let mut included_versions = FxHashMap::default();
2726        let mut available_versions = FxHashMap::default();
2727
2728        let available_version_cutoff: Option<jiff::Timestamp> =
2729            std::env::var(EnvVars::UV_TEST_AVAILABLE_VERSION_CUTOFF)
2730                .ok()
2731                .and_then(|s| s.parse().ok());
2732
2733        for package in err.packages() {
2734            let Some(name) = package.name() else { continue };
2735            if !visited.contains(name) {
2736                // Avoid including version data for packages that exist in the derivation
2737                // tree, but were never visited during resolution. We _may_ have metadata for
2738                // these packages, but it's non-deterministic, and omitting them ensures that
2739                // we represent the state of the resolver at the time of failure.
2740                continue;
2741            }
2742            let versions_response = if let Some(index) = fork_indexes.get(name) {
2743                self.index
2744                    .explicit()
2745                    .get(&(name.clone(), index.url().clone()))
2746            } else {
2747                self.index.implicit().get(name)
2748            };
2749            if let Some(response) = versions_response {
2750                if let VersionsResponse::Found(ref version_maps) = *response {
2751                    // Track included and available versions, across all indexes.
2752                    for version_map in version_maps {
2753                        let package_included_versions = included_versions
2754                            .entry(name.clone())
2755                            .or_insert_with(BTreeSet::new);
2756                        let package_available_versions = available_versions
2757                            .entry(name.clone())
2758                            .or_insert_with(BTreeSet::new);
2759
2760                        for (version, dists) in version_map.iter(&Ranges::full()) {
2761                            // Included versions are those that survive the effective
2762                            // `exclude-newer` filter used during resolution. Files with
2763                            // missing upload times are treated as excluded (matching
2764                            // the resolution behavior in `version_map.rs`).
2765                            let excluded_from_included = || {
2766                                let Some(included_version_cutoff) =
2767                                    version_map.included_version_cutoff()
2768                                else {
2769                                    return false;
2770                                };
2771                                let Some(prioritized_dist) = dists.prioritized_dist() else {
2772                                    return true;
2773                                };
2774                                prioritized_dist.files().all(|file| {
2775                                    file.upload_time_utc_ms.is_none_or(|upload_time| {
2776                                        upload_time >= included_version_cutoff.as_millisecond()
2777                                    })
2778                                })
2779                            };
2780
2781                            if !excluded_from_included() {
2782                                package_included_versions.insert(version.clone());
2783                            }
2784
2785                            // Available versions are used in resolver error reporting,
2786                            // and can be bounded by a test-only cutoff for deterministic
2787                            // snapshots. Files with missing upload times are *not*
2788                            // excluded, since we only filter versions we can confirm
2789                            // were published after the cutoff.
2790                            let excluded_from_available = || {
2791                                let Some(ref exclude_newer) = available_version_cutoff else {
2792                                    return false;
2793                                };
2794                                let Some(prioritized_dist) = dists.prioritized_dist() else {
2795                                    return false;
2796                                };
2797                                prioritized_dist.files().all(|file| {
2798                                    file.upload_time_utc_ms.is_some_and(|upload_time| {
2799                                        upload_time >= exclude_newer.as_millisecond()
2800                                    })
2801                                })
2802                            };
2803
2804                            if !excluded_from_available() {
2805                                package_available_versions.insert(version.clone());
2806                            }
2807                        }
2808                    }
2809
2810                    // Track the indexes in which the package is available.
2811                    available_indexes
2812                        .entry(name.clone())
2813                        .or_insert(BTreeSet::new())
2814                        .extend(
2815                            version_maps
2816                                .iter()
2817                                .filter_map(|version_map| version_map.index().cloned()),
2818                        );
2819                }
2820            }
2821        }
2822
2823        ResolveError::NoSolution(Box::new(NoSolutionError::new(
2824            err,
2825            self.index.clone(),
2826            included_versions,
2827            available_versions,
2828            available_indexes,
2829            self.selector.clone(),
2830            self.python_requirement.clone(),
2831            self.locations.clone(),
2832            self.capabilities.clone(),
2833            unavailable_packages,
2834            incomplete_packages,
2835            fork_urls,
2836            fork_indexes,
2837            env,
2838            current_environment,
2839            self.tags.clone(),
2840            self.workspace_members.clone(),
2841            self.options.clone(),
2842        )))
2843    }
2844
2845    fn on_progress(&self, package: &PubGrubPackage, version: &Version) {
2846        if let Some(reporter) = self.reporter.as_ref() {
2847            match &**package {
2848                PubGrubPackageInner::Root(_) => {}
2849                PubGrubPackageInner::Python(_) => {}
2850                PubGrubPackageInner::System(_) => {}
2851                PubGrubPackageInner::Marker { .. } => {}
2852                PubGrubPackageInner::Extra { .. } => {}
2853                PubGrubPackageInner::Group { .. } => {}
2854                PubGrubPackageInner::Package { name, .. } => {
2855                    reporter.on_progress(name, &VersionOrUrlRef::Version(version));
2856                }
2857            }
2858        }
2859    }
2860
2861    fn on_complete(&self) {
2862        if let Some(reporter) = self.reporter.as_ref() {
2863            reporter.on_complete();
2864        }
2865    }
2866}
2867
/// State that is used during unit propagation in the resolver, one instance per fork.
#[derive(Clone)]
pub(crate) struct ForkState {
    /// The internal state used by the resolver.
    ///
    /// Note that not all parts of this state are strictly internal. For
    /// example, the edges in the dependency graph generated as part of the
    /// output of resolution are derived from the "incompatibilities" tracked
    /// in this state. We also ultimately retrieve the final set of version
    /// assignments (to packages) from this state's "partial solution."
    pubgrub: State<UvDependencyProvider>,
    /// The initial package to select. If set, the first iteration over this state will avoid
    /// asking PubGrub for the highest-priority package, and will instead use the provided package.
    initial_id: Option<Id<PubGrubPackage>>,
    /// The initial version to select. If set, the first iteration over this state will avoid
    /// asking PubGrub for the highest-priority version, and will instead use the provided version.
    initial_version: Option<Version>,
    /// The next package on which to run unit propagation.
    next: Id<PubGrubPackage>,
    /// The set of pinned versions we accrue throughout resolution.
    ///
    /// The key of this map is a package name, and each package name maps to
    /// a set of versions for that package. Each version in turn is mapped
    /// to the concrete distribution selected for installation, along with the
    /// concrete distribution whose metadata was used during resolution.
    /// After resolution is finished, this map is consulted to recover both the
    /// locked artifact and the metadata backing the resolved dependency edges.
    pins: FilePins,
    /// Ensure we don't have duplicate URLs in any branch.
    ///
    /// Unlike [`Urls`], we add only the URLs we have seen in this branch, and there can be only
    /// one URL per package. By prioritizing direct URL dependencies over registry dependencies,
    /// this map is populated for all direct URL packages before we look at any registry packages.
    fork_urls: ForkUrls,
    /// Ensure we don't have duplicate indexes in any branch.
    ///
    /// Unlike [`Indexes`], we add only the indexes we have seen in this branch, and there can be
    /// only one index per package.
    fork_indexes: ForkIndexes,
    /// When dependencies for a package are retrieved, this map of priorities
    /// is updated based on how each dependency was specified. Certain types
    /// of dependencies have more "priority" than others (like direct URL
    /// dependencies). These priorities help determine which package to
    /// consider next during resolution.
    priorities: PubGrubPriorities,
    /// This keeps track of the set of versions for each package that we've
    /// already visited during resolution. This avoids doing redundant work.
    added_dependencies: FxHashMap<Id<PubGrubPackage>, FxHashSet<Version>>,
    /// The marker expression that created this state.
    ///
    /// The root state always corresponds to a marker expression that is always
    /// `true` for every `MarkerEnvironment`.
    ///
    /// In non-universal mode, forking never occurs and so this marker
    /// expression is always `true`.
    ///
    /// Whenever dependencies are fetched, all requirement specifications
    /// are checked for disjointness with the marker expression of the fork
    /// in which those dependencies were fetched. If a requirement has a
    /// completely disjoint marker expression (i.e., it can never be true given
    /// that the marker expression that provoked the fork is true), then that
    /// dependency is completely ignored.
    env: ResolverEnvironment,
    /// The Python requirement for this fork. Defaults to the Python requirement for
    /// the resolution, but may be narrowed if a `python_version` marker is present
    /// in a given fork.
    ///
    /// For example, in:
    /// ```text
    /// numpy >=1.26 ; python_version >= "3.9"
    /// numpy <1.26 ; python_version < "3.9"
    /// ```
    ///
    /// The top fork has a narrower Python compatibility range, and thus can find a
    /// solution that omits Python 3.8 support.
    python_requirement: PythonRequirement,
    /// Per-package conflict counters used to re-prioritize packages that are
    /// repeatedly involved in conflicts (as "culprit" or "affected").
    conflict_tracker: ConflictTracker,
    /// Prefetch package versions for packages with many rejected versions.
    ///
    /// Tracked on the fork state to avoid counting each identical version between forks as new try.
    prefetcher: BatchPrefetcher,
}
2950
impl ForkState {
    /// Create the root fork state from a freshly initialized PubGrub state.
    ///
    /// Resolution starts at the PubGrub root package; pins, URLs, indexes,
    /// priorities, and conflict counters are accrued as solving progresses.
    fn new(
        pubgrub: State<UvDependencyProvider>,
        env: ResolverEnvironment,
        python_requirement: PythonRequirement,
        prefetcher: BatchPrefetcher,
    ) -> Self {
        Self {
            initial_id: None,
            initial_version: None,
            next: pubgrub.root_package,
            pubgrub,
            pins: FilePins::default(),
            fork_urls: ForkUrls::default(),
            fork_indexes: ForkIndexes::default(),
            priorities: PubGrubPriorities::default(),
            added_dependencies: FxHashMap::default(),
            env,
            python_requirement,
            conflict_tracker: ConflictTracker::default(),
            prefetcher,
        }
    }

    /// Visit the dependencies for the selected version of the current package, incorporating any
    /// relevant URLs and pinned indexes into the [`ForkState`].
    ///
    /// Also updates package priorities and, under `--resolution lowest`/`lowest-direct`, warns
    /// once per direct dependency that lacks a lower bound.
    ///
    /// # Errors
    ///
    /// Returns an error if a conflicting URL or index is registered for a package in this fork.
    fn visit_package_version_dependencies(
        &mut self,
        for_package: Id<PubGrubPackage>,
        for_version: &Version,
        urls: &Urls,
        indexes: &Indexes,
        dependencies: &[PubGrubDependency],
        git: &GitResolver,
        workspace_members: &BTreeSet<PackageName>,
        resolution_strategy: &ResolutionStrategy,
    ) -> Result<(), ResolveError> {
        for dependency in dependencies {
            let PubGrubDependency {
                package,
                version,
                parent: _,
                source,
            } = dependency;

            let mut has_url = false;
            if let Some(name) = package.name() {
                // From the [`Requirement`] to [`PubGrubDependency`] conversion, we get a URL if the
                // requirement was a URL requirement. `Urls` applies canonicalization to this and
                // override URLs to both URL and registry requirements, which we then check for
                // conflicts using [`ForkUrl`].
                for url in urls.get_url(&self.env, name, source.verbatim_url(), git)? {
                    self.fork_urls.insert(name, url, &self.env)?;
                    has_url = true;
                }

                if let Some(index) = source.explicit_index() {
                    self.fork_indexes.insert(name, index, &self.env)?;
                }

                // If the package is pinned to an exact index, add it to the fork.
                for index in indexes.get(name, &self.env) {
                    self.fork_indexes.insert(name, index, &self.env)?;
                }
            }

            if let Some(name) = self.pubgrub.package_store[for_package]
                .name_no_root()
                .filter(|name| !workspace_members.contains(name))
            {
                debug!(
                    "Adding transitive dependency for {name}=={for_version}: {package}{version}"
                );
            } else {
                // A dependency from the root package or `requirements.txt`.
                debug!("Adding direct dependency: {package}{version}");

                // Warn the user if a direct dependency lacks a lower bound in `--lowest` resolution.
                let missing_lower_bound = version
                    .bounding_range()
                    .map(|(lowest, _highest)| lowest == Bound::Unbounded)
                    .unwrap_or(true);
                let strategy_lowest = matches!(
                    resolution_strategy,
                    ResolutionStrategy::Lowest | ResolutionStrategy::LowestDirect(..)
                );

                if !has_url && missing_lower_bound && strategy_lowest {
                    let name = package.name_no_root().unwrap();
                    // Handle cases where a package is listed both without and with a lower bound.
                    // Example:
                    // ```
                    // "coverage[toml] ; python_version < '3.11'",
                    // "coverage >= 7.10.0",
                    // ```
                    let bound_on_other_package = dependencies.iter().any(|other| {
                        Some(name) == other.package.name()
                            && !other
                                .version
                                .bounding_range()
                                .map(|(lowest, _highest)| lowest == Bound::Unbounded)
                                .unwrap_or(true)
                    });

                    if !bound_on_other_package {
                        warn_user_once!(
                            "The direct dependency `{name}` is unpinned. \
                            Consider setting a lower bound when using `--resolution lowest` \
                            or `--resolution lowest-direct` to avoid using outdated versions.",
                        );
                    }
                }
            }

            // Update the package priorities.
            self.priorities.insert(package, version, &self.fork_urls);
            // As we're adding an incompatibility from the proxy package to the base package,
            // we need to register the base package.
            if let Some(base_package) = package.base_package() {
                self.priorities
                    .insert(&base_package, version, &self.fork_urls);
            }
        }

        Ok(())
    }

    /// Add the dependencies for the selected version of the current package.
    fn add_package_version_dependencies(
        &mut self,
        for_package: Id<PubGrubPackage>,
        for_version: &Version,
        dependencies: Vec<PubGrubDependency>,
    ) {
        // Register proxy-to-base links (e.g., extra/group proxies to their base package)
        // before the dependencies themselves are added.
        for dependency in &dependencies {
            let PubGrubDependency {
                package,
                version,
                parent: _,
                source: _,
            } = dependency;

            let Some(base_package) = package.base_package() else {
                continue;
            };

            let proxy_package = self.pubgrub.package_store.alloc(package.clone());
            let base_package_id = self.pubgrub.package_store.alloc(base_package.clone());
            self.pubgrub
                .add_proxy_package(proxy_package, base_package_id, version.clone());
        }

        let conflict = self.pubgrub.add_package_version_dependencies(
            self.next,
            for_version.clone(),
            dependencies.into_iter().map(|dependency| {
                let PubGrubDependency {
                    package,
                    version,
                    parent: _,
                    source: _,
                } = dependency;
                (package, version)
            }),
        );

        // Conflict tracking: If the version was rejected due to its dependencies, record culprit
        // and affected.
        if let Some(incompatibility) = conflict {
            self.record_conflict(for_package, Some(for_version), incompatibility);
        }
    }

    /// Record a conflict between `affected` and the other packages in `incompatibility`.
    ///
    /// Every other package in the incompatibility is counted as a "culprit", and the
    /// current package (`self.next`) as "affected"; once either counter reaches
    /// [`CONFLICT_THRESHOLD`], the package is queued for de-/re-prioritization.
    /// Packages that share a name with `affected` (e.g., a marker proxy and its base
    /// package) are not counted.
    fn record_conflict(
        &mut self,
        affected: Id<PubGrubPackage>,
        version: Option<&Version>,
        incompatibility: IncompId<PubGrubPackage, Ranges<Version>, UnavailableReason>,
    ) {
        let mut culprit_is_real = false;
        for (incompatible, _term) in self.pubgrub.incompatibility_store[incompatibility].iter() {
            if incompatible == affected {
                continue;
            }
            if self.pubgrub.package_store[affected].name()
                == self.pubgrub.package_store[incompatible].name()
            {
                // Don't track conflicts between a marker package and the main package, when the
                // marker is "copying" the obligations from the main package through conflicts.
                continue;
            }
            culprit_is_real = true;
            let culprit_count = self
                .conflict_tracker
                .culprit
                .entry(incompatible)
                .or_default();
            *culprit_count += 1;
            if *culprit_count == CONFLICT_THRESHOLD {
                self.conflict_tracker.deprioritize.push(incompatible);
            }
        }
        // Don't track conflicts between a marker package and the main package, when the
        // marker is "copying" the obligations from the main package through conflicts.
        if culprit_is_real {
            // Building the joined package list is only worth doing when debug logging is on.
            if tracing::enabled!(Level::DEBUG) {
                let incompatibility = self.pubgrub.incompatibility_store[incompatibility]
                    .iter()
                    .map(|(package, _term)| &self.pubgrub.package_store[package])
                    .join(", ");
                if let Some(version) = version {
                    debug!(
                        "Recording dependency conflict of {}=={} from incompatibility of ({})",
                        self.pubgrub.package_store[affected], version, incompatibility
                    );
                } else {
                    debug!(
                        "Recording unit propagation conflict of {} from incompatibility of ({})",
                        self.pubgrub.package_store[affected], incompatibility
                    );
                }
            }

            let affected_count = self.conflict_tracker.affected.entry(self.next).or_default();
            *affected_count += 1;
            if *affected_count == CONFLICT_THRESHOLD {
                self.conflict_tracker.prioritize.push(self.next);
            }
        }
    }

    /// Mark `version` of the current package (`self.next`) as unavailable, recording `reason`
    /// so it can be surfaced in error reporting.
    fn add_unavailable_version(&mut self, version: Version, reason: UnavailableVersion) {
        // Incompatible requires-python versions are special in that we track
        // them as incompatible dependencies instead of marking the package version
        // as unavailable directly.
        if let UnavailableVersion::IncompatibleDist(
            IncompatibleDist::Source(IncompatibleSource::RequiresPython(requires_python, kind))
            | IncompatibleDist::Wheel(IncompatibleWheel::RequiresPython(requires_python, kind)),
        ) = reason
        {
            let package = &self.next;
            let python = self.pubgrub.package_store.alloc(PubGrubPackage::from(
                PubGrubPackageInner::Python(match kind {
                    PythonRequirementKind::Installed => PubGrubPython::Installed,
                    PythonRequirementKind::Target => PubGrubPython::Target,
                }),
            ));
            self.pubgrub
                .add_incompatibility(Incompatibility::from_dependency(
                    *package,
                    Range::singleton(version.clone()),
                    (python, release_specifiers_to_ranges(requires_python)),
                ));
            self.pubgrub
                .partial_solution
                .add_decision(self.next, version);
            return;
        }
        self.pubgrub
            .add_incompatibility(Incompatibility::custom_version(
                self.next,
                version.clone(),
                UnavailableReason::Version(reason),
            ));
    }

    /// Subset the current markers with the new markers and update the python requirements fields
    /// accordingly.
    ///
    /// If the new environment narrows the Python requirement (e.g., via a `python_version`
    /// marker), the fork's Python requirement is replaced with the narrowed one.
    fn with_env(mut self, env: ResolverEnvironment) -> Self {
        self.env = env;
        // If the fork contains a narrowed Python requirement, apply it.
        if let Some(req) = self.env.narrow_python_requirement(&self.python_requirement) {
            debug!("Narrowed `requires-python` bound to: {}", req.target());
            self.python_requirement = req;
        }
        self
    }

    /// Returns the URL or index for a package and version.
    ///
    /// In practice, exactly one of the returned values will be `Some`.
    fn source(
        &self,
        name: &PackageName,
        version: &Version,
    ) -> (Option<&VerbatimParsedUrl>, Option<&IndexUrl>) {
        let url = self.fork_urls.get(name);
        let index = url
            .is_none()
            .then(|| {
                self.pins
                    .get(name, version)
                    .expect("Every package should be pinned")
                    .index()
            })
            .flatten();
        (url, index)
    }

    /// Convert this fork's final PubGrub solution into a [`Resolution`].
    ///
    /// Nodes are the concrete, marker-free packages from the partial solution; edges are
    /// reconstructed from the `FromDependencyOf` incompatibilities whose ranges contain
    /// the solved versions of both endpoints.
    fn into_resolution(self) -> Resolution {
        let solution: FxHashMap<_, _> = self.pubgrub.partial_solution.extract_solution().collect();
        // Upper bound on the edge count: every incompatibility attached to a solved
        // package can contribute at most one edge.
        let edge_count: usize = solution
            .keys()
            .map(|package| self.pubgrub.incompatibilities[package].len())
            .sum();
        let mut edges: Vec<ResolutionDependencyEdge> = Vec::with_capacity(edge_count);
        for (package, self_version) in &solution {
            for id in &self.pubgrub.incompatibilities[package] {
                // Only dependency incompatibilities encode edges; skip everything else
                // (e.g., unavailability or no-versions incompatibilities).
                let pubgrub::Kind::FromDependencyOf(
                    self_package,
                    ref self_range,
                    dependency_package,
                    ref dependency_range,
                ) = self.pubgrub.incompatibility_store[*id].kind
                else {
                    continue;
                };
                if *package != self_package {
                    continue;
                }
                if !self_range.contains(self_version) {
                    continue;
                }
                // Both endpoints must be part of the solution, with versions inside the
                // incompatibility's ranges, for the edge to be real.
                let Some(dependency_version) = solution.get(&dependency_package) else {
                    continue;
                };
                if !dependency_range.contains(dependency_version) {
                    continue;
                }

                let self_package = &self.pubgrub.package_store[self_package];
                let dependency_package = &self.pubgrub.package_store[dependency_package];

                let (self_name, self_extra, self_group) = match &**self_package {
                    PubGrubPackageInner::Package {
                        name: self_name,
                        extra: self_extra,
                        group: self_group,
                        marker: _,
                    } => (Some(self_name), self_extra.as_ref(), self_group.as_ref()),

                    PubGrubPackageInner::Root(_) => (None, None, None),

                    _ => continue,
                };

                let (self_url, self_index) = self_name
                    .map(|self_name| self.source(self_name, self_version))
                    .unwrap_or((None, None));

                match **dependency_package {
                    PubGrubPackageInner::Package {
                        name: ref dependency_name,
                        extra: ref dependency_extra,
                        group: ref dependency_dev,
                        marker: ref dependency_marker,
                    } => {
                        debug_assert!(
                            dependency_extra.is_none(),
                            "Packages should depend on an extra proxy"
                        );
                        debug_assert!(
                            dependency_dev.is_none(),
                            "Packages should depend on a group proxy"
                        );

                        // Ignore self-dependencies (e.g., `tensorflow-macos` depends on `tensorflow-macos`),
                        // but allow groups to depend on other groups, or on the package itself.
                        if self_group.is_none() {
                            if self_name == Some(dependency_name) {
                                continue;
                            }
                        }

                        let (to_url, to_index) = self.source(dependency_name, dependency_version);

                        let edge = ResolutionDependencyEdge {
                            from: self_name.cloned(),
                            from_version: self_version.clone(),
                            from_url: self_url.cloned(),
                            from_index: self_index.cloned(),
                            from_extra: self_extra.cloned(),
                            from_group: self_group.cloned(),
                            to: dependency_name.clone(),
                            to_version: dependency_version.clone(),
                            to_url: to_url.cloned(),
                            to_index: to_index.cloned(),
                            to_extra: dependency_extra.clone(),
                            to_group: dependency_dev.clone(),
                            marker: *dependency_marker,
                        };
                        edges.push(edge);
                    }

                    PubGrubPackageInner::Marker {
                        name: ref dependency_name,
                        marker: ref dependency_marker,
                    } => {
                        // Ignore self-dependencies (e.g., `tensorflow-macos` depends on `tensorflow-macos`),
                        // but allow groups to depend on other groups, or on the package itself.
                        if self_group.is_none() {
                            if self_name == Some(dependency_name) {
                                continue;
                            }
                        }

                        let (to_url, to_index) = self.source(dependency_name, dependency_version);

                        let edge = ResolutionDependencyEdge {
                            from: self_name.cloned(),
                            from_version: self_version.clone(),
                            from_url: self_url.cloned(),
                            from_index: self_index.cloned(),
                            from_extra: self_extra.cloned(),
                            from_group: self_group.cloned(),
                            to: dependency_name.clone(),
                            to_version: dependency_version.clone(),
                            to_url: to_url.cloned(),
                            to_index: to_index.cloned(),
                            to_extra: None,
                            to_group: None,
                            marker: *dependency_marker,
                        };
                        edges.push(edge);
                    }

                    PubGrubPackageInner::Extra {
                        name: ref dependency_name,
                        extra: ref dependency_extra,
                        marker: ref dependency_marker,
                    } => {
                        if self_group.is_none() {
                            debug_assert!(
                                self_name != Some(dependency_name),
                                "Extras should be flattened"
                            );
                        }
                        let (to_url, to_index) = self.source(dependency_name, dependency_version);

                        // Insert an edge from the dependent package to the extra package.
                        let edge = ResolutionDependencyEdge {
                            from: self_name.cloned(),
                            from_version: self_version.clone(),
                            from_url: self_url.cloned(),
                            from_index: self_index.cloned(),
                            from_extra: self_extra.cloned(),
                            from_group: self_group.cloned(),
                            to: dependency_name.clone(),
                            to_version: dependency_version.clone(),
                            to_url: to_url.cloned(),
                            to_index: to_index.cloned(),
                            to_extra: Some(dependency_extra.clone()),
                            to_group: None,
                            marker: *dependency_marker,
                        };
                        edges.push(edge);

                        // Insert an edge from the dependent package to the base package.
                        let edge = ResolutionDependencyEdge {
                            from: self_name.cloned(),
                            from_version: self_version.clone(),
                            from_url: self_url.cloned(),
                            from_index: self_index.cloned(),
                            from_extra: self_extra.cloned(),
                            from_group: self_group.cloned(),
                            to: dependency_name.clone(),
                            to_version: dependency_version.clone(),
                            to_url: to_url.cloned(),
                            to_index: to_index.cloned(),
                            to_extra: None,
                            to_group: None,
                            marker: *dependency_marker,
                        };
                        edges.push(edge);
                    }

                    PubGrubPackageInner::Group {
                        name: ref dependency_name,
                        group: ref dependency_group,
                        marker: ref dependency_marker,
                    } => {
                        debug_assert!(
                            self_name != Some(dependency_name),
                            "Groups should be flattened"
                        );

                        let (to_url, to_index) = self.source(dependency_name, dependency_version);

                        // Add an edge from the dependent package to the dev package, but _not_ the
                        // base package.
                        let edge = ResolutionDependencyEdge {
                            from: self_name.cloned(),
                            from_version: self_version.clone(),
                            from_url: self_url.cloned(),
                            from_index: self_index.cloned(),
                            from_extra: self_extra.cloned(),
                            from_group: self_group.cloned(),
                            to: dependency_name.clone(),
                            to_version: dependency_version.clone(),
                            to_url: to_url.cloned(),
                            to_index: to_index.cloned(),
                            to_extra: None,
                            to_group: Some(dependency_group.clone()),
                            marker: *dependency_marker,
                        };
                        edges.push(edge);
                    }

                    _ => {}
                }
            }
        }

        // Keep only concrete, marker-free package nodes; virtual packages and
        // marker/extra/group proxies are dropped from the final resolution.
        let nodes = solution
            .into_iter()
            .filter_map(|(package, version)| {
                if let PubGrubPackageInner::Package {
                    name,
                    extra,
                    group,
                    marker: MarkerTree::TRUE,
                } = &*self.pubgrub.package_store[package]
                {
                    let (url, index) = self.source(name, &version);
                    Some((
                        ResolutionPackage {
                            name: name.clone(),
                            extra: extra.clone(),
                            dev: group.clone(),
                            url: url.cloned(),
                            index: index.cloned(),
                        },
                        version,
                    ))
                } else {
                    None
                }
            })
            .collect();

        Resolution {
            nodes,
            edges,
            pins: self.pins,
            env: self.env,
        }
    }
}
3502
/// The resolution from a single fork including the virtual packages and the edges between them.
#[derive(Debug)]
pub(crate) struct Resolution {
    /// The selected packages, mapped to their selected version. Each extra and
    /// each dependency group is represented as its own (virtual) package.
    pub(crate) nodes: FxHashMap<ResolutionPackage, Version>,
    /// The directed connections between the nodes, where the marker is the node weight. We don't
    /// store the requirement itself, but it can be retrieved from the package metadata.
    pub(crate) edges: Vec<ResolutionDependencyEdge>,
    /// Map each package name, version tuple from `packages` to a distribution.
    pub(crate) pins: FilePins,
    /// The environment setting this resolution was found under.
    pub(crate) env: ResolverEnvironment,
}
3515
/// Package representation we used during resolution where each extra and also the dev-dependencies
/// group are their own package.
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub(crate) struct ResolutionPackage {
    /// The name of the package.
    pub(crate) name: PackageName,
    /// The extra this (virtual) package represents, if any.
    pub(crate) extra: Option<ExtraName>,
    /// The dependency group this (virtual) package represents, if any.
    pub(crate) dev: Option<GroupName>,
    /// For registry packages, this is `None`; otherwise, the direct URL of the distribution.
    pub(crate) url: Option<VerbatimParsedUrl>,
    /// For URL packages, this is `None`; otherwise, the index URL of the distribution.
    pub(crate) index: Option<IndexUrl>,
}
3528
/// A directed dependency edge in the resolution graph.
///
/// The `from_` fields and the `to_` fields allow mapping to the originating and target
///  [`ResolutionPackage`] respectively. The `marker` is the edge weight.
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub(crate) struct ResolutionDependencyEdge {
    /// This value is `None` if the dependency comes from the root package.
    pub(crate) from: Option<PackageName>,
    pub(crate) from_version: Version,
    pub(crate) from_url: Option<VerbatimParsedUrl>,
    pub(crate) from_index: Option<IndexUrl>,
    pub(crate) from_extra: Option<ExtraName>,
    pub(crate) from_group: Option<GroupName>,
    /// The name of the package this edge points to.
    pub(crate) to: PackageName,
    pub(crate) to_version: Version,
    pub(crate) to_url: Option<VerbatimParsedUrl>,
    pub(crate) to_index: Option<IndexUrl>,
    pub(crate) to_extra: Option<ExtraName>,
    pub(crate) to_group: Option<GroupName>,
    /// The PEP 508 marker under which this edge applies (the edge weight).
    pub(crate) marker: MarkerTree,
}
3548
3549impl ResolutionDependencyEdge {
3550    pub(crate) fn universal_marker(&self) -> UniversalMarker {
3551        // We specifically do not account for conflict
3552        // markers here. Instead, those are computed via
3553        // a traversal on the resolution graph.
3554        UniversalMarker::new(self.marker, ConflictMarker::TRUE)
3555    }
3556}
3557
/// A request to fetch metadata for a package, a distribution, or an
/// already-installed distribution.
#[derive(Debug)]
#[expect(clippy::large_enum_variant)]
pub(crate) enum Request {
    /// A request to fetch the metadata for a package.
    Package(PackageName, Option<IndexMetadata>),
    /// A request to fetch the metadata for a built or source distribution.
    Dist(Dist),
    /// A request to fetch the metadata from an already-installed distribution.
    Installed(InstalledDist),
    /// A request to pre-fetch the metadata for a package and the best-guess distribution.
    Prefetch(PackageName, Range<Version>, PythonRequirement),
}
3571
3572impl<'a> From<ResolvedDistRef<'a>> for Request {
3573    fn from(dist: ResolvedDistRef<'a>) -> Self {
3574        // N.B. This is almost identical to `ResolvedDistRef::to_owned`, but
3575        // creates a `Request` instead of a `ResolvedDist`. There's probably
3576        // some room for DRYing this up a bit. The obvious way would be to
3577        // add a method to create a `Dist`, but a `Dist` cannot be represented
3578        // as an installed dist.
3579        match dist {
3580            ResolvedDistRef::InstallableRegistrySourceDist { sdist, prioritized } => {
3581                // This is okay because we're only here if the prioritized dist
3582                // has an sdist, so this always succeeds.
3583                let source = prioritized.source_dist().expect("a source distribution");
3584                assert_eq!(
3585                    (&sdist.name, &sdist.version),
3586                    (&source.name, &source.version),
3587                    "expected chosen sdist to match prioritized sdist"
3588                );
3589                Self::Dist(Dist::Source(SourceDist::Registry(source)))
3590            }
3591            ResolvedDistRef::InstallableRegistryBuiltDist {
3592                wheel, prioritized, ..
3593            } => {
3594                assert_eq!(
3595                    Some(&wheel.filename),
3596                    prioritized.best_wheel().map(|(wheel, _)| &wheel.filename),
3597                    "expected chosen wheel to match best wheel"
3598                );
3599                // This is okay because we're only here if the prioritized dist
3600                // has at least one wheel, so this always succeeds.
3601                let built = prioritized.built_dist().expect("at least one wheel");
3602                Self::Dist(Dist::Built(BuiltDist::Registry(built)))
3603            }
3604            ResolvedDistRef::Installed { dist } => Self::Installed(dist.clone()),
3605        }
3606    }
3607}
3608
3609impl Display for Request {
3610    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
3611        match self {
3612            Self::Package(package_name, _) => {
3613                write!(f, "Versions {package_name}")
3614            }
3615            Self::Dist(dist) => {
3616                write!(f, "Metadata {dist}")
3617            }
3618            Self::Installed(dist) => {
3619                write!(f, "Installed metadata {dist}")
3620            }
3621            Self::Prefetch(package_name, range, _) => {
3622                write!(f, "Prefetch {package_name} {range}")
3623            }
3624        }
3625    }
3626}
3627
/// The metadata returned in response to a [`Request`].
#[derive(Debug)]
#[expect(clippy::large_enum_variant)]
enum Response {
    /// The returned metadata for a package hosted on a registry.
    Package(PackageName, Option<IndexUrl>, VersionsResponse),
    /// The returned metadata for a distribution.
    Dist {
        dist: Dist,
        metadata: MetadataResponse,
    },
    /// The returned metadata for an already-installed distribution.
    Installed {
        dist: InstalledDist,
        metadata: MetadataResponse,
    },
}
3644
/// Information about the dependencies for a particular package.
///
/// This effectively distills the dependency metadata of a package down into
/// its pubgrub specific constituent parts: each dependency package has a range
/// of possible versions.
enum Dependencies {
    /// Package dependencies are not available, with the reason captured in
    /// the [`UnavailableVersion`].
    Unavailable(UnavailableVersion),
    /// Container for all available package versions.
    ///
    /// Note that in universal mode, it is possible and allowed for multiple
    /// `PubGrubPackage` values in this list to have the same package name.
    /// These conflicts are resolved via `Dependencies::fork`.
    Available(Vec<PubGrubDependency>),
    /// Dependencies that should never result in a fork.
    ///
    /// For example, the dependencies of a `Marker` package will have the
    /// same name and version, but differ according to marker expressions.
    /// But we never want this to result in a fork.
    Unforkable(Vec<PubGrubDependency>),
}
3666
3667impl Dependencies {
3668    /// Turn this flat list of dependencies into a potential set of forked
3669    /// groups of dependencies.
3670    ///
3671    /// A fork *only* occurs when there are multiple dependencies with the same
3672    /// name *and* those dependency specifications have corresponding marker
3673    /// expressions that are completely disjoint with one another.
3674    fn fork(
3675        self,
3676        env: &ResolverEnvironment,
3677        python_requirement: &PythonRequirement,
3678        conflicts: &Conflicts,
3679    ) -> ForkedDependencies {
3680        let deps = match self {
3681            Self::Available(deps) => deps,
3682            Self::Unforkable(deps) => return ForkedDependencies::Unforked(deps),
3683            Self::Unavailable(err) => return ForkedDependencies::Unavailable(err),
3684        };
3685        let mut name_to_deps: BTreeMap<PackageName, Vec<PubGrubDependency>> = BTreeMap::new();
3686        for dep in deps {
3687            let name = dep
3688                .package
3689                .name()
3690                .expect("dependency always has a name")
3691                .clone();
3692            name_to_deps.entry(name).or_default().push(dep);
3693        }
3694        let Forks {
3695            mut forks,
3696            diverging_packages,
3697        } = Forks::new(name_to_deps, env, python_requirement, conflicts);
3698        if forks.is_empty() {
3699            ForkedDependencies::Unforked(vec![])
3700        } else if forks.len() == 1 {
3701            ForkedDependencies::Unforked(forks.pop().unwrap().dependencies)
3702        } else {
3703            ForkedDependencies::Forked {
3704                forks,
3705                diverging_packages: diverging_packages.into_iter().collect(),
3706            }
3707        }
3708    }
3709}
3710
/// Information about the (possibly forked) dependencies for a particular
/// package.
///
/// This is like `Dependencies` but with an extra variant that only occurs when
/// a `Dependencies` list has multiple dependency specifications with the same
/// name and non-overlapping marker expressions (i.e., a fork occurs).
#[derive(Debug)]
enum ForkedDependencies {
    /// Package dependencies are not available.
    Unavailable(UnavailableVersion),
    /// No forking occurred.
    ///
    /// This is the same as `Dependencies::Available`.
    Unforked(Vec<PubGrubDependency>),
    /// Forked containers for all available package versions.
    ///
    /// Note that there are always at least two forks. If there would
    /// be fewer than 2 forks, then there is no fork at all and the
    /// `Unforked` variant is used instead.
    Forked {
        forks: Vec<Fork>,
        /// The package(s) with different requirements for disjoint markers.
        diverging_packages: Vec<PackageName>,
    },
}
3736
/// A list of forks determined from the dependencies of a single package.
///
/// Any time a marker expression is seen that is not true for all possible
/// marker environments, it is possible for it to introduce a new fork.
#[derive(Debug, Default)]
struct Forks {
    /// The forks discovered among the dependencies.
    forks: Vec<Fork>,
    /// The package(s) that provoked at least one additional fork.
    diverging_packages: BTreeSet<PackageName>,
}
3748
impl Forks {
    /// Derive the set of forks implied by the given dependencies, which are
    /// grouped by package name, starting from a single fork with the
    /// environment `env`.
    ///
    /// Forks are introduced by (1) multiple dependency specifications for the
    /// same package whose markers can be split, (2) dependencies that raise
    /// the minimum supported Python version, and (3) declared conflicting
    /// extras/groups in `conflicts`.
    fn new(
        name_to_deps: BTreeMap<PackageName, Vec<PubGrubDependency>>,
        env: &ResolverEnvironment,
        python_requirement: &PythonRequirement,
        conflicts: &Conflicts,
    ) -> Self {
        // Used below to discard any new fork whose environment is disjoint
        // with the supported Python range.
        let python_marker = python_requirement.to_marker_tree();

        let mut forks = vec![Fork::new(env.clone())];
        let mut diverging_packages = BTreeSet::new();
        for (name, mut deps) in name_to_deps {
            assert!(!deps.is_empty(), "every name has at least one dependency");
            // We never fork if there's only one dependency
            // specification for a given package name. This particular
            // strategy results in a "conservative" approach to forking
            // that gives up correctness in some cases in exchange for
            // more limited forking. More limited forking results in
            // simpler-and-easier-to-understand lock files and faster
            // resolving. The correctness we give up manifests when
            // two transitive non-sibling dependencies conflict. In
            // that case, we don't detect the fork ahead of time (at
            // present).
            if let [dep] = deps.as_slice() {
                // There's one exception: if the requirement increases the minimum-supported Python
                // version, we also fork in order to respect that minimum in the subsequent
                // resolution.
                //
                // For example, given `requires-python = ">=3.7"` and `uv ; python_version >= "3.8"`,
                // where uv itself only supports Python 3.8 and later, we need to fork to ensure
                // that the resolution can find a solution.
                if marker::requires_python(dep.package.marker())
                    .is_none_or(|bound| !python_requirement.raises(&bound))
                {
                    let dep = deps.pop().unwrap();
                    let marker = dep.package.marker();
                    for fork in &mut forks {
                        if fork.env.included_by_marker(marker) {
                            fork.add_dependency(dep.clone());
                        }
                    }
                    continue;
                }
            } else {
                // If all dependencies have the same markers, we should also avoid forking.
                if let Some(dep) = deps.first() {
                    let marker = dep.package.marker();
                    if deps.iter().all(|dep| marker == dep.package.marker()) {
                        // Unless that "same marker" is a Python requirement that is stricter than
                        // the current Python requirement. In that case, we need to fork to respect
                        // the stricter requirement.
                        if marker::requires_python(marker)
                            .is_none_or(|bound| !python_requirement.raises(&bound))
                        {
                            for dep in deps {
                                for fork in &mut forks {
                                    if fork.env.included_by_marker(marker) {
                                        fork.add_dependency(dep.clone());
                                    }
                                }
                            }
                            continue;
                        }
                    }
                }
            }
            // Multiple specifications of the same package with (potentially)
            // splittable markers: attempt to split each existing fork.
            for dep in deps {
                let mut forker = match ForkingPossibility::new(env, &dep) {
                    ForkingPossibility::Possible(forker) => forker,
                    ForkingPossibility::DependencyAlwaysExcluded => {
                        // If the markers can never be satisfied by the parent
                        // fork, then we can drop this dependency unceremoniously.
                        continue;
                    }
                    ForkingPossibility::NoForkingPossible => {
                        // Or, if the markers are always true, then we just
                        // add the dependency to every fork unconditionally.
                        for fork in &mut forks {
                            fork.add_dependency(dep.clone());
                        }
                        continue;
                    }
                };
                // Otherwise, we *should* need to add a new fork...
                diverging_packages.insert(name.clone());

                let mut new = vec![];
                for fork in std::mem::take(&mut forks) {
                    let Some((remaining_forker, envs)) = forker.fork(&fork.env) else {
                        new.push(fork);
                        continue;
                    };
                    forker = remaining_forker;

                    for fork_env in envs {
                        let mut new_fork = fork.clone();
                        new_fork.set_env(fork_env);
                        // We only add the dependency to this fork if it
                        // satisfies the fork's markers. Some forks are
                        // specifically created to exclude this dependency,
                        // so this isn't always true!
                        if forker.included(&new_fork.env) {
                            new_fork.add_dependency(dep.clone());
                        }
                        // Filter out any forks we created that are disjoint with our
                        // Python requirement.
                        if new_fork.env.included_by_marker(python_marker) {
                            new.push(new_fork);
                        }
                    }
                }
                forks = new;
            }
        }
        // When there is a conflicting group configuration, we need
        // to potentially add more forks. Each fork added contains an
        // exclusion list of conflicting groups where dependencies with
        // the corresponding package and extra name are forcefully
        // excluded from that group.
        //
        // We specifically iterate on conflicting groups and
        // potentially re-generate all forks for each one. We do it
        // this way in case there are multiple sets of conflicting
        // groups that impact the forks here.
        //
        // For example, if we have conflicting groups {x1, x2} and {x3,
        // x4}, we need to make sure the forks generated from one set
        // also account for the other set.
        for set in conflicts.iter() {
            let mut new = vec![];
            for fork in std::mem::take(&mut forks) {
                // Check if this conflict set is relevant to this fork. We need two conditions:
                //
                // 1. At least one item has dependencies in this fork (otherwise there's nothing to
                //    fork on).
                // 2. At least two items are not already excluded in this fork's environment
                //    (otherwise the conflict constraint is already satisfied and no fork is
                //    needed).
                let mut has_conflicting_dependency = false;
                for item in set.iter() {
                    if fork.contains_conflicting_item(item.as_ref()) {
                        has_conflicting_dependency = true;
                        diverging_packages.insert(item.package().clone());
                        break;
                    }
                }
                if !has_conflicting_dependency {
                    new.push(fork);
                    continue;
                }

                // If fewer than two items in this conflict set are still possible (not already
                // excluded) in this fork, the conflict constraint is already satisfied by prior
                // forking. We can skip the full N+1 fork split if the single remaining non-excluded
                // item doesn't appear in any other conflict set (since it would never need its own
                // "excluded" variant).
                let non_excluded: Vec<_> = set
                    .iter()
                    .filter(|item| fork.env.included_by_group(item.as_ref()))
                    .collect();
                if non_excluded.len() < 2 {
                    // Check if any non-excluded item still has a live conflict in another set —
                    // i.e., another set where this item AND at least one other non-excluded item
                    // both appear. If so, we still need to fork to create the "excluded" variant
                    // for that item.
                    let dominated = non_excluded.iter().all(|item| {
                        !conflicts.iter().any(|other_set| {
                            !std::ptr::eq(set, other_set)
                                && other_set.contains(item.package(), item.kind().as_ref())
                                && other_set
                                    .iter()
                                    .filter(|other_item| {
                                        other_item.package() != item.package()
                                            || other_item.kind() != item.kind()
                                    })
                                    .any(|other_item| {
                                        fork.env.included_by_group(other_item.as_ref())
                                    })
                        })
                    });
                    if dominated {
                        // When dependencies are added to forks, we check `included_by_marker` but
                        // not on whether the dependency's conflict item is included by the fork's
                        // environment so there may be extraneous dependencies and we need to filter
                        // the fork to clean up dependencies gated on already-excluded extras.
                        let rules: Vec<_> = set
                            .iter()
                            .filter(|item| !fork.env.included_by_group(item.as_ref()))
                            .cloned()
                            .map(Err)
                            .collect();
                        if let Some(filtered) = fork.filter(rules) {
                            new.push(filtered);
                        }
                        continue;
                    }
                }

                // Create a fork that excludes ALL conflicts.
                if let Some(fork_none) = fork.clone().filter(set.iter().cloned().map(Err)) {
                    new.push(fork_none);
                }

                // Now create a fork for each conflicting group, where
                // that fork excludes every *other* conflicting group.
                //
                // So if we have conflicting extras foo, bar and baz,
                // then this creates three forks: one that excludes
                // {foo, bar}, one that excludes {foo, baz} and one
                // that excludes {bar, baz}.
                for (i, _) in set.iter().enumerate() {
                    let fork_allows_group = fork.clone().filter(
                        set.iter()
                            .cloned()
                            .enumerate()
                            .map(|(j, group)| if i == j { Ok(group) } else { Err(group) }),
                    );
                    if let Some(fork_allows_group) = fork_allows_group {
                        new.push(fork_allows_group);
                    }
                }
            }
            forks = new;
        }
        Self {
            forks,
            diverging_packages,
        }
    }
}
3979
/// A single fork in a list of dependencies.
///
/// A fork corresponds to the full list of dependencies for a package,
/// but with any conflicting dependency specifications omitted. For
/// example, if we have `a<2 ; sys_platform == 'foo'` and `a>=2 ;
/// sys_platform == 'bar'`, then because the dependency specifications
/// have the same name and because the marker expressions are disjoint,
/// a fork occurs. One fork will contain `a<2` but not `a>=2`, while
/// the other fork will contain `a>=2` but not `a<2`.
#[derive(Clone, Debug)]
struct Fork {
    /// The list of dependencies for this fork, guaranteed to be conflict
    /// free. (i.e., There are no two packages with the same name with
    /// non-overlapping marker expressions.)
    ///
    /// Note that callers shouldn't mutate this sequence directly. Instead,
    /// they should use `add_dependency`, which keeps the `conflicts` set
    /// below in sync. Namely, it should be impossible for a package with a
    /// marker expression that is disjoint from the marker expression on this
    /// fork to be added.
    dependencies: Vec<PubGrubDependency>,
    /// The conflicting groups in this fork.
    ///
    /// This exists to make some access patterns more efficient. Namely,
    /// it makes it easy to check whether there's a dependency with a
    /// particular conflicting group in this fork.
    conflicts: crate::FxHashbrownSet<ConflictItem>,
    /// The resolver environment for this fork.
    ///
    /// Principally, this corresponds to the markers in this fork. So in the
    /// example above, the `a<2` fork would have `sys_platform == 'foo'`, while
    /// the `a>=2` fork would have `sys_platform == 'bar'`.
    ///
    /// If this fork was generated from another fork, then this *includes*
    /// the criteria from its parent. i.e., Its marker expression represents
    /// the intersection of the marker expression from its parent and any
    /// additional marker expression generated by additional forking based on
    /// conflicting dependency specifications.
    env: ResolverEnvironment,
}
4019
4020impl Fork {
4021    /// Create a new fork with no dependencies with the given resolver
4022    /// environment.
4023    fn new(env: ResolverEnvironment) -> Self {
4024        Self {
4025            dependencies: vec![],
4026            conflicts: crate::FxHashbrownSet::default(),
4027            env,
4028        }
4029    }
4030
4031    /// Add a dependency to this fork.
4032    fn add_dependency(&mut self, dep: PubGrubDependency) {
4033        if let Some(conflicting_item) = dep.conflicting_item() {
4034            self.conflicts.insert(conflicting_item.to_owned());
4035        }
4036        self.dependencies.push(dep);
4037    }
4038
4039    /// Sets the resolver environment to the one given.
4040    ///
4041    /// Any dependency in this fork that does not satisfy the given environment
4042    /// is removed.
4043    fn set_env(&mut self, env: ResolverEnvironment) {
4044        self.env = env;
4045        self.dependencies.retain(|dep| {
4046            let marker = dep.package.marker();
4047            if self.env.included_by_marker(marker) {
4048                return true;
4049            }
4050            if let Some(conflicting_item) = dep.conflicting_item() {
4051                self.conflicts.remove(&conflicting_item);
4052            }
4053            false
4054        });
4055    }
4056
4057    /// Returns true if any of the dependencies in this fork contain a
4058    /// dependency with the given package and extra values.
4059    fn contains_conflicting_item(&self, item: ConflictItemRef<'_>) -> bool {
4060        self.conflicts.contains(&item)
4061    }
4062
4063    /// Include or Exclude the given groups from this fork.
4064    ///
4065    /// This removes all dependencies matching the given conflicting groups.
4066    ///
4067    /// If the exclusion rules would result in a fork with an unsatisfiable
4068    /// resolver environment, then this returns `None`.
4069    fn filter(
4070        mut self,
4071        rules: impl IntoIterator<Item = Result<ConflictItem, ConflictItem>>,
4072    ) -> Option<Self> {
4073        self.env = self.env.filter_by_group(rules)?;
4074        self.dependencies.retain(|dep| {
4075            let Some(conflicting_item) = dep.conflicting_item() else {
4076                return true;
4077            };
4078            if self.env.included_by_group(conflicting_item) {
4079                return true;
4080            }
4081            match conflicting_item.kind() {
4082                // We should not filter entire projects unless they're a top-level dependency
4083                // Otherwise, we'll fail to solve for children of the project, like extras
4084                ConflictKindRef::Project => {
4085                    if dep.parent.is_some() {
4086                        return true;
4087                    }
4088                }
4089                ConflictKindRef::Group(_) => {}
4090                ConflictKindRef::Extra(_) => {}
4091            }
4092            self.conflicts.remove(&conflicting_item);
4093            false
4094        });
4095        Some(self)
4096    }
4097
4098    /// Compare forks, preferring forks with g `requires-python` requirements.
4099    fn cmp_requires_python(&self, other: &Self) -> Ordering {
4100        // A higher `requires-python` requirement indicates a _higher-priority_ fork.
4101        //
4102        // This ordering ensures that we prefer choosing the highest version for each fork based on
4103        // its `requires-python` requirement.
4104        //
4105        // The reverse would prefer choosing fewer versions, at the cost of using older package
4106        // versions on newer Python versions. For example, if reversed, we'd prefer to solve `<3.7
4107        // before solving `>=3.7`, since the resolution produced by the former might work for the
4108        // latter, but the inverse is unlikely to be true.
4109        let self_bound = self.env.requires_python().unwrap_or_default();
4110        let other_bound = other.env.requires_python().unwrap_or_default();
4111        self_bound.lower().cmp(other_bound.lower())
4112    }
4113
4114    /// Compare forks, preferring forks with upper bounds.
4115    fn cmp_upper_bounds(&self, other: &Self) -> Ordering {
4116        // We'd prefer to solve `numpy <= 2` before solving `numpy >= 1`, since the resolution
4117        // produced by the former might work for the latter, but the inverse is unlikely to be true
4118        // due to maximum version selection. (Selecting `numpy==2.0.0` would satisfy both forks, but
4119        // selecting the latest `numpy` would not.)
4120        let self_upper_bounds = self
4121            .dependencies
4122            .iter()
4123            .filter(|dep| {
4124                dep.version
4125                    .bounding_range()
4126                    .is_some_and(|(_, upper)| !matches!(upper, Bound::Unbounded))
4127            })
4128            .count();
4129        let other_upper_bounds = other
4130            .dependencies
4131            .iter()
4132            .filter(|dep| {
4133                dep.version
4134                    .bounding_range()
4135                    .is_some_and(|(_, upper)| !matches!(upper, Bound::Unbounded))
4136            })
4137            .count();
4138
4139        self_upper_bounds.cmp(&other_upper_bounds)
4140    }
4141}
4142
4143impl Eq for Fork {}
4144
4145impl PartialEq for Fork {
4146    fn eq(&self, other: &Self) -> bool {
4147        self.dependencies == other.dependencies && self.env == other.env
4148    }
4149}
4150
/// A fork of the resolution, along with the package (and, optionally, the version) that should be
/// attempted first within it.
#[derive(Debug, Clone)]
pub(crate) struct VersionFork {
    /// The environment to use in the fork.
    env: ResolverEnvironment,
    /// The initial package to select in the fork.
    id: Id<PubGrubPackage>,
    /// The initial version to set for the selected package in the fork.
    version: Option<Version>,
}
4160
4161/// Enrich a [`ResolveError`] with additional information about why a given package was included.
4162fn enrich_dependency_error(
4163    error: ResolveError,
4164    id: Id<PubGrubPackage>,
4165    version: &Version,
4166    pubgrub: &State<UvDependencyProvider>,
4167) -> ResolveError {
4168    let Some(name) = pubgrub.package_store[id].name_no_root() else {
4169        return error;
4170    };
4171    let chain = DerivationChainBuilder::from_state(id, version, pubgrub).unwrap_or_default();
4172    ResolveError::Dependencies(Box::new(error), name.clone(), version.clone(), chain)
4173}
4174
4175/// Compute the set of markers for which a package is known to be relevant.
4176fn find_environments(id: Id<PubGrubPackage>, state: &State<UvDependencyProvider>) -> MarkerTree {
4177    let package = &state.package_store[id];
4178    if package.is_root() {
4179        return MarkerTree::TRUE;
4180    }
4181
4182    // First, collect the reverse-dependency closure for the package. We limit the propagation
4183    // below to this subgraph so cycles in unrelated packages don't matter here.
4184    let mut ancestors = FxHashSet::default();
4185    let mut stack = vec![id];
4186    let mut root = None;
4187    ancestors.insert(id);
4188
4189    while let Some(current) = stack.pop() {
4190        let Some(incompatibilities) = state.incompatibilities.get(&current) else {
4191            continue;
4192        };
4193
4194        for index in incompatibilities {
4195            let incompat = &state.incompatibility_store[*index];
4196            if let Kind::FromDependencyOf(parent, _, child, _) = &incompat.kind {
4197                if current != *child {
4198                    continue;
4199                }
4200                if ancestors.insert(*parent) {
4201                    if state.package_store[*parent].is_root() {
4202                        root = Some(*parent);
4203                    }
4204                    stack.push(*parent);
4205                }
4206            }
4207        }
4208    }
4209
4210    let Some(root) = root else {
4211        return MarkerTree::FALSE;
4212    };
4213
4214    // Propagate markers forward from the root through the collected subgraph. This reaches a
4215    // fixpoint even in the presence of cycles, unlike the recursive reverse walk above.
4216    let mut environments = FxHashMap::default();
4217    let mut queue = VecDeque::from([root]);
4218    environments.insert(root, MarkerTree::TRUE);
4219
4220    while let Some(current) = queue.pop_front() {
4221        let Some(current_environment) = environments.get(&current).copied() else {
4222            continue;
4223        };
4224        let Some(incompatibilities) = state.incompatibilities.get(&current) else {
4225            continue;
4226        };
4227
4228        for index in incompatibilities {
4229            let incompat = &state.incompatibility_store[*index];
4230            let Kind::FromDependencyOf(parent, _, child, _) = &incompat.kind else {
4231                continue;
4232            };
4233            if current != *parent || !ancestors.contains(child) {
4234                continue;
4235            }
4236
4237            let mut next_environment = state.package_store[*child].marker();
4238            next_environment.and(current_environment);
4239
4240            let entry = environments.entry(*child).or_insert(MarkerTree::FALSE);
4241            let mut combined = *entry;
4242            combined.or(next_environment);
4243            if combined != *entry {
4244                *entry = combined;
4245                queue.push_back(*child);
4246            }
4247        }
4248    }
4249
4250    environments.remove(&id).unwrap_or(MarkerTree::FALSE)
4251}
4252
/// Per-package conflict statistics, used to adjust package prioritization after unit propagation.
#[derive(Debug, Default, Clone)]
struct ConflictTracker {
    /// How often a decision on the package was discarded due to another package decided earlier.
    affected: FxHashMap<Id<PubGrubPackage>, usize>,
    /// Package(s) to be prioritized after the next unit propagation.
    ///
    /// Distilled from `affected` for fast checking in the hot loop.
    prioritize: Vec<Id<PubGrubPackage>>,
    /// How often a package was decided earlier and caused another package to be discarded.
    culprit: FxHashMap<Id<PubGrubPackage>, usize>,
    /// Package(s) to be de-prioritized after the next unit propagation.
    ///
    /// Distilled from `culprit` for fast checking in the hot loop.
    deprioritize: Vec<Id<PubGrubPackage>>,
}