uv_resolver/resolver/mod.rs

//! Given a set of requirements, find a set of compatible packages.

use std::borrow::Cow;
use std::cmp::Ordering;
use std::collections::{BTreeMap, BTreeSet, VecDeque};
use std::fmt::{Display, Formatter, Write};
use std::ops::Bound;
use std::sync::Arc;
use std::time::Instant;
use std::{iter, slice, thread};

use dashmap::DashMap;
use either::Either;
use futures::{FutureExt, StreamExt};
use itertools::Itertools;
use pubgrub::{Id, IncompId, Incompatibility, Kind, Range, Ranges, State};
use rustc_hash::{FxHashMap, FxHashSet};
use tokio::sync::mpsc::{self, Receiver, Sender};
use tokio::sync::oneshot;
use tokio_stream::wrappers::ReceiverStream;
use tracing::{Level, debug, info, instrument, trace, warn};

use uv_configuration::{Constraints, Excludes, Overrides};
use uv_distribution::{ArchiveMetadata, DistributionDatabase};
use uv_distribution_types::{
    BuiltDist, CompatibleDist, DerivationChain, Dist, DistErrorKind, DistributionMetadata,
    IncompatibleDist, IncompatibleSource, IncompatibleWheel, IndexCapabilities, IndexLocations,
    IndexMetadata, IndexUrl, InstalledDist, Name, PythonRequirementKind, RemoteSource, Requirement,
    ResolvedDist, ResolvedDistRef, SourceDist, VersionOrUrlRef, implied_markers,
};
use uv_git::GitResolver;
use uv_normalize::{ExtraName, GroupName, PackageName};
use uv_pep440::{MIN_VERSION, Version, VersionSpecifiers, release_specifiers_to_ranges};
use uv_pep508::{
    MarkerEnvironment, MarkerExpression, MarkerOperator, MarkerTree, MarkerValueString,
};
use uv_platform_tags::{IncompatibleTag, Tags};
use uv_pypi_types::{ConflictItem, ConflictItemRef, ConflictKindRef, Conflicts, VerbatimParsedUrl};
use uv_torch::TorchStrategy;
use uv_types::{BuildContext, HashStrategy, InstalledPackagesProvider};
use uv_warnings::warn_user_once;

use crate::candidate_selector::{Candidate, CandidateDist, CandidateSelector};
use crate::dependency_provider::UvDependencyProvider;
use crate::error::{NoSolutionError, ResolveError};
use crate::fork_indexes::ForkIndexes;
use crate::fork_strategy::ForkStrategy;
use crate::fork_urls::ForkUrls;
use crate::manifest::Manifest;
use crate::pins::FilePins;
use crate::preferences::{PreferenceSource, Preferences};
use crate::pubgrub::{
    PubGrubDependency, PubGrubDistribution, PubGrubPackage, PubGrubPackageInner, PubGrubPriorities,
    PubGrubPython,
};
use crate::python_requirement::PythonRequirement;
use crate::resolution::ResolverOutput;
use crate::resolution_mode::ResolutionStrategy;
pub(crate) use crate::resolver::availability::{
    ResolverVersion, UnavailableErrorChain, UnavailablePackage, UnavailableReason,
    UnavailableVersion,
};
use crate::resolver::batch_prefetch::BatchPrefetcher;
pub use crate::resolver::derivation::DerivationChainBuilder;
pub use crate::resolver::environment::ResolverEnvironment;
use crate::resolver::environment::{
    ForkingPossibility, fork_version_by_marker, fork_version_by_python_requirement,
};
pub(crate) use crate::resolver::fork_map::{ForkMap, ForkSet};
pub use crate::resolver::index::InMemoryIndex;
use crate::resolver::indexes::Indexes;
pub use crate::resolver::provider::{
    DefaultResolverProvider, MetadataResponse, PackageVersionsResult, ResolverProvider,
    VersionsResponse, WheelMetadataResult,
};
pub use crate::resolver::reporter::{BuildId, Reporter};
use crate::resolver::system::SystemDependency;
pub(crate) use crate::resolver::urls::Urls;
use crate::universal_marker::{ConflictMarker, UniversalMarker};
use crate::yanks::AllowedYanks;
use crate::{
    DependencyMode, ExcludeNewer, Exclusions, FlatIndex, Options, ResolutionMode, VersionMap,
    marker,
};
pub(crate) use provider::MetadataUnavailable;

mod availability;
mod batch_prefetch;
mod derivation;
mod environment;
mod fork_map;
mod index;
mod indexes;
mod provider;
mod reporter;
mod system;
mod urls;

/// The number of conflicts a package may accumulate before we re-prioritize and backtrack.
const CONFLICT_THRESHOLD: usize = 5;

pub struct Resolver<Provider: ResolverProvider, InstalledPackages: InstalledPackagesProvider> {
    state: ResolverState<InstalledPackages>,
    provider: Provider,
}

/// State that is shared between the prefetcher and the PubGrub solver during
/// resolution, across all forks.
struct ResolverState<InstalledPackages: InstalledPackagesProvider> {
    project: Option<PackageName>,
    requirements: Vec<Requirement>,
    constraints: Constraints,
    overrides: Overrides,
    excludes: Excludes,
    preferences: Preferences,
    git: GitResolver,
    capabilities: IndexCapabilities,
    locations: IndexLocations,
    exclusions: Exclusions,
    urls: Urls,
    indexes: Indexes,
    dependency_mode: DependencyMode,
    hasher: HashStrategy,
    env: ResolverEnvironment,
    // The environment of the current Python interpreter.
    current_environment: MarkerEnvironment,
    tags: Option<Tags>,
    python_requirement: PythonRequirement,
    conflicts: Conflicts,
    workspace_members: BTreeSet<PackageName>,
    selector: CandidateSelector,
    index: InMemoryIndex,
    installed_packages: InstalledPackages,
    /// Incompatibilities for packages that are entirely unavailable.
    unavailable_packages: DashMap<PackageName, UnavailablePackage>,
    /// Incompatibilities for packages that are unavailable at specific versions.
    incomplete_packages: DashMap<PackageName, DashMap<Version, MetadataUnavailable>>,
    /// The options that were used to configure this resolver.
    options: Options,
    /// The reporter to use for this resolver.
    reporter: Option<Arc<dyn Reporter>>,
}

impl<'a, Context: BuildContext, InstalledPackages: InstalledPackagesProvider>
    Resolver<DefaultResolverProvider<'a, Context>, InstalledPackages>
{
    /// Initialize a new resolver using the default backend, which performs real requests.
    ///
    /// Reads the flat index entries.
    ///
    /// # Marker environment
    ///
    /// The marker environment is optional.
    ///
    /// When a marker environment is not provided, the resolver is said to be
    /// in "universal" mode. In universal mode, the resolution produced may
    /// contain multiple versions of the same package. Thus, to use the
    /// resulting resolution, there must be a "universal"-aware reader of the
    /// resolution that knows to exclude distributions that can't be used in
    /// the current environment.
    ///
    /// When a marker environment is provided, the resolver is in
    /// "non-universal" mode, which corresponds to standard `pip` behavior:
    /// the resolution is valid only for that specific marker environment.
    pub fn new(
        manifest: Manifest,
        options: Options,
        python_requirement: &'a PythonRequirement,
        env: ResolverEnvironment,
        current_environment: &MarkerEnvironment,
        conflicts: Conflicts,
        tags: Option<&'a Tags>,
        flat_index: &'a FlatIndex,
        index: &'a InMemoryIndex,
        hasher: &'a HashStrategy,
        build_context: &'a Context,
        installed_packages: InstalledPackages,
        database: DistributionDatabase<'a, Context>,
    ) -> Result<Self, ResolveError> {
        let provider = DefaultResolverProvider::new(
            database,
            flat_index,
            tags,
            python_requirement.target(),
            AllowedYanks::from_manifest(&manifest, &env, options.dependency_mode),
            hasher,
            options.exclude_newer.clone(),
            build_context.build_options(),
            build_context.capabilities(),
        );

        Self::new_custom_io(
            manifest,
            options,
            hasher,
            env,
            current_environment,
            tags.cloned(),
            python_requirement,
            conflicts,
            index,
            build_context.git(),
            build_context.capabilities(),
            build_context.locations(),
            provider,
            installed_packages,
        )
    }
}

impl<Provider: ResolverProvider, InstalledPackages: InstalledPackagesProvider>
    Resolver<Provider, InstalledPackages>
{
    /// Initialize a new resolver using a user-provided backend.
    pub fn new_custom_io(
        manifest: Manifest,
        options: Options,
        hasher: &HashStrategy,
        env: ResolverEnvironment,
        current_environment: &MarkerEnvironment,
        tags: Option<Tags>,
        python_requirement: &PythonRequirement,
        conflicts: Conflicts,
        index: &InMemoryIndex,
        git: &GitResolver,
        capabilities: &IndexCapabilities,
        locations: &IndexLocations,
        provider: Provider,
        installed_packages: InstalledPackages,
    ) -> Result<Self, ResolveError> {
        let state = ResolverState {
            index: index.clone(),
            git: git.clone(),
            capabilities: capabilities.clone(),
            selector: CandidateSelector::for_resolution(&options, &manifest, &env),
            dependency_mode: options.dependency_mode,
            urls: Urls::from_manifest(&manifest, &env, git, options.dependency_mode),
            indexes: Indexes::from_manifest(&manifest, &env, options.dependency_mode),
            project: manifest.project,
            workspace_members: manifest.workspace_members,
            requirements: manifest.requirements,
            constraints: manifest.constraints,
            overrides: manifest.overrides,
            excludes: manifest.excludes,
            preferences: manifest.preferences,
            exclusions: manifest.exclusions,
            hasher: hasher.clone(),
            locations: locations.clone(),
            env,
            current_environment: current_environment.clone(),
            tags,
            python_requirement: python_requirement.clone(),
            conflicts,
            installed_packages,
            unavailable_packages: DashMap::default(),
            incomplete_packages: DashMap::default(),
            options,
            reporter: None,
        };
        Ok(Self { state, provider })
    }

    /// Set the [`Reporter`] to use for this resolver.
    #[must_use]
    pub fn with_reporter(self, reporter: Arc<dyn Reporter>) -> Self {
        Self {
            state: ResolverState {
                reporter: Some(reporter.clone()),
                ..self.state
            },
            provider: self
                .provider
                .with_reporter(reporter.into_distribution_reporter()),
        }
    }

    /// Resolve a set of requirements into a set of pinned versions.
    pub async fn resolve(self) -> Result<ResolverOutput, ResolveError> {
        let state = Arc::new(self.state);
        let provider = Arc::new(self.provider);

        // A channel to fetch package metadata (e.g., given `flask`, fetch all versions) and version
        // metadata (e.g., given `flask==1.0.0`, fetch the metadata for that version).
        // Channel size is set large to accommodate batch prefetching.
        let (request_sink, request_stream) = mpsc::channel(300);

        // Run the fetcher.
        let requests_fut = state.clone().fetch(provider.clone(), request_stream).fuse();

        // Spawn the PubGrub solver on a dedicated thread.
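        // The solver itself is synchronous and blocking (it waits on the in-memory index and
        // uses `blocking_send` on the request channel), so it runs on its own OS thread rather
        // than on the async runtime, reporting its result back over a oneshot channel.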
        let solver = state.clone();
        let (tx, rx) = oneshot::channel();
        thread::Builder::new()
            .name("uv-resolver".into())
            .spawn(move || {
                let result = solver.solve(&request_sink);

                // This may fail if the main thread returned early due to an error.
                let _ = tx.send(result);
            })
            .unwrap();

        let resolve_fut = async move { rx.await.map_err(|_| ResolveError::ChannelClosed) };

        // Wait for both to complete.
        let ((), resolution) = tokio::try_join!(requests_fut, resolve_fut)?;

        state.on_complete();
        resolution
    }
}

impl<InstalledPackages: InstalledPackagesProvider> ResolverState<InstalledPackages> {
    #[instrument(skip_all)]
    fn solve(
        self: Arc<Self>,
        request_sink: &Sender<Request>,
    ) -> Result<ResolverOutput, ResolveError> {
        debug!(
            "Solving with installed Python version: {}",
            self.python_requirement.exact()
        );
        debug!(
            "Solving with target Python version: {}",
            self.python_requirement.target()
        );

        let mut visited = FxHashSet::default();
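        // Track the package names whose version metadata we've already requested; this feeds
        // into the "no solution" error reporting below.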

        let root = PubGrubPackage::from(PubGrubPackageInner::Root(self.project.clone()));
        let pubgrub = State::init(root.clone(), MIN_VERSION.clone());
        let prefetcher = BatchPrefetcher::new(
            self.capabilities.clone(),
            self.index.clone(),
            request_sink.clone(),
        );
        let state = ForkState::new(
            pubgrub,
            self.env.clone(),
            self.python_requirement.clone(),
            prefetcher,
        );
        let mut preferences = self.preferences.clone();
        let mut forked_states = self.env.initial_forked_states(state)?;
        let mut resolutions = vec![];

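        // Process the fork stack. Each fork is an independent PubGrub resolution restricted to
        // a subset of environments; every successfully solved fork contributes one resolution,
        // and the per-fork resolutions are merged into a single output at the end.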
        'FORK: while let Some(mut state) = forked_states.pop() {
            if let Some(split) = state.env.end_user_fork_display() {
                let requires_python = state.python_requirement.target();
                debug!("Solving {split} (requires-python: {requires_python:?})");
            }
            let start = Instant::now();
            loop {
                let highest_priority_pkg =
                    if let Some(initial) = state.initial_id.take() {
                        // If we just forked based on `requires-python`, we can skip unit
                        // propagation, since we already propagated the package that initiated
                        // the fork.
                        initial
                    } else {
                        // Run unit propagation.
                        let result = state.pubgrub.unit_propagation(state.next);
                        match result {
                            Err(err) => {
                                // If unit propagation failed, there is no solution.
                                return Err(self.convert_no_solution_err(
                                    err,
                                    state.fork_urls,
                                    state.fork_indexes,
                                    state.env,
                                    self.current_environment.clone(),
                                    Some(&self.options.exclude_newer),
                                    &visited,
                                ));
                            }
                            Ok(conflicts) => {
                                for (affected, incompatibility) in conflicts {
                                    // Conflict tracking: if there was a conflict, track the
                                    // affected package and the culprit for all root-cause
                                    // incompatibilities.
                                    state.record_conflict(affected, None, incompatibility);
                                }
                            }
                        }

                        // Pre-visit all candidate packages, to allow metadata to be fetched in parallel.
                        if self.dependency_mode.is_transitive() {
                            Self::pre_visit(
                                state
                                    .pubgrub
                                    .partial_solution
                                    .prioritized_packages()
                                    .map(|(id, range)| (&state.pubgrub.package_store[id], range)),
                                &self.urls,
                                &self.indexes,
                                &state.python_requirement,
                                request_sink,
                            )?;
                        }

                        Self::reprioritize_conflicts(&mut state);

                        trace!(
                            "Assigned packages: {}",
                            state
                                .pubgrub
                                .partial_solution
                                .extract_solution()
                                .filter(|(p, _)| !state.pubgrub.package_store[*p].is_proxy())
                                .map(|(p, v)| format!("{}=={}", state.pubgrub.package_store[p], v))
                                .join(", ")
                        );
                        // Choose a package.
                        // We aren't allowed to use the term intersection as it would extend the
                        // mutable borrow of `state`.
                        let Some((highest_priority_pkg, _)) =
                            state.pubgrub.partial_solution.pick_highest_priority_pkg(
                                |id, _range| state.priorities.get(&state.pubgrub.package_store[id]),
                            )
                        else {
                            // All packages have been assigned; the fork has been successfully resolved.
                            if tracing::enabled!(Level::DEBUG) {
                                state.prefetcher.log_tried_versions();
                            }
                            debug!(
                                "{} resolution took {:.3}s",
                                state.env,
                                start.elapsed().as_secs_f32()
                            );

                            let resolution = state.into_resolution();

                            // Walk over the selected versions and mark them as preferences. We have to
                            // add forks back so as not to override the preferences from the lockfile for
                            // the next fork.
                            //
                            // If we're using a resolution mode that varies based on whether a dependency is
                            // direct or transitive, skip preferences, as we risk adding a preference from
                            // one fork (in which it's a transitive dependency) to another fork (in which
                            // it's direct).
                            if matches!(
                                self.options.resolution_mode,
                                ResolutionMode::Lowest | ResolutionMode::Highest
                            ) {
                                for (package, version) in &resolution.nodes {
                                    preferences.insert(
                                        package.name.clone(),
                                        package.index.clone(),
                                        resolution
                                            .env
                                            .try_universal_markers()
                                            .unwrap_or(UniversalMarker::TRUE),
                                        version.clone(),
                                        PreferenceSource::Resolver,
                                    );
                                }
                            }

                            resolutions.push(resolution);
                            continue 'FORK;
                        };
                        trace!(
                            "Chose package for decision: {}. remaining choices: {}",
                            state.pubgrub.package_store[highest_priority_pkg],
                            state
                                .pubgrub
                                .partial_solution
                                .undecided_packages()
                                .filter(|(p, _)| !state.pubgrub.package_store[**p].is_proxy())
                                .map(|(p, _)| state.pubgrub.package_store[*p].to_string())
                                .join(", ")
                        );

                        highest_priority_pkg
                    };

                state.next = highest_priority_pkg;

                // TODO(charlie): Remove as many usages of `next_package` as we can.
                let next_id = state.next;
                let next_package = &state.pubgrub.package_store[state.next];

                let url = next_package
                    .name()
                    .and_then(|name| state.fork_urls.get(name));
                let index = next_package
                    .name()
                    .and_then(|name| state.fork_indexes.get(name));

                // Consider:
                // ```toml
                // dependencies = [
                //   "iniconfig == 1.1.1 ; python_version < '3.12'",
                //   "iniconfig @ https://files.pythonhosted.org/packages/ef/a6/62565a6e1cf69e10f5727360368e451d4b7f58beeac6173dc9db836a5b46/iniconfig-2.0.0-py3-none-any.whl ; python_version >= '3.12'",
                // ]
                // ```
                // In the `python_version < '3.12'` case, we haven't pre-visited `iniconfig` yet,
                // since we weren't sure whether it might also be a URL requirement when
                // transforming the requirements. For that case, we do another request here
                // (idempotent due to caching).
                self.request_package(next_package, url, index, request_sink)?;

                let version = if let Some(version) = state.initial_version.take() {
                    // If we just forked based on platform support, we can skip version selection,
                    // since the fork operation itself already selected the appropriate version for
                    // the platform.
                    version
                } else {
                    let term_intersection = state
                        .pubgrub
                        .partial_solution
                        .term_intersection_for_package(next_id)
                        .expect("a package was chosen but we don't have a term");
                    let decision = self.choose_version(
                        next_package,
                        next_id,
                        index.map(IndexMetadata::url),
                        term_intersection.unwrap_positive(),
                        &mut state.pins,
                        &preferences,
                        &state.fork_urls,
                        &state.env,
                        &state.python_requirement,
                        &state.pubgrub,
                        &mut visited,
                        request_sink,
                    )?;

                    // Pick the next compatible version.
                    let Some(version) = decision else {
                        debug!("No compatible version found for: {next_package}");

                        let term_intersection = state
                            .pubgrub
                            .partial_solution
                            .term_intersection_for_package(next_id)
                            .expect("a package was chosen but we don't have a term");

                        if let PubGrubPackageInner::Package { name, .. } = &**next_package {
                            // Check if the decision was due to the package being unavailable
                            if let Some(entry) = self.unavailable_packages.get(name) {
                                state
                                    .pubgrub
                                    .add_incompatibility(Incompatibility::custom_term(
                                        next_id,
                                        term_intersection.clone(),
                                        UnavailableReason::Package(entry.clone()),
                                    ));
                                continue;
                            }
                        }

                        state
                            .pubgrub
                            .add_incompatibility(Incompatibility::no_versions(
                                next_id,
                                term_intersection.clone(),
                            ));
                        continue;
                    };

                    let version = match version {
                        ResolverVersion::Unforked(version) => version,
                        ResolverVersion::Forked(forks) => {
                            forked_states.extend(self.version_forks_to_fork_states(state, forks));
                            continue 'FORK;
                        }
                        ResolverVersion::Unavailable(version, reason) => {
                            state.add_unavailable_version(version, reason);
                            continue;
                        }
                    };

                    // Only consider registry packages for prefetch.
                    if url.is_none() {
                        state.prefetcher.prefetch_batches(
                            next_package,
                            index,
                            &version,
                            term_intersection.unwrap_positive(),
                            state
                                .pubgrub
                                .partial_solution
                                .unchanging_term_for_package(next_id),
                            &state.python_requirement,
                            &self.selector,
                            &state.env,
                        )?;
                    }

                    version
                };

                state.prefetcher.version_tried(next_package, &version);

                self.on_progress(next_package, &version);

                if !state
                    .added_dependencies
                    .entry(next_id)
                    .or_default()
                    .insert(version.clone())
                {
                    // `dep_incompats` are already in `incompatibilities`, so we know there are no
                    // satisfied terms and can add the decision directly.
                    state
                        .pubgrub
                        .partial_solution
                        .add_decision(next_id, version);
                    continue;
                }

                // Retrieve that package's dependencies.
                let forked_deps = self.get_dependencies_forking(
                    next_id,
                    next_package,
                    &version,
                    &state.pins,
                    &state.fork_urls,
                    &state.env,
                    &state.python_requirement,
                    &state.pubgrub,
                )?;

                match forked_deps {
                    ForkedDependencies::Unavailable(reason) => {
                        // If we get a reason that we consider unrecoverable, we should show the
                        // derivation chain here.
                        state
                            .pubgrub
                            .add_incompatibility(Incompatibility::custom_version(
                                next_id,
                                version.clone(),
                                UnavailableReason::Version(reason),
                            ));
                    }
                    ForkedDependencies::Unforked(dependencies) => {
                        // Enrich the state with any URLs, etc.
                        state
                            .visit_package_version_dependencies(
                                next_id,
                                &version,
                                &self.urls,
                                &self.indexes,
                                &dependencies,
                                &self.git,
                                &self.workspace_members,
                                self.selector.resolution_strategy(),
                            )
                            .map_err(|err| {
                                enrich_dependency_error(err, next_id, &version, &state.pubgrub)
                            })?;

                        // Emit a request to fetch the metadata for each registry package.
                        self.visit_dependencies(&dependencies, &state, request_sink)
                            .map_err(|err| {
                                enrich_dependency_error(err, next_id, &version, &state.pubgrub)
                            })?;

                        // Add the dependencies to the state.
                        state.add_package_version_dependencies(next_id, &version, dependencies);
                    }
                    ForkedDependencies::Forked {
                        mut forks,
                        diverging_packages,
                    } => {
                        debug!(
                            "Pre-fork {} took {:.3}s",
                            state.env,
                            start.elapsed().as_secs_f32()
                        );

                        // Prioritize the forks.
                        match (self.options.fork_strategy, self.options.resolution_mode) {
                            (ForkStrategy::Fewest, _) | (_, ResolutionMode::Lowest) => {
                                // Prefer solving forks with lower Python bounds, since they're more
                                // likely to produce solutions that work for forks with higher
                                // Python bounds (whereas the inverse is not true).
                                forks.sort_by(|a, b| {
                                    a.cmp_requires_python(b)
                                        .reverse()
                                        .then_with(|| a.cmp_upper_bounds(b))
                                });
                            }
                            (ForkStrategy::RequiresPython, _) => {
                                // Otherwise, prefer solving forks with higher Python bounds, since
                                // we want to prioritize choosing the latest-compatible package
                                // version for each Python version.
                                forks.sort_by(|a, b| {
                                    a.cmp_requires_python(b).then_with(|| a.cmp_upper_bounds(b))
                                });
                            }
                        }

                        for new_fork_state in self.forks_to_fork_states(
                            state,
                            &version,
                            forks,
                            request_sink,
                            &diverging_packages,
                        ) {
                            forked_states.push(new_fork_state?);
                        }
                        continue 'FORK;
                    }
                }
            }
        }
        if resolutions.len() > 1 {
            info!(
                "Solved your requirements for {} environments",
                resolutions.len()
            );
        }
        if tracing::enabled!(Level::DEBUG) {
            for resolution in &resolutions {
                if let Some(env) = resolution.env.end_user_fork_display() {
                    let packages: FxHashSet<_> = resolution
                        .nodes
                        .keys()
                        .map(|package| &package.name)
                        .collect();
                    debug!(
                        "Distinct solution for {env} with {} package(s)",
                        packages.len()
                    );
                }
            }
        }
        for resolution in &resolutions {
            Self::trace_resolution(resolution);
        }
        ResolverOutput::from_state(
            &resolutions,
            &self.requirements,
            &self.constraints,
            &self.overrides,
            &self.preferences,
            &self.index,
            &self.git,
            &self.python_requirement,
            &self.conflicts,
            self.selector.resolution_strategy(),
            self.options.clone(),
        )
    }

    /// Change the priority of often conflicting packages and backtrack.
    ///
    /// To be called after unit propagation.
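    ///
    /// Packages land on the conflict tracker's `prioritize`/`deprioritize` lists once they
    /// accumulate enough conflicts (see [`CONFLICT_THRESHOLD`]).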
    fn reprioritize_conflicts(state: &mut ForkState) {
        for package in state.conflict_tracker.prioritize.drain(..) {
            let changed = state
                .priorities
                .mark_conflict_early(&state.pubgrub.package_store[package]);
            if changed {
                debug!(
                    "Package {} has too many conflicts (affected), prioritizing",
                    &state.pubgrub.package_store[package]
                );
            } else {
                debug!(
                    "Package {} has too many conflicts (affected), already {:?}",
                    state.pubgrub.package_store[package],
                    state.priorities.get(&state.pubgrub.package_store[package])
                );
            }
        }

        for package in state.conflict_tracker.deprioritize.drain(..) {
            let changed = state
                .priorities
                .mark_conflict_late(&state.pubgrub.package_store[package]);
            if changed {
                debug!(
                    "Package {} has too many conflicts (culprit), deprioritizing and backtracking",
                    state.pubgrub.package_store[package],
                );
                let backtrack_level = state.pubgrub.backtrack_package(package);
                if let Some(backtrack_level) = backtrack_level {
                    debug!("Backtracked {backtrack_level} decisions");
                } else {
                    debug!(
                        "Package {} is not decided, cannot backtrack",
                        state.pubgrub.package_store[package]
                    );
                }
            } else {
                debug!(
                    "Package {} has too many conflicts (culprit), already {:?}",
                    state.pubgrub.package_store[package],
                    state.priorities.get(&state.pubgrub.package_store[package])
                );
            }
        }
    }

    /// When trace-level logging is enabled, we dump the final
    /// set of resolutions, including markers, to help with
    /// debugging. Namely, this tells us precisely the state
    /// emitted by the resolver before going off to construct a
    /// resolution graph.
    fn trace_resolution(combined: &Resolution) {
        if !tracing::enabled!(Level::TRACE) {
            return;
        }
        trace!("Resolution: {:?}", combined.env);
        for edge in &combined.edges {
            trace!(
                "Resolution edge: {} -> {}",
                edge.from
                    .as_ref()
                    .map(PackageName::as_str)
                    .unwrap_or("ROOT"),
                edge.to,
            );
            // The unwraps below are OK because `write`ing to
            // a String can never fail (except for OOM).
            let mut msg = String::new();
            write!(msg, "{}", edge.from_version).unwrap();
            if let Some(ref extra) = edge.from_extra {
                write!(msg, " (extra: {extra})").unwrap();
            }
            if let Some(ref dev) = edge.from_group {
                write!(msg, " (group: {dev})").unwrap();
            }

            write!(msg, " -> ").unwrap();

            write!(msg, "{}", edge.to_version).unwrap();
            if let Some(ref extra) = edge.to_extra {
                write!(msg, " (extra: {extra})").unwrap();
            }
            if let Some(ref dev) = edge.to_group {
                write!(msg, " (group: {dev})").unwrap();
            }
            if let Some(marker) = edge.marker.contents() {
                write!(msg, " ; {marker}").unwrap();
            }
            trace!("Resolution edge:     {msg}");
        }
    }

    /// Convert the dependency [`Fork`]s into [`ForkState`]s.
    fn forks_to_fork_states<'a>(
        &'a self,
        current_state: ForkState,
        version: &'a Version,
        forks: Vec<Fork>,
        request_sink: &'a Sender<Request>,
        diverging_packages: &'a [PackageName],
    ) -> impl Iterator<Item = Result<ForkState, ResolveError>> + 'a {
        debug!(
            "Splitting resolution on {}=={} over {} into {} resolution{} with separate markers",
            current_state.pubgrub.package_store[current_state.next],
            version,
            diverging_packages
                .iter()
                .map(ToString::to_string)
                .join(", "),
            forks.len(),
            if forks.len() == 1 { "" } else { "s" }
        );
        assert!(forks.len() >= 2);
        // This is a somewhat tortured technique to ensure
        // that our resolver state is only cloned as much
        // as it needs to be. We basically move the state
        // into `forked_states`, and then only clone it if
        // there is at least one more fork to visit.
        let package = current_state.next;
        let mut cur_state = Some(current_state);
        let forks_len = forks.len();
        forks
            .into_iter()
            .enumerate()
            .map(move |(i, fork)| {
                let is_last = i == forks_len - 1;
                let forked_state = cur_state.take().unwrap();
                if !is_last {
                    cur_state = Some(forked_state.clone());
                }

                let env = fork.env.clone();
                (fork, forked_state.with_env(env))
            })
            .map(move |(fork, mut forked_state)| {
                // Enrich the state with any URLs, etc.
                forked_state
                    .visit_package_version_dependencies(
                        package,
                        version,
                        &self.urls,
                        &self.indexes,
                        &fork.dependencies,
                        &self.git,
                        &self.workspace_members,
                        self.selector.resolution_strategy(),
                    )
                    .map_err(|err| {
                        enrich_dependency_error(err, package, version, &forked_state.pubgrub)
                    })?;

                // Emit a request to fetch the metadata for each registry package.
                self.visit_dependencies(&fork.dependencies, &forked_state, request_sink)
                    .map_err(|err| {
                        enrich_dependency_error(err, package, version, &forked_state.pubgrub)
                    })?;

                // Add the dependencies to the state.
                forked_state.add_package_version_dependencies(package, version, fork.dependencies);

                Ok(forked_state)
            })
    }

    /// Convert the dependency [`VersionFork`]s into [`ForkState`]s.
    #[allow(clippy::unused_self)]
    fn version_forks_to_fork_states(
        &self,
        current_state: ForkState,
        forks: Vec<VersionFork>,
    ) -> impl Iterator<Item = ForkState> + '_ {
        // This is a somewhat tortured technique to ensure
        // that our resolver state is only cloned as much
        // as it needs to be. We basically move the state
        // into `forked_states`, and then only clone it if
        // there is at least one more fork to visit.
        let mut cur_state = Some(current_state);
        let forks_len = forks.len();
        forks.into_iter().enumerate().map(move |(i, fork)| {
            let is_last = i == forks_len - 1;
            let mut forked_state = cur_state.take().unwrap();
            if !is_last {
                cur_state = Some(forked_state.clone());
            }
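            // Seed the fork with the package (and, if already chosen, the version) that
            // triggered it, so the solver can skip redundant unit propagation and version
            // selection on the fork's first iteration.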
            forked_state.initial_id = Some(fork.id);
            forked_state.initial_version = fork.version;
            forked_state.with_env(fork.env)
        })
    }

    /// Visit a set of [`PubGrubDependency`] entities prior to selection.
    fn visit_dependencies(
        &self,
        dependencies: &[PubGrubDependency],
        state: &ForkState,
        request_sink: &Sender<Request>,
    ) -> Result<(), ResolveError> {
        for dependency in dependencies {
            let PubGrubDependency {
                package,
                version: _,
                parent: _,
                url: _,
            } = dependency;
            let url = package.name().and_then(|name| state.fork_urls.get(name));
            let index = package.name().and_then(|name| state.fork_indexes.get(name));
            self.visit_package(package, url, index, request_sink)?;
        }
        Ok(())
    }

    /// Visit a [`PubGrubPackage`] prior to selection. This should be called on a [`PubGrubPackage`]
    /// before it is selected, to allow metadata to be fetched in parallel.
    fn visit_package(
        &self,
        package: &PubGrubPackage,
        url: Option<&VerbatimParsedUrl>,
        index: Option<&IndexMetadata>,
        request_sink: &Sender<Request>,
    ) -> Result<(), ResolveError> {
        // Ignore unresolved URL packages, i.e., packages that use a direct URL in some forks.
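        // (Packages without a name, such as the root, are skipped here too: `name()` returns
        // `None`, so the `unwrap_or(true)` below causes them to be skipped as well.)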
        if url.is_none()
            && package
                .name()
                .map(|name| self.urls.any_url(name))
                .unwrap_or(true)
        {
            return Ok(());
        }

        self.request_package(package, url, index, request_sink)
    }

    fn request_package(
        &self,
        package: &PubGrubPackage,
        url: Option<&VerbatimParsedUrl>,
        index: Option<&IndexMetadata>,
        request_sink: &Sender<Request>,
    ) -> Result<(), ResolveError> {
        // Only request real packages.
        let Some(name) = package.name_no_root() else {
            return Ok(());
        };

        if let Some(url) = url {
            // Verify that the package is allowed under the hash-checking policy.
            if !self.hasher.allows_url(&url.verbatim) {
                return Err(ResolveError::UnhashedPackage(name.clone()));
            }

            // Emit a request to fetch the metadata for this distribution.
            let dist = Dist::from_url(name.clone(), url.clone())?;
            if self.index.distributions().register(dist.version_id()) {
                request_sink.blocking_send(Request::Dist(dist))?;
            }
        } else if let Some(index) = index {
            // Emit a request to fetch the metadata for this package on the index.
            if self
                .index
                .explicit()
                .register((name.clone(), index.url().clone()))
            {
                request_sink.blocking_send(Request::Package(name.clone(), Some(index.clone())))?;
            }
        } else {
            // Emit a request to fetch the metadata for this package.
            if self.index.implicit().register(name.clone()) {
                request_sink.blocking_send(Request::Package(name.clone(), None))?;
            }
        }
        Ok(())
    }

    /// Visit the set of [`PubGrubPackage`] candidates prior to selection. This allows us to fetch
    /// metadata for all packages in parallel.
    fn pre_visit<'data>(
        packages: impl Iterator<Item = (&'data PubGrubPackage, &'data Range<Version>)>,
        urls: &Urls,
        indexes: &Indexes,
        python_requirement: &PythonRequirement,
        request_sink: &Sender<Request>,
    ) -> Result<(), ResolveError> {
        // Iterate over the potential packages, and fetch file metadata for any of them. These
        // represent our current best guesses for the versions that we _might_ select.
        for (package, range) in packages {
            let PubGrubPackageInner::Package {
                name,
                extra: None,
                group: None,
                marker: MarkerTree::TRUE,
            } = &**package
            else {
                continue;
            };
            // Avoid pre-visiting packages that have any URLs in any fork. At this point we can't
            // tell whether they are registry distributions or which url they use.
            if urls.any_url(name) {
                continue;
            }
            // Avoid visiting packages that may use an explicit index.
            if indexes.contains_key(name) {
                continue;
            }
            request_sink.blocking_send(Request::Prefetch(
                name.clone(),
                range.clone(),
                python_requirement.clone(),
            ))?;
        }
        Ok(())
    }

    /// Given a candidate package, choose the next version in range to try.
    ///
    /// Returns `None` when there are no versions in the given range, rejecting the current partial
    /// solution.
    // TODO(konsti): re-enable tracing. This trace is crucial to understanding the
    // tracing-durations-export diagrams, but it took ~5% resolver thread runtime for apache-airflow
    // when I last measured.
    #[cfg_attr(feature = "tracing-durations-export", instrument(skip_all, fields(%package)))]
    fn choose_version(
        &self,
        package: &PubGrubPackage,
        id: Id<PubGrubPackage>,
        index: Option<&IndexUrl>,
        range: &Range<Version>,
        pins: &mut FilePins,
        preferences: &Preferences,
        fork_urls: &ForkUrls,
        env: &ResolverEnvironment,
        python_requirement: &PythonRequirement,
        pubgrub: &State<UvDependencyProvider>,
        visited: &mut FxHashSet<PackageName>,
        request_sink: &Sender<Request>,
    ) -> Result<Option<ResolverVersion>, ResolveError> {
        match &**package {
            PubGrubPackageInner::Root(_) => {
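                // The root is a synthetic package; it is always pinned to the sentinel
                // `MIN_VERSION` used when initializing the PubGrub state above.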
1089                Ok(Some(ResolverVersion::Unforked(MIN_VERSION.clone())))
1090            }
1091
1092            PubGrubPackageInner::Python(_) => {
1093                // Dependencies on Python are only added when a package is incompatible; as such,
1094                // we don't need to do anything here.
1095                Ok(None)
1096            }
1097
1098            PubGrubPackageInner::System(_) => {
1099                // We don't care what the actual version is here, just that it's consistent across
1100                // the dependency graph.
1101                let Some(version) = range.as_singleton() else {
1102                    return Ok(None);
1103                };
1104                Ok(Some(ResolverVersion::Unforked(version.clone())))
1105            }
1106
1107            PubGrubPackageInner::Marker { name, .. }
1108            | PubGrubPackageInner::Extra { name, .. }
1109            | PubGrubPackageInner::Group { name, .. }
1110            | PubGrubPackageInner::Package { name, .. } => {
1111                if let Some(url) = package.name().and_then(|name| fork_urls.get(name)) {
1112                    self.choose_version_url(id, name, range, url, env, python_requirement, pubgrub)
1113                } else {
1114                    self.choose_version_registry(
1115                        package,
1116                        id,
1117                        name,
1118                        index,
1119                        range,
1120                        preferences,
1121                        env,
1122                        python_requirement,
1123                        pubgrub,
1124                        pins,
1125                        visited,
1126                        request_sink,
1127                    )
1128                }
1129            }
1130        }
1131    }
1132
1133    /// Select a version for a URL requirement. Since there is only one version per URL, we return
1134    /// that version if it is in range and `None` otherwise.
1135    fn choose_version_url(
1136        &self,
1137        id: Id<PubGrubPackage>,
1138        name: &PackageName,
1139        range: &Range<Version>,
1140        url: &VerbatimParsedUrl,
1141        env: &ResolverEnvironment,
1142        python_requirement: &PythonRequirement,
1143        pubgrub: &State<UvDependencyProvider>,
1144    ) -> Result<Option<ResolverVersion>, ResolveError> {
1145        debug!(
1146            "Searching for a compatible version of {name} @ {} ({range})",
1147            url.verbatim
1148        );
1149
1150        let dist = PubGrubDistribution::from_url(name, url);
1151        let response = self
1152            .index
1153            .distributions()
1154            .wait_blocking(&dist.version_id())
1155            .ok_or_else(|| ResolveError::UnregisteredTask(dist.version_id().to_string()))?;
1156
1157        // If we failed to fetch the metadata for a URL, we can't proceed.
1158        let metadata = match &*response {
1159            MetadataResponse::Found(archive) => &archive.metadata,
1160            MetadataResponse::Unavailable(reason) => {
1161                self.unavailable_packages
1162                    .insert(name.clone(), reason.into());
1163                return Ok(None);
1164            }
1165            // TODO(charlie): Add derivation chain for URL dependencies. In practice, this isn't
1166            // critical since we fetch URL dependencies _prior_ to invoking the resolver.
1167            MetadataResponse::Error(dist, err) => {
1168                return Err(ResolveError::Dist(
1169                    DistErrorKind::from_requested_dist(dist, &**err),
1170                    dist.clone(),
1171                    DerivationChain::default(),
1172                    err.clone(),
1173                ));
1174            }
1175        };
1176
1177        let version = &metadata.version;
1178
1179        // The version is incompatible with the requirement.
1180        if !range.contains(version) {
1181            return Ok(None);
1182        }
1183
1184        // If the URL points to a pre-built wheel, and the wheel's supported Python versions don't
1185        // match our `Requires-Python`, mark it as incompatible.
1186        let dist = Dist::from_url(name.clone(), url.clone())?;
1187        if let Dist::Built(dist) = dist {
1188            let filename = match &dist {
1189                BuiltDist::Registry(dist) => &dist.best_wheel().filename,
1190                BuiltDist::DirectUrl(dist) => &dist.filename,
1191                BuiltDist::Path(dist) => &dist.filename,
1192            };
1193
1194            // If the wheel does _not_ cover a required platform, it's incompatible.
1195            if env.marker_environment().is_none() && !self.options.required_environments.is_empty()
1196            {
1197                let wheel_marker = implied_markers(filename);
1198                // If the user explicitly marked a platform as required, ensure it has coverage.
1199                for environment_marker in self.options.required_environments.iter().copied() {
1200                    // If the platform is part of the current environment...
1201                    if env.included_by_marker(environment_marker)
1202                        && !find_environments(id, pubgrub).is_disjoint(environment_marker)
1203                    {
1204                        // ...but the wheel doesn't support it, it's incompatible.
1205                        if wheel_marker.is_disjoint(environment_marker) {
1206                            return Ok(Some(ResolverVersion::Unavailable(
1207                                version.clone(),
1208                                UnavailableVersion::IncompatibleDist(IncompatibleDist::Wheel(
1209                                    IncompatibleWheel::MissingPlatform(environment_marker),
1210                                )),
1211                            )));
1212                        }
1213                    }
1214                }
1215            }
1216
1217            // If the wheel's Python tag doesn't match the target Python, it's incompatible.
1218            if !python_requirement.target().matches_wheel_tag(filename) {
1219                return Ok(Some(ResolverVersion::Unavailable(
1220                    filename.version.clone(),
1221                    UnavailableVersion::IncompatibleDist(IncompatibleDist::Wheel(
1222                        IncompatibleWheel::Tag(IncompatibleTag::AbiPythonVersion),
1223                    )),
1224                )));
1225            }
1226        }
1227
1228        // The version is incompatible due to its `Requires-Python` requirement.
1229        if let Some(requires_python) = metadata.requires_python.as_ref() {
1230            // TODO(charlie): We only care about this for source distributions.
1231            if !python_requirement
1232                .installed()
1233                .is_contained_by(requires_python)
1234            {
1235                return Ok(Some(ResolverVersion::Unavailable(
1236                    version.clone(),
1237                    UnavailableVersion::IncompatibleDist(IncompatibleDist::Source(
1238                        IncompatibleSource::RequiresPython(
1239                            requires_python.clone(),
1240                            PythonRequirementKind::Installed,
1241                        ),
1242                    )),
1243                )));
1244            }
1245            if !python_requirement.target().is_contained_by(requires_python) {
1246                return Ok(Some(ResolverVersion::Unavailable(
1247                    version.clone(),
1248                    UnavailableVersion::IncompatibleDist(IncompatibleDist::Source(
1249                        IncompatibleSource::RequiresPython(
1250                            requires_python.clone(),
1251                            PythonRequirementKind::Target,
1252                        ),
1253                    )),
1254                )));
1255            }
1256        }
1257
1260        Ok(Some(ResolverVersion::Unforked(version.clone())))
1261    }
1262
1263    /// Given a candidate registry requirement, choose the next version in range to try, or `None`
1264    /// if there is no version in this range.
1265    fn choose_version_registry(
1266        &self,
1267        package: &PubGrubPackage,
1268        id: Id<PubGrubPackage>,
1269        name: &PackageName,
1270        index: Option<&IndexUrl>,
1271        range: &Range<Version>,
1272        preferences: &Preferences,
1273        env: &ResolverEnvironment,
1274        python_requirement: &PythonRequirement,
1275        pubgrub: &State<UvDependencyProvider>,
1276        pins: &mut FilePins,
1277        visited: &mut FxHashSet<PackageName>,
1278        request_sink: &Sender<Request>,
1279    ) -> Result<Option<ResolverVersion>, ResolveError> {
1280        // Wait for the metadata to be available.
1281        let versions_response = if let Some(index) = index {
1282            self.index
1283                .explicit()
1284                .wait_blocking(&(name.clone(), index.clone()))
1285                .ok_or_else(|| ResolveError::UnregisteredTask(name.to_string()))?
1286        } else {
1287            self.index
1288                .implicit()
1289                .wait_blocking(name)
1290                .ok_or_else(|| ResolveError::UnregisteredTask(name.to_string()))?
1291        };
1292        visited.insert(name.clone());
1293
1294        let version_maps = match *versions_response {
1295            VersionsResponse::Found(ref version_maps) => version_maps.as_slice(),
1296            VersionsResponse::NoIndex => {
1297                self.unavailable_packages
1298                    .insert(name.clone(), UnavailablePackage::NoIndex);
1299                &[]
1300            }
1301            VersionsResponse::Offline => {
1302                self.unavailable_packages
1303                    .insert(name.clone(), UnavailablePackage::Offline);
1304                &[]
1305            }
1306            VersionsResponse::NotFound => {
1307                self.unavailable_packages
1308                    .insert(name.clone(), UnavailablePackage::NotFound);
1309                &[]
1310            }
1311        };
1312
1313        debug!("Searching for a compatible version of {package} ({range})");
1314
1315        // Find a version.
1316        let Some(candidate) = self.selector.select(
1317            name,
1318            range,
1319            version_maps,
1320            preferences,
1321            &self.installed_packages,
1322            &self.exclusions,
1323            index,
1324            env,
1325            self.tags.as_ref(),
1326        ) else {
1327            // Short circuit: we couldn't find _any_ versions for a package.
1328            return Ok(None);
1329        };
1330
1331        let dist = match candidate.dist() {
1332            CandidateDist::Compatible(dist) => dist,
1333            CandidateDist::Incompatible {
1334                incompatible_dist: incompatibility,
1335                prioritized_dist: _,
1336            } => {
1337                // If the version is incompatible because no distributions are compatible, exit early.
1338                return Ok(Some(ResolverVersion::Unavailable(
1339                    candidate.version().clone(),
1340                    // TODO(charlie): We can avoid this clone; the candidate is dropped here and
1341                    // owns the incompatibility.
1342                    UnavailableVersion::IncompatibleDist(incompatibility.clone()),
1343                )));
1344            }
1345        };
1346
1347        // Check whether the version is incompatible due to its Python requirement.
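        // Illustrative sketch of the fork below: if we're resolving for Python >= 3.9 and this
        // candidate declares `requires-python: >=3.11`, the resolution can fork into a
        // `python_full_version >= '3.11'` environment (which may keep this version) and a
        // `python_full_version < '3.11'` environment (which must fall back to an older one),
        // rather than marking the candidate unavailable outright.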
1348        if let Some((requires_python, incompatibility)) =
1349            Self::check_requires_python(dist, python_requirement)
1350        {
1351            if matches!(self.options.fork_strategy, ForkStrategy::RequiresPython) {
1352                if env.marker_environment().is_none() {
1353                    let forks = fork_version_by_python_requirement(
1354                        requires_python,
1355                        python_requirement,
1356                        env,
1357                    );
1358                    if !forks.is_empty() {
1359                        debug!(
1360                            "Forking Python requirement `{}` on `{}` for {}=={} ({})",
1361                            python_requirement.target(),
1362                            requires_python,
1363                            name,
1364                            candidate.version(),
1365                            forks
1366                                .iter()
1367                                .map(ToString::to_string)
1368                                .collect::<Vec<_>>()
1369                                .join(", ")
1370                        );
1371                        let forks = forks
1372                            .into_iter()
1373                            .map(|env| VersionFork {
1374                                env,
1375                                id,
1376                                version: None,
1377                            })
1378                            .collect();
1379                        return Ok(Some(ResolverVersion::Forked(forks)));
1380                    }
1381                }
1382            }
1383
1384            return Ok(Some(ResolverVersion::Unavailable(
1385                candidate.version().clone(),
1386                UnavailableVersion::IncompatibleDist(incompatibility),
1387            )));
1388        }
1389
1390        // Check whether this version covers all supported platforms; and, if not, generate a fork.
1391        if let Some(forked) = self.fork_version_registry(
1392            &candidate,
1393            dist,
1394            version_maps,
1395            package,
1396            id,
1397            name,
1398            index,
1399            range,
1400            preferences,
1401            env,
1402            pubgrub,
1403            pins,
1404            request_sink,
1405        )? {
1406            return Ok(Some(forked));
1407        }
1408
1409        let filename = match dist.for_installation() {
1410            ResolvedDistRef::InstallableRegistrySourceDist { sdist, .. } => sdist
1411                .filename()
1412                .unwrap_or(Cow::Borrowed("unknown filename")),
1413            ResolvedDistRef::InstallableRegistryBuiltDist { wheel, .. } => wheel
1414                .filename()
1415                .unwrap_or(Cow::Borrowed("unknown filename")),
1416            ResolvedDistRef::Installed { .. } => Cow::Borrowed("installed"),
1417        };
1418
1419        debug!(
1420            "Selecting: {}=={} [{}] ({})",
1421            name,
1422            candidate.version(),
1423            candidate.choice_kind(),
1424            filename,
1425        );
1426        self.visit_candidate(&candidate, dist, package, name, pins, request_sink)?;
1427
1428        let version = candidate.version().clone();
1429        Ok(Some(ResolverVersion::Unforked(version)))
1430    }
1431
1432    /// Determine whether a candidate covers all supported platforms; and, if not, generate a fork.
1433    ///
1434    /// This only ever applies to versions that lack source distributions. And, for now, we only
1435    /// apply it in two cases:
1436    ///
1437    /// 1. Local versions, where the non-local version has greater platform coverage. The intent is
1438    ///    that, if we're resolving PyTorch and we choose `torch==2.5.2+cpu`, we want to fork so
1439    ///    that we can select `torch==2.5.2` on macOS (since the `+cpu` variant doesn't include any
1440    ///    macOS wheels).
1441    /// 2. Platforms that the user explicitly marks as "required" (opt-in). For example, the user
1442    ///    might require that the generated resolution always include wheels for x86 macOS, failing
1443    ///    entirely if the platform is unsupported.
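    ///
    /// As an illustrative sketch (not an exact trace of the markers involved), case 1 might split a
    /// universal resolution into two forks along the lines of:
    ///
    /// ```text
    /// torch==2.5.2     ; sys_platform == 'darwin'
    /// torch==2.5.2+cpu ; sys_platform != 'darwin'
    /// ```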
1444    fn fork_version_registry(
1445        &self,
1446        candidate: &Candidate,
1447        dist: &CompatibleDist,
1448        version_maps: &[VersionMap],
1449        package: &PubGrubPackage,
1450        id: Id<PubGrubPackage>,
1451        name: &PackageName,
1452        index: Option<&IndexUrl>,
1453        range: &Range<Version>,
1454        preferences: &Preferences,
1455        env: &ResolverEnvironment,
1456        pubgrub: &State<UvDependencyProvider>,
1457        pins: &mut FilePins,
1458        request_sink: &Sender<Request>,
1459    ) -> Result<Option<ResolverVersion>, ResolveError> {
1460        // This only applies to universal resolutions.
1461        if env.marker_environment().is_some() {
1462            return Ok(None);
1463        }
1464
1465        // If the package is already compatible with all environments (as is the case for
1466        // packages that include a source distribution), we don't need to fork.
1467        if dist.implied_markers().is_true() {
1468            return Ok(None);
1469        }
1470
1471        // If the user explicitly marked a platform as required, ensure it has coverage.
1472        for marker in self.options.required_environments.iter().copied() {
1473            // If the platform is part of the current environment...
1474            if env.included_by_marker(marker) {
1475                // But isn't supported by the distribution...
1476                if dist.implied_markers().is_disjoint(marker)
1477                    && !find_environments(id, pubgrub).is_disjoint(marker)
1478                {
1479                    // Then we need to fork.
1480                    let Some((left, right)) = fork_version_by_marker(env, marker) else {
1481                        return Ok(Some(ResolverVersion::Unavailable(
1482                            candidate.version().clone(),
1483                            UnavailableVersion::IncompatibleDist(IncompatibleDist::Wheel(
1484                                IncompatibleWheel::MissingPlatform(marker),
1485                            )),
1486                        )));
1487                    };
1488
1489                    debug!(
1490                        "Forking on required platform `{}` for {}=={} ({})",
1491                        marker.try_to_string().unwrap_or_else(|| "true".to_string()),
1492                        name,
1493                        candidate.version(),
1494                        [&left, &right]
1495                            .iter()
1496                            .map(ToString::to_string)
1497                            .collect::<Vec<_>>()
1498                            .join(", ")
1499                    );
1500                    let forks = vec![
1501                        VersionFork {
1502                            env: left,
1503                            id,
1504                            version: None,
1505                        },
1506                        VersionFork {
1507                            env: right,
1508                            id,
1509                            version: None,
1510                        },
1511                    ];
1512                    return Ok(Some(ResolverVersion::Forked(forks)));
1513                }
1514            }
1515        }
1516
1517        // For now, we only apply this to local versions.
1518        if !candidate.version().is_local() {
1519            return Ok(None);
1520        }
1521
1522        debug!(
1523            "Looking at local version: {}=={}",
1524            name,
1525            candidate.version()
1526        );
1527
1528        // If there's a non-local version...
1529        let range = range.clone().intersection(&Range::singleton(
1530            candidate.version().clone().without_local(),
1531        ));
1532
1533        let Some(base_candidate) = self.selector.select(
1534            name,
1535            &range,
1536            version_maps,
1537            preferences,
1538            &self.installed_packages,
1539            &self.exclusions,
1540            index,
1541            env,
1542            self.tags.as_ref(),
1543        ) else {
1544            return Ok(None);
1545        };
1546        let CandidateDist::Compatible(base_dist) = base_candidate.dist() else {
1547            return Ok(None);
1548        };
1549
1550        // ...and the non-local version has greater platform support...
1551        let mut remainder = {
1552            let mut remainder = base_dist.implied_markers();
1553            remainder.and(dist.implied_markers().negate());
1554            remainder
1555        };
1556        if remainder.is_false() {
1557            return Ok(None);
1558        }
1559
1560        // If the remainder isn't relevant to the current environment, there's no need to fork.
1561        // For example, if we're solving for `sys_platform == 'darwin'` but the remainder is
1562        // `sys_platform == 'linux'`, we don't need to fork.
1563        if !env.included_by_marker(remainder) {
1564            return Ok(None);
1565        }
1566
1567        // Similarly, if the local distribution is incompatible with the current environment, then
1568        // use the base distribution instead (but don't fork).
1569        if !env.included_by_marker(dist.implied_markers()) {
1570            let filename = match dist.for_installation() {
1571                ResolvedDistRef::InstallableRegistrySourceDist { sdist, .. } => sdist
1572                    .filename()
1573                    .unwrap_or(Cow::Borrowed("unknown filename")),
1574                ResolvedDistRef::InstallableRegistryBuiltDist { wheel, .. } => wheel
1575                    .filename()
1576                    .unwrap_or(Cow::Borrowed("unknown filename")),
1577                ResolvedDistRef::Installed { .. } => Cow::Borrowed("installed"),
1578            };
1579
1580            debug!(
1581                "Preferring non-local candidate: {}=={} [{}] ({})",
1582                name,
1583                base_candidate.version(),
1584                base_candidate.choice_kind(),
1585                filename,
1586            );
1587            self.visit_candidate(
1588                &base_candidate,
1589                base_dist,
1590                package,
1591                name,
1592                pins,
1593                request_sink,
1594            )?;
1595
1596            return Ok(Some(ResolverVersion::Unforked(
1597                base_candidate.version().clone(),
1598            )));
1599        }
1600
1601        // If the remainder covers _some_ macOS environments, but the local distribution's implied
1602        // markers cover _none_, then we can extend the remainder to cover _all_ macOS environments.
1603        // The same goes for Linux and Windows.
1604        //
1605        // The idea here is that the base version could support (e.g.) ARM macOS, but not Intel
1606        // macOS. But if _neither_ version supports Intel macOS, we'd rather use `sys_platform == 'darwin'`
1607        // instead of `sys_platform == 'darwin' and platform_machine == 'arm64'`, since it's much
1608        // simpler, and _neither_ version will succeed with Intel macOS anyway.
1609        for value in [
1610            arcstr::literal!("darwin"),
1611            arcstr::literal!("linux"),
1612            arcstr::literal!("win32"),
1613        ] {
1614            let sys_platform = MarkerTree::expression(MarkerExpression::String {
1615                key: MarkerValueString::SysPlatform,
1616                operator: MarkerOperator::Equal,
1617                value,
1618            });
1619            if dist.implied_markers().is_disjoint(sys_platform)
1620                && !remainder.is_disjoint(sys_platform)
1621            {
1622                remainder.or(sys_platform);
1623            }
1624        }
1625
1626        // Otherwise, we need to fork.
1627        let Some((base_env, local_env)) = fork_version_by_marker(env, remainder) else {
1628            return Ok(None);
1629        };
1630
1631        debug!(
1632            "Forking platform for {}=={} ({})",
1633            name,
1634            candidate.version(),
1635            [&base_env, &local_env]
1636                .iter()
1637                .map(ToString::to_string)
1638                .collect::<Vec<_>>()
1639                .join(", ")
1640        );
1641        self.visit_candidate(candidate, dist, package, name, pins, request_sink)?;
1642        self.visit_candidate(
1643            &base_candidate,
1644            base_dist,
1645            package,
1646            name,
1647            pins,
1648            request_sink,
1649        )?;
1650
1651        let forks = vec![
1652            VersionFork {
1653                env: base_env.clone(),
1654                id,
1655                version: Some(base_candidate.version().clone()),
1656            },
1657            VersionFork {
1658                env: local_env.clone(),
1659                id,
1660                version: Some(candidate.version().clone()),
1661            },
1662        ];
1663        Ok(Some(ResolverVersion::Forked(forks)))
1664    }
1665
1666    /// Visit a selected candidate.
1667    fn visit_candidate(
1668        &self,
1669        candidate: &Candidate,
1670        dist: &CompatibleDist,
1671        package: &PubGrubPackage,
1672        name: &PackageName,
1673        pins: &mut FilePins,
1674        request_sink: &Sender<Request>,
1675    ) -> Result<(), ResolveError> {
1676        // We want to return a package pinned to a specific version, but we _also_ want to
1677        // store the exact file that we selected to satisfy that version.
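        // For example (hypothetical names): pinning `idna==3.7` also records the concrete
        // `idna-3.7-py3-none-any.whl` (or sdist) chosen from the index, so the resolution output
        // refers to exactly that file.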
1678        pins.insert(candidate, dist);
1679
1680        // Emit a request to fetch the metadata for this version.
1681        if matches!(&**package, PubGrubPackageInner::Package { .. }) {
1682            if self.dependency_mode.is_transitive() {
1683                if self.index.distributions().register(candidate.version_id()) {
1684                    if name != dist.name() {
1685                        return Err(ResolveError::MismatchedPackageName {
1686                            request: "distribution",
1687                            expected: name.clone(),
1688                            actual: dist.name().clone(),
1689                        });
1690                    }
1691                    // Verify that the package is allowed under the hash-checking policy.
1692                    if !self
1693                        .hasher
1694                        .allows_package(candidate.name(), candidate.version())
1695                    {
1696                        return Err(ResolveError::UnhashedPackage(candidate.name().clone()));
1697                    }
1698
1699                    let request = Request::from(dist.for_resolution());
1700                    request_sink.blocking_send(request)?;
1701                }
1702            }
1703        }
1704
1705        Ok(())
1706    }
1707
1708    /// Check if the distribution is incompatible with the Python requirement, and if so, return
1709    /// the incompatibility.
1710    fn check_requires_python<'dist>(
1711        dist: &'dist CompatibleDist,
1712        python_requirement: &PythonRequirement,
1713    ) -> Option<(&'dist VersionSpecifiers, IncompatibleDist)> {
1714        let requires_python = dist.requires_python()?;
1715        if python_requirement.target().is_contained_by(requires_python) {
1716            None
1717        } else {
1718            let incompatibility = if matches!(dist, CompatibleDist::CompatibleWheel { .. }) {
1719                IncompatibleDist::Wheel(IncompatibleWheel::RequiresPython(
1720                    requires_python.clone(),
1721                    if python_requirement.installed() == python_requirement.target() {
1722                        PythonRequirementKind::Installed
1723                    } else {
1724                        PythonRequirementKind::Target
1725                    },
1726                ))
1727            } else {
1728                IncompatibleDist::Source(IncompatibleSource::RequiresPython(
1729                    requires_python.clone(),
1730                    if python_requirement.installed() == python_requirement.target() {
1731                        PythonRequirementKind::Installed
1732                    } else {
1733                        PythonRequirementKind::Target
1734                    },
1735                ))
1736            };
1737            Some((requires_python, incompatibility))
1738        }
1739    }
1740
1741    /// Given a candidate package and version, return its dependencies.
1742    #[instrument(skip_all, fields(%package, %version))]
1743    fn get_dependencies_forking(
1744        &self,
1745        id: Id<PubGrubPackage>,
1746        package: &PubGrubPackage,
1747        version: &Version,
1748        pins: &FilePins,
1749        fork_urls: &ForkUrls,
1750        env: &ResolverEnvironment,
1751        python_requirement: &PythonRequirement,
1752        pubgrub: &State<UvDependencyProvider>,
1753    ) -> Result<ForkedDependencies, ResolveError> {
1754        let result = self.get_dependencies(
1755            id,
1756            package,
1757            version,
1758            pins,
1759            fork_urls,
1760            env,
1761            python_requirement,
1762            pubgrub,
1763        );
1764        if env.marker_environment().is_some() {
1765            result.map(|deps| match deps {
1766                Dependencies::Available(deps) | Dependencies::Unforkable(deps) => {
1767                    ForkedDependencies::Unforked(deps)
1768                }
1769                Dependencies::Unavailable(err) => ForkedDependencies::Unavailable(err),
1770            })
1771        } else {
1772            Ok(result?.fork(env, python_requirement, &self.conflicts))
1773        }
1774    }
1775
1776    /// Given a candidate package and version, return its dependencies.
1777    #[instrument(skip_all, fields(%package, %version))]
1778    fn get_dependencies(
1779        &self,
1780        id: Id<PubGrubPackage>,
1781        package: &PubGrubPackage,
1782        version: &Version,
1783        pins: &FilePins,
1784        fork_urls: &ForkUrls,
1785        env: &ResolverEnvironment,
1786        python_requirement: &PythonRequirement,
1787        pubgrub: &State<UvDependencyProvider>,
1788    ) -> Result<Dependencies, ResolveError> {
1789        let url = package.name().and_then(|name| fork_urls.get(name));
1790        let dependencies = match &**package {
1791            PubGrubPackageInner::Root(_) => {
1792                let no_dev_deps = BTreeMap::default();
1793                let requirements = self.flatten_requirements(
1794                    &self.requirements,
1795                    &no_dev_deps,
1796                    None,
1797                    None,
1798                    None,
1799                    env,
1800                    python_requirement,
1801                );
1802
1803                requirements
1804                    .flat_map(move |requirement| {
1805                        PubGrubDependency::from_requirement(
1806                            &self.conflicts,
1807                            requirement,
1808                            None,
1809                            Some(package),
1810                        )
1811                    })
1812                    .collect()
1813            }
1814
1815            PubGrubPackageInner::Package {
1816                name,
1817                extra,
1818                group,
1819                marker: _,
1820            } => {
1821                // If we're excluding transitive dependencies, short-circuit.
1822                if self.dependency_mode.is_direct() {
1823                    return Ok(Dependencies::Unforkable(Vec::default()));
1824                }
1825
1826                // Determine the distribution to look up.
1827                let dist = match url {
1828                    Some(url) => PubGrubDistribution::from_url(name, url),
1829                    None => PubGrubDistribution::from_registry(name, version),
1830                };
1831                let version_id = dist.version_id();
1832
1833                // If the package does not exist in the registry or locally, we cannot fetch its dependencies
1834                if self.dependency_mode.is_transitive()
1835                    && self.unavailable_packages.get(name).is_some()
1836                    && self.installed_packages.get_packages(name).is_empty()
1837                {
1838                    debug_assert!(
1839                        false,
1840                        "Dependencies were requested for a package that is not available"
1841                    );
1842                    return Err(ResolveError::PackageUnavailable(name.clone()));
1843                }
1844
1845                // Wait for the metadata to be available.
1846                let response = self
1847                    .index
1848                    .distributions()
1849                    .wait_blocking(&version_id)
1850                    .ok_or_else(|| ResolveError::UnregisteredTask(version_id.to_string()))?;
1851
1852                let metadata = match &*response {
1853                    MetadataResponse::Found(archive) => &archive.metadata,
1854                    MetadataResponse::Unavailable(reason) => {
1855                        let unavailable_version = UnavailableVersion::from(reason);
1856                        let message = unavailable_version.singular_message();
1857                        if let Some(err) = reason.source() {
1858                            // Show the detailed error for metadata parse errors.
1859                            warn!("{name} {message}: {err}");
1860                        } else {
1861                            warn!("{name} {message}");
1862                        }
1863                        self.incomplete_packages
1864                            .entry(name.clone())
1865                            .or_default()
1866                            .insert(version.clone(), reason.clone());
1867                        return Ok(Dependencies::Unavailable(unavailable_version));
1868                    }
1869                    MetadataResponse::Error(dist, err) => {
1870                        let chain = DerivationChainBuilder::from_state(id, version, pubgrub)
1871                            .unwrap_or_default();
1872                        return Err(ResolveError::Dist(
1873                            DistErrorKind::from_requested_dist(dist, &**err),
1874                            dist.clone(),
1875                            chain,
1876                            err.clone(),
1877                        ));
1878                    }
1879                };
1880
1881                // If there was no requires-python on the index page, we may have an incompatible
1882                // distribution.
1883                if let Some(requires_python) = &metadata.requires_python {
1884                    if !python_requirement.target().is_contained_by(requires_python) {
1885                        return Ok(Dependencies::Unavailable(
1886                            UnavailableVersion::RequiresPython(requires_python.clone()),
1887                        ));
1888                    }
1889                }
1890
1891                // Identify any system dependencies based on the index URL.
1892                let system_dependencies = self
1893                    .options
1894                    .torch_backend
1895                    .as_ref()
1896                    .filter(|torch_backend| matches!(torch_backend, TorchStrategy::Cuda { .. }))
1897                    .filter(|torch_backend| torch_backend.has_system_dependency(name))
1898                    .and_then(|_| pins.get(name, version).and_then(ResolvedDist::index))
1899                    .map(IndexUrl::url)
1900                    .and_then(SystemDependency::from_index)
1901                    .into_iter()
1902                    .inspect(|system_dependency| {
1903                        debug!(
1904                            "Adding system dependency `{}` for `{package}@{version}`",
1905                            system_dependency
1906                        );
1907                    })
1908                    .map(PubGrubDependency::from);
1909
1910                let requirements = self.flatten_requirements(
1911                    &metadata.requires_dist,
1912                    &metadata.dependency_groups,
1913                    extra.as_ref(),
1914                    group.as_ref(),
1915                    Some(name),
1916                    env,
1917                    python_requirement,
1918                );
1919
1920                requirements
1921                    .filter(|requirement| !self.excludes.contains(&requirement.name))
1922                    .flat_map(|requirement| {
1923                        PubGrubDependency::from_requirement(
1924                            &self.conflicts,
1925                            requirement,
1926                            group.as_ref(),
1927                            Some(package),
1928                        )
1929                    })
1930                    .chain(system_dependencies)
1931                    .collect()
1932            }
1933
1934            PubGrubPackageInner::Python(_) => return Ok(Dependencies::Unforkable(Vec::default())),
1935
1936            PubGrubPackageInner::System(_) => return Ok(Dependencies::Unforkable(Vec::default())),
1937
1938            // Add a dependency on both the marker and base package.
1939            PubGrubPackageInner::Marker { name, marker } => {
1940                return Ok(Dependencies::Unforkable(
1941                    [MarkerTree::TRUE, *marker]
1942                        .into_iter()
1943                        .map(move |marker| PubGrubDependency {
1944                            package: PubGrubPackage::from(PubGrubPackageInner::Package {
1945                                name: name.clone(),
1946                                extra: None,
1947                                group: None,
1948                                marker,
1949                            }),
1950                            version: Range::singleton(version.clone()),
1951                            parent: None,
1952                            url: None,
1953                        })
1954                        .collect(),
1955                ));
1956            }
1957
1958            // Add a dependency on both the extra and base package, with and without the marker.
1959            PubGrubPackageInner::Extra {
1960                name,
1961                extra,
1962                marker,
1963            } => {
1964                return Ok(Dependencies::Unforkable(
1965                    [MarkerTree::TRUE, *marker]
1966                        .into_iter()
1967                        .dedup()
1968                        .flat_map(move |marker| {
1969                            [None, Some(extra)]
1970                                .into_iter()
1971                                .map(move |extra| PubGrubDependency {
1972                                    package: PubGrubPackage::from(PubGrubPackageInner::Package {
1973                                        name: name.clone(),
1974                                        extra: extra.cloned(),
1975                                        group: None,
1976                                        marker,
1977                                    }),
1978                                    version: Range::singleton(version.clone()),
1979                                    parent: None,
1980                                    url: None,
1981                                })
1982                        })
1983                        .collect(),
1984                ));
1985            }
1986
1987            // Add a dependency on the dependency group, with and without the marker.
1988            PubGrubPackageInner::Group {
1989                name,
1990                group,
1991                marker,
1992            } => {
1993                return Ok(Dependencies::Unforkable(
1994                    [MarkerTree::TRUE, *marker]
1995                        .into_iter()
1996                        .dedup()
1997                        .map(|marker| PubGrubDependency {
1998                            package: PubGrubPackage::from(PubGrubPackageInner::Package {
1999                                name: name.clone(),
2000                                extra: None,
2001                                group: Some(group.clone()),
2002                                marker,
2003                            }),
2004                            version: Range::singleton(version.clone()),
2005                            parent: None,
2006                            url: None,
2007                        })
2008                        .collect(),
2009                ));
2010            }
2011        };
2012        Ok(Dependencies::Available(dependencies))
2013    }
2014
2015    /// The regular and dev dependencies filtered by Python version and the markers of this fork,
2016    /// plus the extras dependencies of the current package (e.g., `black` depending on
2017    /// `black[colorama]`).
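    ///
    /// As an illustrative sketch of the self-extra handling below: if `project[foo]` depends on
    /// `project[bar]>1.0`, the requirements of the `bar` extra are expanded in place, the
    /// `project[bar]` self-requirement is dropped, and a bare `project>1.0` self-constraint is
    /// retained so the version bound still applies.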
2018    fn flatten_requirements<'a>(
2019        &'a self,
2020        dependencies: &'a [Requirement],
2021        dev_dependencies: &'a BTreeMap<GroupName, Box<[Requirement]>>,
2022        extra: Option<&'a ExtraName>,
2023        dev: Option<&'a GroupName>,
2024        name: Option<&PackageName>,
2025        env: &'a ResolverEnvironment,
2026        python_requirement: &'a PythonRequirement,
2027    ) -> impl Iterator<Item = Cow<'a, Requirement>> {
2028        let python_marker = python_requirement.to_marker_tree();
2029
2030        if let Some(dev) = dev {
2031            // Dependency groups can include the project itself, so no need to flatten recursive
2032            // dependencies.
2033            Either::Left(Either::Left(self.requirements_for_extra(
2034                dev_dependencies.get(dev).into_iter().flatten(),
2035                extra,
2036                env,
2037                python_marker,
2038                python_requirement,
2039            )))
2040        } else if !dependencies
2041            .iter()
2042            .any(|req| name == Some(&req.name) && !req.extras.is_empty())
2043        {
2044            // If the project doesn't define any recursive dependencies, take the fast path.
2045            Either::Left(Either::Right(self.requirements_for_extra(
2046                dependencies.iter(),
2047                extra,
2048                env,
2049                python_marker,
2050                python_requirement,
2051            )))
2052        } else {
2053            let mut requirements = self
2054                .requirements_for_extra(
2055                    dependencies.iter(),
2056                    extra,
2057                    env,
2058                    python_marker,
2059                    python_requirement,
2060                )
2061                .collect::<Vec<_>>();
2062
2063            // Transitively process all extras that are recursively included, starting with the current
2064            // extra.
2065            let mut seen = FxHashSet::<(ExtraName, MarkerTree)>::default();
2066            let mut queue: VecDeque<_> = requirements
2067                .iter()
2068                .filter(|req| name == Some(&req.name))
2069                .flat_map(|req| req.extras.iter().cloned().map(|extra| (extra, req.marker)))
2070                .collect();
2071            while let Some((extra, marker)) = queue.pop_front() {
2072                if !seen.insert((extra.clone(), marker)) {
2073                    continue;
2074                }
2075                for requirement in self.requirements_for_extra(
2076                    dependencies,
2077                    Some(&extra),
2078                    env,
2079                    python_marker,
2080                    python_requirement,
2081                ) {
2082                    let requirement = match requirement {
2083                        Cow::Owned(mut requirement) => {
2084                            requirement.marker.and(marker);
2085                            requirement
2086                        }
2087                        Cow::Borrowed(requirement) => {
2088                            let mut marker = marker;
2089                            marker.and(requirement.marker);
2090                            Requirement {
2091                                name: requirement.name.clone(),
2092                                extras: requirement.extras.clone(),
2093                                groups: requirement.groups.clone(),
2094                                source: requirement.source.clone(),
2095                                origin: requirement.origin.clone(),
2096                                marker: marker.simplify_extras(slice::from_ref(&extra)),
2097                            }
2098                        }
2099                    };
2100                    if name == Some(&requirement.name) {
2101                        // Add each transitively included extra.
2102                        queue.extend(
2103                            requirement
2104                                .extras
2105                                .iter()
2106                                .cloned()
2107                                .map(|extra| (extra, requirement.marker)),
2108                        );
2109                    } else {
2110                        // Add the requirements for that extra.
2111                        requirements.push(Cow::Owned(requirement));
2112                    }
2113                }
2114            }
2115
2116            // Retain any self-constraints for that extra, e.g., if `project[foo]` includes
2117            // `project[bar]>1.0` as a dependency, we need to propagate `project>1.0` in addition to
2118            // transitively expanding `project[bar]`.
2119            let mut self_constraints = vec![];
2120            for req in &requirements {
2121                if name == Some(&req.name) && !req.source.is_empty() {
2122                    self_constraints.push(Requirement {
2123                        name: req.name.clone(),
2124                        extras: Box::new([]),
2125                        groups: req.groups.clone(),
2126                        source: req.source.clone(),
2127                        origin: req.origin.clone(),
2128                        marker: req.marker,
2129                    });
2130                }
2131            }
2132
2133            // Drop all the self-requirements now that we flattened them out.
2134            requirements.retain(|req| name != Some(&req.name) || req.extras.is_empty());
2135            requirements.extend(self_constraints.into_iter().map(Cow::Owned));
2136
2137            Either::Right(requirements.into_iter())
2138        }
2139    }
2140
2141    /// The set of the regular and dev dependencies, filtered by Python version,
2142    /// the markers of this fork and the requested extra.
2143    fn requirements_for_extra<'data, 'parameters>(
2144        &'data self,
2145        dependencies: impl IntoIterator<Item = &'data Requirement> + 'parameters,
2146        extra: Option<&'parameters ExtraName>,
2147        env: &'parameters ResolverEnvironment,
2148        python_marker: MarkerTree,
2149        python_requirement: &'parameters PythonRequirement,
2150    ) -> impl Iterator<Item = Cow<'data, Requirement>> + 'parameters
2151    where
2152        'data: 'parameters,
2153    {
2154        self.overrides
2155            .apply(dependencies)
2156            .filter(move |requirement| {
2157                Self::is_requirement_applicable(
2158                    requirement,
2159                    extra,
2160                    env,
2161                    python_marker,
2162                    python_requirement,
2163                )
2164            })
2165            .flat_map(move |requirement| {
2166                iter::once(requirement.clone()).chain(self.constraints_for_requirement(
2167                    requirement,
2168                    extra,
2169                    env,
2170                    python_marker,
2171                    python_requirement,
2172                ))
2173            })
2174    }
2175
2176    /// Whether a requirement is applicable for the Python version, the markers of this fork and the
2177    /// requested extra.
2178    fn is_requirement_applicable(
2179        requirement: &Requirement,
2180        extra: Option<&ExtraName>,
2181        env: &ResolverEnvironment,
2182        python_marker: MarkerTree,
2183        python_requirement: &PythonRequirement,
2184    ) -> bool {
2185        // If the requirement isn't relevant for the current platform, skip it.
2186        match extra {
2187            Some(source_extra) => {
2188                // Only include requirements that are relevant for the current extra.
2189                if requirement.evaluate_markers(env.marker_environment(), &[]) {
2190                    return false;
2191                }
2192                if !requirement
2193                    .evaluate_markers(env.marker_environment(), slice::from_ref(source_extra))
2194                {
2195                    return false;
2196                }
2197                if !env.included_by_group(ConflictItemRef::from((&requirement.name, source_extra)))
2198                {
2199                    return false;
2200                }
2201            }
2202            None => {
2203                if !requirement.evaluate_markers(env.marker_environment(), &[]) {
2204                    return false;
2205                }
2206            }
2207        }
2208
2209        // If the requirement would not be selected with any Python version
2210        // supported by the root, skip it.
2211        if python_marker.is_disjoint(requirement.marker) {
2212            trace!(
2213                "Skipping {requirement} because of Requires-Python: {requires_python}",
2214                requires_python = python_requirement.target(),
2215            );
2216            return false;
2217        }
2218
2219        // If we're in a fork in universal mode, ignore any dependency that isn't part of
2220        // this fork (but will be part of another fork).
2221        if !env.included_by_marker(requirement.marker) {
2222            trace!("Skipping {requirement} because of {env}");
2223            return false;
2224        }
2225
2226        true
2227    }
2228
2229    /// The constraints applicable to the requirement, filtered by Python version, the markers of
2230    /// this fork and the requested extra.
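    ///
    /// For example (mirroring the inline comments below): given a requirement
    /// `requests ; sys_platform == 'darwin'` and a constraint `requests ; python_version == '3.6'`,
    /// the constraint is applied with the combined marker
    /// `sys_platform == 'darwin' and python_version == '3.6'`, and skipped entirely if the combined
    /// marker can never be true.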
2231    fn constraints_for_requirement<'data, 'parameters>(
2232        &'data self,
2233        requirement: Cow<'data, Requirement>,
2234        extra: Option<&'parameters ExtraName>,
2235        env: &'parameters ResolverEnvironment,
2236        python_marker: MarkerTree,
2237        python_requirement: &'parameters PythonRequirement,
2238    ) -> impl Iterator<Item = Cow<'data, Requirement>> + 'parameters
2239    where
2240        'data: 'parameters,
2241    {
2242        self.constraints
2243            .get(&requirement.name)
2244            .into_iter()
2245            .flatten()
2246            .filter_map(move |constraint| {
2247                // If the requirement would not be selected with any Python version
2248                // supported by the root, skip it.
2249                let constraint = if constraint.marker.is_true() {
2250                    // Additionally, if the requirement is `requests ; sys_platform == 'darwin'`
2251                    // and the constraint is `requests ; python_version == '3.6'`, the
2252                    // constraint should only apply when _both_ markers are true.
2253                    if requirement.marker.is_true() {
2254                        Cow::Borrowed(constraint)
2255                    } else {
2256                        let mut marker = constraint.marker;
2257                        marker.and(requirement.marker);
2258
2259                        if marker.is_false() {
2260                            trace!(
2261                                "Skipping {constraint} because of disjoint markers: `{}` vs. `{}`",
2262                                constraint.marker.try_to_string().unwrap(),
2263                                requirement.marker.try_to_string().unwrap(),
2264                            );
2265                            return None;
2266                        }
2267
2268                        Cow::Owned(Requirement {
2269                            name: constraint.name.clone(),
2270                            extras: constraint.extras.clone(),
2271                            groups: constraint.groups.clone(),
2272                            source: constraint.source.clone(),
2273                            origin: constraint.origin.clone(),
2274                            marker,
2275                        })
2276                    }
2277                } else {
2278                    let requires_python = python_requirement.target();
2279
2280                    let mut marker = constraint.marker;
2281                    marker.and(requirement.marker);
2282
2283                    if marker.is_false() {
2284                        trace!(
2285                            "Skipping {constraint} because of disjoint markers: `{}` vs. `{}`",
2286                            constraint.marker.try_to_string().unwrap(),
2287                            requirement.marker.try_to_string().unwrap(),
2288                        );
2289                        return None;
2290                    }
2291
2292                    // Additionally, if the requirement is `requests ; sys_platform == 'darwin'`
2293                    // and the constraint is `requests ; python_version == '3.6'`, the
2294                    // constraint should only apply when _both_ markers are true.
2295                    if python_marker.is_disjoint(marker) {
2296                        trace!(
2297                            "Skipping constraint {requirement} because of Requires-Python: {requires_python}"
2298                        );
2299                        return None;
2300                    }
2301
2302                    if marker == constraint.marker {
2303                        Cow::Borrowed(constraint)
2304                    } else {
2305                        Cow::Owned(Requirement {
2306                            name: constraint.name.clone(),
2307                            extras: constraint.extras.clone(),
2308                            groups: constraint.groups.clone(),
2309                            source: constraint.source.clone(),
2310                            origin: constraint.origin.clone(),
2311                            marker,
2312                        })
2313                    }
2314                };
2315
2316                // If we're in a fork in universal mode, ignore any dependency that isn't part of
2317                // this fork (but will be part of another fork).
2318                if !env.included_by_marker(constraint.marker) {
2319                    trace!("Skipping {constraint} because of {env}");
2320                    return None;
2321                }
2322
2323                // If the constraint isn't relevant for the current platform, skip it.
2324                match extra {
2325                    Some(source_extra) => {
2326                        if !constraint
2327                            .evaluate_markers(env.marker_environment(), slice::from_ref(source_extra))
2328                        {
2329                            return None;
2330                        }
2331                        if !env.included_by_group(ConflictItemRef::from((&requirement.name, source_extra)))
2332                        {
2333                            return None;
2334                        }
2335                    }
2336                    None => {
2337                        if !constraint.evaluate_markers(env.marker_environment(), &[]) {
2338                            return None;
2339                        }
2340                    }
2341                }
2342
2343                Some(constraint)
2344            })
2345    }
2346
2347    /// Fetch the metadata for a stream of packages and versions.
2348    async fn fetch<Provider: ResolverProvider>(
2349        self: Arc<Self>,
2350        provider: Arc<Provider>,
2351        request_stream: Receiver<Request>,
2352    ) -> Result<(), ResolveError> {
2353        let mut response_stream = ReceiverStream::new(request_stream)
2354            .map(|request| self.process_request(request, &*provider).boxed_local())
2355            // Allow as many futures as possible to start in the background.
2356            // Backpressure is provided at a more granular level by `DistributionDatabase`
2357            // and `SourceDispatch`, as well as by the bounded request channel.
2358            .buffer_unordered(usize::MAX);
2359
2360        while let Some(response) = response_stream.next().await {
2361            match response? {
2362                Some(Response::Package(name, index, version_map)) => {
2363                    trace!("Received package metadata for: {name}");
2364                    if let Some(index) = index {
2365                        self.index
2366                            .explicit()
2367                            .done((name, index), Arc::new(version_map));
2368                    } else {
2369                        self.index.implicit().done(name, Arc::new(version_map));
2370                    }
2371                }
2372                Some(Response::Installed { dist, metadata }) => {
2373                    trace!("Received installed distribution metadata for: {dist}");
2374                    self.index
2375                        .distributions()
2376                        .done(dist.version_id(), Arc::new(metadata));
2377                }
2378                Some(Response::Dist { dist, metadata }) => {
2379                    let dist_kind = match dist {
2380                        Dist::Built(_) => "built",
2381                        Dist::Source(_) => "source",
2382                    };
2383                    trace!("Received {dist_kind} distribution metadata for: {dist}");
2384                    if let MetadataResponse::Unavailable(reason) = &metadata {
2385                        let message = UnavailableVersion::from(reason).singular_message();
2386                        if let Some(err) = reason.source() {
2387                            // Show the detailed error for metadata parse errors.
2388                            warn!("{dist} {message}: {err}");
2389                        } else {
2390                            warn!("{dist} {message}");
2391                        }
2392                    }
2393                    self.index
2394                        .distributions()
2395                        .done(dist.version_id(), Arc::new(metadata));
2396                }
2397                None => {}
2398            }
2399        }
2400
2401        Ok::<(), ResolveError>(())
2402    }
2403
2404    #[instrument(skip_all, fields(%request))]
2405    async fn process_request<Provider: ResolverProvider>(
2406        &self,
2407        request: Request,
2408        provider: &Provider,
2409    ) -> Result<Option<Response>, ResolveError> {
2410        match request {
2411            // Fetch package metadata from the registry.
2412            Request::Package(package_name, index) => {
2413                let package_versions = provider
2414                    .get_package_versions(&package_name, index.as_ref())
2415                    .boxed_local()
2416                    .await
2417                    .map_err(ResolveError::Client)?;
2418
2419                Ok(Some(Response::Package(
2420                    package_name,
2421                    index.map(IndexMetadata::into_url),
2422                    package_versions,
2423                )))
2424            }
2425
2426            // Fetch distribution metadata from the distribution database.
2427            Request::Dist(dist) => {
2428                if let Some(version) = dist.version() {
2429                    if let Some(index) = dist.index() {
2430                        // Check the implicit indexes for pre-provided metadata.
2431                        let versions_response = self.index.implicit().get(dist.name());
2432                        if let Some(VersionsResponse::Found(version_maps)) =
2433                            versions_response.as_deref()
2434                        {
2435                            for version_map in version_maps {
2436                                if version_map.index() == Some(index) {
2437                                    let Some(metadata) = version_map.get_metadata(version) else {
2438                                        continue;
2439                                    };
2440                                    debug!("Found registry-provided metadata for: {dist}");
2441                                    return Ok(Some(Response::Dist {
2442                                        dist,
2443                                        metadata: MetadataResponse::Found(
2444                                            ArchiveMetadata::from_metadata23(metadata.clone()),
2445                                        ),
2446                                    }));
2447                                }
2448                            }
2449                        }
2450
2451                        // Check the explicit indexes for pre-provided metadata.
2452                        let versions_response = self
2453                            .index
2454                            .explicit()
2455                            .get(&(dist.name().clone(), index.clone()));
2456                        if let Some(VersionsResponse::Found(version_maps)) =
2457                            versions_response.as_deref()
2458                        {
2459                            for version_map in version_maps {
2460                                let Some(metadata) = version_map.get_metadata(version) else {
2461                                    continue;
2462                                };
2463                                debug!("Found registry-provided metadata for: {dist}");
2464                                return Ok(Some(Response::Dist {
2465                                    dist,
2466                                    metadata: MetadataResponse::Found(
2467                                        ArchiveMetadata::from_metadata23(metadata.clone()),
2468                                    ),
2469                                }));
2470                            }
2471                        }
2472                    }
2473                }
2474
2475                let metadata = provider
2476                    .get_or_build_wheel_metadata(&dist)
2477                    .boxed_local()
2478                    .await?;
2479
2480                if let MetadataResponse::Found(metadata) = &metadata {
2481                    if &metadata.metadata.name != dist.name() {
2482                        return Err(ResolveError::MismatchedPackageName {
2483                            request: "distribution metadata",
2484                            expected: dist.name().clone(),
2485                            actual: metadata.metadata.name.clone(),
2486                        });
2487                    }
2488                }
2489
2490                Ok(Some(Response::Dist { dist, metadata }))
2491            }
2492
2493            Request::Installed(dist) => {
2494                let metadata = provider.get_installed_metadata(&dist).boxed_local().await?;
2495
2496                if let MetadataResponse::Found(metadata) = &metadata {
2497                    if &metadata.metadata.name != dist.name() {
2498                        return Err(ResolveError::MismatchedPackageName {
2499                            request: "installed metadata",
2500                            expected: dist.name().clone(),
2501                            actual: metadata.metadata.name.clone(),
2502                        });
2503                    }
2504                }
2505
2506                Ok(Some(Response::Installed { dist, metadata }))
2507            }
2508
2509            // Pre-fetch the package and distribution metadata.
2510            Request::Prefetch(package_name, range, python_requirement) => {
2511                // Wait for the package metadata to become available.
2512                let versions_response = self
2513                    .index
2514                    .implicit()
2515                    .wait(&package_name)
2516                    .await
2517                    .ok_or_else(|| ResolveError::UnregisteredTask(package_name.to_string()))?;
2518
2519                let version_map = match *versions_response {
2520                    VersionsResponse::Found(ref version_map) => version_map,
2521                    // Short-circuit if we did not find any versions for the package
2522                    VersionsResponse::NoIndex => {
2523                        self.unavailable_packages
2524                            .insert(package_name.clone(), UnavailablePackage::NoIndex);
2525
2526                        return Ok(None);
2527                    }
2528                    VersionsResponse::Offline => {
2529                        self.unavailable_packages
2530                            .insert(package_name.clone(), UnavailablePackage::Offline);
2531
2532                        return Ok(None);
2533                    }
2534                    VersionsResponse::NotFound => {
2535                        self.unavailable_packages
2536                            .insert(package_name.clone(), UnavailablePackage::NotFound);
2537
2538                        return Ok(None);
2539                    }
2540                };
2541
2542                // We don't have access to the fork state when prefetching, so assume that
2543                // pre-release versions are allowed.
2544                let env = ResolverEnvironment::universal(vec![]);
2545
2546                // Try to find a compatible version. If there aren't any compatible versions,
2547                // short-circuit.
2548                let Some(candidate) = self.selector.select(
2549                    &package_name,
2550                    &range,
2551                    version_map,
2552                    &self.preferences,
2553                    &self.installed_packages,
2554                    &self.exclusions,
2555                    None,
2556                    &env,
2557                    self.tags.as_ref(),
2558                ) else {
2559                    return Ok(None);
2560                };
2561
2562                // If there is not a compatible distribution, short-circuit.
2563                let Some(dist) = candidate.compatible() else {
2564                    return Ok(None);
2565                };
2566
2567                // If the registry provided metadata for this distribution, use it.
2568                for version_map in version_map {
2569                    if let Some(metadata) = version_map.get_metadata(candidate.version()) {
2570                        let dist = dist.for_resolution();
2571                        if version_map.index() == dist.index() {
2572                            debug!("Found registry-provided metadata for: {dist}");
2573
2574                            let metadata = MetadataResponse::Found(
2575                                ArchiveMetadata::from_metadata23(metadata.clone()),
2576                            );
2577
2578                            let dist = dist.to_owned();
2579                            if &package_name != dist.name() {
2580                                return Err(ResolveError::MismatchedPackageName {
2581                                    request: "distribution",
2582                                    expected: package_name,
2583                                    actual: dist.name().clone(),
2584                                });
2585                            }
2586
2587                            let response = match dist {
2588                                ResolvedDist::Installable { dist, .. } => Response::Dist {
2589                                    dist: (*dist).clone(),
2590                                    metadata,
2591                                },
2592                                ResolvedDist::Installed { dist } => Response::Installed {
2593                                    dist: (*dist).clone(),
2594                                    metadata,
2595                                },
2596                            };
2597
2598                            return Ok(Some(response));
2599                        }
2600                    }
2601                }
2602
2603                // Avoid prefetching source distributions with unbounded lower-bound ranges. This
2604                // often leads to failed attempts to build legacy versions of packages that are
2605                // incompatible with modern build tools.
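                // As an illustrative sketch (package name hypothetical): under `--resolution
                // lowest`, a requirement like `foo` with no lower bound would otherwise prefetch
                // the oldest `foo` source distribution on the index, which frequently fails to
                // build with modern tooling.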
2606                if dist.wheel().is_none() {
2607                    if !self.selector.use_highest_version(&package_name, &env) {
2608                        if let Some((lower, _)) = range.iter().next() {
2609                            if lower == &Bound::Unbounded {
2610                                debug!(
2611                                    "Skipping prefetch for unbounded minimum-version range: {package_name} ({range})"
2612                                );
2613                                return Ok(None);
2614                            }
2615                        }
2616                    }
2617                }
2618
2619                // Validate the Python requirement.
2620                let requires_python = match dist {
2621                    CompatibleDist::InstalledDist(_) => None,
2622                    CompatibleDist::SourceDist { sdist, .. }
2623                    | CompatibleDist::IncompatibleWheel { sdist, .. } => {
2624                        sdist.file.requires_python.as_ref()
2625                    }
2626                    CompatibleDist::CompatibleWheel { wheel, .. } => {
2627                        wheel.file.requires_python.as_ref()
2628                    }
2629                };
2630                if let Some(requires_python) = requires_python.as_ref() {
2631                    if !python_requirement.target().is_contained_by(requires_python) {
2632                        return Ok(None);
2633                    }
2634                }
2635
2636                // Verify that the package is allowed under the hash-checking policy.
2637                if !self
2638                    .hasher
2639                    .allows_package(candidate.name(), candidate.version())
2640                {
2641                    return Ok(None);
2642                }
2643
2644                // Emit a request to fetch the metadata for this version.
2645                if self.index.distributions().register(candidate.version_id()) {
2646                    let dist = dist.for_resolution().to_owned();
2647                    if &package_name != dist.name() {
2648                        return Err(ResolveError::MismatchedPackageName {
2649                            request: "distribution",
2650                            expected: package_name,
2651                            actual: dist.name().clone(),
2652                        });
2653                    }
2654
2655                    let response = match dist {
2656                        ResolvedDist::Installable { dist, .. } => {
2657                            let metadata = provider
2658                                .get_or_build_wheel_metadata(&dist)
2659                                .boxed_local()
2660                                .await?;
2661
2662                            Response::Dist {
2663                                dist: (*dist).clone(),
2664                                metadata,
2665                            }
2666                        }
2667                        ResolvedDist::Installed { dist } => {
2668                            let metadata =
2669                                provider.get_installed_metadata(&dist).boxed_local().await?;
2670
2671                            Response::Installed {
2672                                dist: (*dist).clone(),
2673                                metadata,
2674                            }
2675                        }
2676                    };
2677
2678                    Ok(Some(response))
2679                } else {
2680                    Ok(None)
2681                }
2682            }
2683        }
2684    }
2685
2686    fn convert_no_solution_err(
2687        &self,
2688        mut err: pubgrub::NoSolutionError<UvDependencyProvider>,
2689        fork_urls: ForkUrls,
2690        fork_indexes: ForkIndexes,
2691        env: ResolverEnvironment,
2692        current_environment: MarkerEnvironment,
2693        exclude_newer: Option<&ExcludeNewer>,
2694        visited: &FxHashSet<PackageName>,
2695    ) -> ResolveError {
2696        err = NoSolutionError::collapse_local_version_segments(NoSolutionError::collapse_proxies(
2697            err,
2698        ));
2699
2700        let mut unavailable_packages = FxHashMap::default();
2701        for package in err.packages() {
2702            if let PubGrubPackageInner::Package { name, .. } = &**package {
2703                if let Some(reason) = self.unavailable_packages.get(name) {
2704                    unavailable_packages.insert(name.clone(), reason.clone());
2705                }
2706            }
2707        }
2708
2709        let mut incomplete_packages = FxHashMap::default();
2710        for package in err.packages() {
2711            if let PubGrubPackageInner::Package { name, .. } = &**package {
2712                if let Some(versions) = self.incomplete_packages.get(name) {
2713                    for entry in versions.iter() {
2714                        let (version, reason) = entry.pair();
2715                        incomplete_packages
2716                            .entry(name.clone())
2717                            .or_insert_with(BTreeMap::default)
2718                            .insert(version.clone(), reason.clone());
2719                    }
2720                }
2721            }
2722        }
2723
2724        let mut available_indexes = FxHashMap::default();
2725        let mut available_versions = FxHashMap::default();
2726        for package in err.packages() {
2727            let Some(name) = package.name() else { continue };
2728            if !visited.contains(name) {
2729                // Avoid including available versions for packages that exist in the derivation
2730                // tree, but were never visited during resolution. We _may_ have metadata for
2731                // these packages, but it's non-deterministic, and omitting them ensures that
2732                // we represent the state of the resolver at the time of failure.
2733                continue;
2734            }
2735            let versions_response = if let Some(index) = fork_indexes.get(name) {
2736                self.index
2737                    .explicit()
2738                    .get(&(name.clone(), index.url().clone()))
2739            } else {
2740                self.index.implicit().get(name)
2741            };
2742            if let Some(response) = versions_response {
2743                if let VersionsResponse::Found(ref version_maps) = *response {
2744                    // Track the available versions, across all indexes.
2745                    for version_map in version_maps {
2746                        let package_versions = available_versions
2747                            .entry(name.clone())
2748                            .or_insert_with(BTreeSet::new);
2749
2750                        for (version, dists) in version_map.iter(&Ranges::full()) {
2751                            // Don't show versions removed by `--exclude-newer` in hints.
2752                            if let Some(exclude_newer) =
2753                                exclude_newer.and_then(|en| en.exclude_newer_package(name))
2754                            {
2755                                let Some(prioritized_dist) = dists.prioritized_dist() else {
2756                                    continue;
2757                                };
2758                                if prioritized_dist.files().all(|file| {
2759                                    file.upload_time_utc_ms.is_none_or(|upload_time| {
2760                                        upload_time >= exclude_newer.timestamp_millis()
2761                                    })
2762                                }) {
2763                                    continue;
2764                                }
2765                            }
2766
2767                            package_versions.insert(version.clone());
2768                        }
2769                    }
2770
2771                    // Track the indexes in which the package is available.
2772                    available_indexes
2773                        .entry(name.clone())
2774                        .or_insert(BTreeSet::new())
2775                        .extend(
2776                            version_maps
2777                                .iter()
2778                                .filter_map(|version_map| version_map.index().cloned()),
2779                        );
2780                }
2781            }
2782        }
2783
2784        ResolveError::NoSolution(Box::new(NoSolutionError::new(
2785            err,
2786            self.index.clone(),
2787            available_versions,
2788            available_indexes,
2789            self.selector.clone(),
2790            self.python_requirement.clone(),
2791            self.locations.clone(),
2792            self.capabilities.clone(),
2793            unavailable_packages,
2794            incomplete_packages,
2795            fork_urls,
2796            fork_indexes,
2797            env,
2798            current_environment,
2799            self.tags.clone(),
2800            self.workspace_members.clone(),
2801            self.options.clone(),
2802        )))
2803    }
2804
2805    fn on_progress(&self, package: &PubGrubPackage, version: &Version) {
2806        if let Some(reporter) = self.reporter.as_ref() {
2807            match &**package {
2808                PubGrubPackageInner::Root(_) => {}
2809                PubGrubPackageInner::Python(_) => {}
2810                PubGrubPackageInner::System(_) => {}
2811                PubGrubPackageInner::Marker { .. } => {}
2812                PubGrubPackageInner::Extra { .. } => {}
2813                PubGrubPackageInner::Group { .. } => {}
2814                PubGrubPackageInner::Package { name, .. } => {
2815                    reporter.on_progress(name, &VersionOrUrlRef::Version(version));
2816                }
2817            }
2818        }
2819    }
2820
2821    fn on_complete(&self) {
2822        if let Some(reporter) = self.reporter.as_ref() {
2823            reporter.on_complete();
2824        }
2825    }
2826}
2827
2828/// State that is used during unit propagation in the resolver, one instance per fork.
2829#[derive(Clone)]
2830pub(crate) struct ForkState {
2831    /// The internal state used by the resolver.
2832    ///
2833    /// Note that not all parts of this state are strictly internal. For
2834    /// example, the edges in the dependency graph generated as part of the
2835    /// output of resolution are derived from the "incompatibilities" tracked
2836    /// in this state. We also ultimately retrieve the final set of version
2837    /// assignments (to packages) from this state's "partial solution."
2838    pubgrub: State<UvDependencyProvider>,
2839    /// The initial package to select. If set, the first iteration over this state will avoid
2840    /// asking PubGrub for the highest-priority package, and will instead use the provided package.
2841    initial_id: Option<Id<PubGrubPackage>>,
2842    /// The initial version to select. If set, the first iteration over this state will avoid
2843    /// asking PubGrub for the highest-priority version, and will instead use the provided version.
2844    initial_version: Option<Version>,
2845    /// The next package on which to run unit propagation.
2846    next: Id<PubGrubPackage>,
2847    /// The set of pinned versions we accrue throughout resolution.
2848    ///
2849    /// The key of this map is a package name, and each package name maps to
2850    /// a set of versions for that package. Each version in turn is mapped
2851    /// to a single [`ResolvedDist`]. That [`ResolvedDist`] represents, at time
2852    /// of writing (2024/05/09), at most one wheel. The idea here is that
2853    /// [`FilePins`] tracks precisely which wheel was selected during resolution.
2854    /// After resolution is finished, this map is consulted in order to select
2855    /// the wheel chosen during resolution.
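    ///
    /// An illustrative sketch of the shape (names, versions, and filenames are hypothetical):
    ///
    /// ```text
    /// flask -> { 3.0.0 -> flask-3.0.0-py3-none-any.whl }
    /// idna  -> { 3.6   -> idna-3.6-py3-none-any.whl }
    /// ```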
2856    pins: FilePins,
2857    /// Ensure we don't have duplicate URLs in any branch.
2858    ///
2859    /// Unlike [`Urls`], we add only the URLs we have seen in this branch, and there can be only
2860    /// one URL per package. By prioritizing direct URL dependencies over registry dependencies,
2861    /// this map is populated for all direct URL packages before we look at any registry packages.
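    ///
    /// A hypothetical sketch of the invariant (URLs are illustrative):
    ///
    /// ```text
    /// flask @ https://example.com/flask-3.0.0.tar.gz   -> recorded for `flask`
    /// flask @ https://example.com/flask-3.0.1.tar.gz   -> error: second URL in this branch
    /// ```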
2862    fork_urls: ForkUrls,
2863    /// Ensure we don't have duplicate indexes in any branch.
2864    ///
2865    /// Unlike [`Indexes`], we add only the indexes we have seen in this branch, and there can be
2866    /// only one index per package.
2867    fork_indexes: ForkIndexes,
2868    /// When dependencies for a package are retrieved, this map of priorities
2869    /// is updated based on how each dependency was specified. Certain types
2870    /// of dependencies have more "priority" than others (like direct URL
2871    /// dependencies). These priorities help determine which package to
2872    /// consider next during resolution.
2873    priorities: PubGrubPriorities,
2874    /// This keeps track of the set of versions for each package that we've
2875    /// already visited during resolution. This avoids doing redundant work.
2876    added_dependencies: FxHashMap<Id<PubGrubPackage>, FxHashSet<Version>>,
2877    /// The marker expression that created this state.
2878    ///
2879    /// The root state always corresponds to a marker expression that is always
2880    /// `true` for every `MarkerEnvironment`.
2881    ///
2882    /// In non-universal mode, forking never occurs and so this marker
2883    /// expression is always `true`.
2884    ///
2885    /// Whenever dependencies are fetched, all requirement specifications
2886    /// are checked for disjointness with the marker expression of the fork
2887    /// in which those dependencies were fetched. If a requirement has a
2888    /// completely disjoint marker expression (i.e., it can never be true given
2889    /// that the marker expression that provoked the fork is true), then that
2890    /// dependency is completely ignored.
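    ///
    /// For example (markers are illustrative), in a fork provoked by
    /// `sys_platform == "win32"`:
    ///
    /// ```text
    /// requirement marker: sys_platform == "linux"    (disjoint -> dependency ignored)
    /// requirement marker: python_version >= "3.9"    (overlaps -> dependency kept)
    /// ```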
2891    env: ResolverEnvironment,
2892    /// The Python requirement for this fork. Defaults to the Python requirement for
2893    /// the resolution, but may be narrowed if a `python_version` marker is present
2894    /// in a given fork.
2895    ///
2896    /// For example, in:
2897    /// ```text
2898    /// numpy >=1.26 ; python_version >= "3.9"
2899    /// numpy <1.26 ; python_version < "3.9"
2900    /// ```
2901    ///
2902    /// The top fork has a narrower Python compatibility range, and thus can find a
2903    /// solution that omits Python 3.8 support.
2904    python_requirement: PythonRequirement,
2905    conflict_tracker: ConflictTracker,
2906    /// Prefetch package versions for packages with many rejected versions.
2907    ///
2908    /// Tracked on the fork state to avoid counting each identical version between forks as a new try.
2909    prefetcher: BatchPrefetcher,
2910}
2911
2912impl ForkState {
2913    fn new(
2914        pubgrub: State<UvDependencyProvider>,
2915        env: ResolverEnvironment,
2916        python_requirement: PythonRequirement,
2917        prefetcher: BatchPrefetcher,
2918    ) -> Self {
2919        Self {
2920            initial_id: None,
2921            initial_version: None,
2922            next: pubgrub.root_package,
2923            pubgrub,
2924            pins: FilePins::default(),
2925            fork_urls: ForkUrls::default(),
2926            fork_indexes: ForkIndexes::default(),
2927            priorities: PubGrubPriorities::default(),
2928            added_dependencies: FxHashMap::default(),
2929            env,
2930            python_requirement,
2931            conflict_tracker: ConflictTracker::default(),
2932            prefetcher,
2933        }
2934    }
2935
2936    /// Visit the dependencies for the selected version of the current package, incorporating any
2937    /// relevant URLs and pinned indexes into the [`ForkState`].
2938    fn visit_package_version_dependencies(
2939        &mut self,
2940        for_package: Id<PubGrubPackage>,
2941        for_version: &Version,
2942        urls: &Urls,
2943        indexes: &Indexes,
2944        dependencies: &[PubGrubDependency],
2945        git: &GitResolver,
2946        workspace_members: &BTreeSet<PackageName>,
2947        resolution_strategy: &ResolutionStrategy,
2948    ) -> Result<(), ResolveError> {
2949        for dependency in dependencies {
2950            let PubGrubDependency {
2951                package,
2952                version,
2953                parent: _,
2954                url,
2955            } = dependency;
2956
2957            let mut has_url = false;
2958            if let Some(name) = package.name() {
2959                // From the [`Requirement`] to [`PubGrubDependency`] conversion, we get a URL if the
2960                // requirement was a URL requirement. `Urls` canonicalizes that URL and applies
2961                // override URLs to both URL and registry requirements; we then check the result
2962                // for conflicts using [`ForkUrls`].
2963                for url in urls.get_url(&self.env, name, url.as_ref(), git)? {
2964                    self.fork_urls.insert(name, url, &self.env)?;
2965                    has_url = true;
2966                }
2967
2968                // If the package is pinned to an exact index, add it to the fork.
2969                for index in indexes.get(name, &self.env) {
2970                    self.fork_indexes.insert(name, index, &self.env)?;
2971                }
2972            }
2973
2974            if let Some(name) = self.pubgrub.package_store[for_package]
2975                .name_no_root()
2976                .filter(|name| !workspace_members.contains(name))
2977            {
2978                debug!(
2979                    "Adding transitive dependency for {name}=={for_version}: {package}{version}"
2980                );
2981            } else {
2982                // A dependency from the root package or `requirements.txt`.
2983                debug!("Adding direct dependency: {package}{version}");
2984
2985                // Warn the user if a direct dependency lacks a lower bound under a lowest-version resolution strategy.
2986                let missing_lower_bound = version
2987                    .bounding_range()
2988                    .map(|(lowest, _highest)| lowest == Bound::Unbounded)
2989                    .unwrap_or(true);
2990                let strategy_lowest = matches!(
2991                    resolution_strategy,
2992                    ResolutionStrategy::Lowest | ResolutionStrategy::LowestDirect(..)
2993                );
2994
2995                if !has_url && missing_lower_bound && strategy_lowest {
2996                    let name = package.name_no_root().unwrap();
2997                    // Handle cases where a package is listed both without and with a lower bound.
2998                    // Example:
2999                    // ```
3000                    // "coverage[toml] ; python_version < '3.11'",
3001                    // "coverage >= 7.10.0",
3002                    // ```
3003                    let bound_on_other_package = dependencies.iter().any(|other| {
3004                        Some(name) == other.package.name()
3005                            && !other
3006                                .version
3007                                .bounding_range()
3008                                .map(|(lowest, _highest)| lowest == Bound::Unbounded)
3009                                .unwrap_or(true)
3010                    });
3011
3012                    if !bound_on_other_package {
3013                        warn_user_once!(
3014                            "The direct dependency `{name}` is unpinned. \
3015                            Consider setting a lower bound when using `--resolution lowest` \
3016                            or `--resolution lowest-direct` to avoid using outdated versions.",
3017                        );
3018                    }
3019                }
3020            }
3021
3022            // Update the package priorities.
3023            self.priorities.insert(package, version, &self.fork_urls);
3024            // As we're adding an incompatibility from the proxy package to the base package,
3025            // we need to register the base package.
3026            if let Some(base_package) = package.base_package() {
3027                self.priorities
3028                    .insert(&base_package, version, &self.fork_urls);
3029            }
3030        }
3031
3032        Ok(())
3033    }
3034
3035    /// Add the dependencies for the selected version of the current package.
3036    fn add_package_version_dependencies(
3037        &mut self,
3038        for_package: Id<PubGrubPackage>,
3039        for_version: &Version,
3040        dependencies: Vec<PubGrubDependency>,
3041    ) {
3042        for dependency in &dependencies {
3043            let PubGrubDependency {
3044                package,
3045                version,
3046                parent: _,
3047                url: _,
3048            } = dependency;
3049
3050            let Some(base_package) = package.base_package() else {
3051                continue;
3052            };
3053
3054            let proxy_package = self.pubgrub.package_store.alloc(package.clone());
3055            let base_package_id = self.pubgrub.package_store.alloc(base_package.clone());
3056            self.pubgrub
3057                .add_proxy_package(proxy_package, base_package_id, version.clone());
3058        }
3059
3060        let conflict = self.pubgrub.add_package_version_dependencies(
3061            self.next,
3062            for_version.clone(),
3063            dependencies.into_iter().map(|dependency| {
3064                let PubGrubDependency {
3065                    package,
3066                    version,
3067                    parent: _,
3068                    url: _,
3069                } = dependency;
3070                (package, version)
3071            }),
3072        );
3073
3074        // Conflict tracking: If the version was rejected due to its dependencies, record the
3075        // culprit and the affected package.
3076        if let Some(incompatibility) = conflict {
3077            self.record_conflict(for_package, Some(for_version), incompatibility);
3078        }
3079    }
3080
3081    fn record_conflict(
3082        &mut self,
3083        affected: Id<PubGrubPackage>,
3084        version: Option<&Version>,
3085        incompatibility: IncompId<PubGrubPackage, Ranges<Version>, UnavailableReason>,
3086    ) {
3087        let mut culprit_is_real = false;
3088        for (incompatible, _term) in self.pubgrub.incompatibility_store[incompatibility].iter() {
3089            if incompatible == affected {
3090                continue;
3091            }
3092            if self.pubgrub.package_store[affected].name()
3093                == self.pubgrub.package_store[incompatible].name()
3094            {
3095                // Don't track conflicts between a marker package and the main package, when the
3096                // marker is "copying" the obligations from the main package through conflicts.
3097                continue;
3098            }
3099            culprit_is_real = true;
3100            let culprit_count = self
3101                .conflict_tracker
3102                .culprit
3103                .entry(incompatible)
3104                .or_default();
3105            *culprit_count += 1;
3106            if *culprit_count == CONFLICT_THRESHOLD {
3107                self.conflict_tracker.deprioritize.push(incompatible);
3108            }
3109        }
3110        // Only record the affected package if the conflict involved a real culprit, i.e.,
3111        // it wasn't just a marker package mirroring the main package's obligations.
3112        if culprit_is_real {
3113            if tracing::enabled!(Level::DEBUG) {
3114                let incompatibility = self.pubgrub.incompatibility_store[incompatibility]
3115                    .iter()
3116                    .map(|(package, _term)| &self.pubgrub.package_store[package])
3117                    .join(", ");
3118                if let Some(version) = version {
3119                    debug!(
3120                        "Recording dependency conflict of {}=={} from incompatibility of ({})",
3121                        self.pubgrub.package_store[affected], version, incompatibility
3122                    );
3123                } else {
3124                    debug!(
3125                        "Recording unit propagation conflict of {} from incompatibility of ({})",
3126                        self.pubgrub.package_store[affected], incompatibility
3127                    );
3128                }
3129            }
3130
3131            let affected_count = self.conflict_tracker.affected.entry(self.next).or_default();
3132            *affected_count += 1;
3133            if *affected_count == CONFLICT_THRESHOLD {
3134                self.conflict_tracker.prioritize.push(self.next);
3135            }
3136        }
3137    }
3138
3139    fn add_unavailable_version(&mut self, version: Version, reason: UnavailableVersion) {
3140        // Incompatible requires-python versions are special in that we track
3141        // them as incompatible dependencies instead of marking the package version
3142        // as unavailable directly.
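        //
        // As an illustrative sketch: if the selected version only ships files that declare
        // `requires-python >= "3.9"` while the target interpreter is 3.8, we add a dependency
        // on the Python package with the range `>= 3.9` rather than discarding the version.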
3143        if let UnavailableVersion::IncompatibleDist(
3144            IncompatibleDist::Source(IncompatibleSource::RequiresPython(requires_python, kind))
3145            | IncompatibleDist::Wheel(IncompatibleWheel::RequiresPython(requires_python, kind)),
3146        ) = reason
3147        {
3148            let package = &self.next;
3149            let python = self.pubgrub.package_store.alloc(PubGrubPackage::from(
3150                PubGrubPackageInner::Python(match kind {
3151                    PythonRequirementKind::Installed => PubGrubPython::Installed,
3152                    PythonRequirementKind::Target => PubGrubPython::Target,
3153                }),
3154            ));
3155            self.pubgrub
3156                .add_incompatibility(Incompatibility::from_dependency(
3157                    *package,
3158                    Range::singleton(version.clone()),
3159                    (python, release_specifiers_to_ranges(requires_python)),
3160                ));
3161            self.pubgrub
3162                .partial_solution
3163                .add_decision(self.next, version);
3164            return;
3165        }
3166        self.pubgrub
3167            .add_incompatibility(Incompatibility::custom_version(
3168                self.next,
3169                version.clone(),
3170                UnavailableReason::Version(reason),
3171            ));
3172    }
3173
3174    /// Subset the current markers with the new markers and update the Python requirement
3175    /// field accordingly.
3176    ///
3177    /// If the fork's markers narrow the supported Python range, the narrowed requirement
3178    /// is applied to the returned state.
3179    fn with_env(mut self, env: ResolverEnvironment) -> Self {
3180        self.env = env;
3181        // If the fork contains a narrowed Python requirement, apply it.
3182        if let Some(req) = self.env.narrow_python_requirement(&self.python_requirement) {
3183            debug!("Narrowed `requires-python` bound to: {}", req.target());
3184            self.python_requirement = req;
3185        }
3186        self
3187    }
3188
3189    /// Returns the URL or index for a package and version.
3190    ///
3191    /// In practice, exactly one of the returned values will be `Some`.
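    ///
    /// Illustrative outcomes (values are hypothetical):
    ///
    /// ```text
    /// direct-URL package -> (Some(url), None)
    /// registry package   -> (None, Some(index))
    /// ```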
3192    fn source(
3193        &self,
3194        name: &PackageName,
3195        version: &Version,
3196    ) -> (Option<&VerbatimParsedUrl>, Option<&IndexUrl>) {
3197        let url = self.fork_urls.get(name);
3198        let index = url
3199            .is_none()
3200            .then(|| {
3201                self.pins
3202                    .get(name, version)
3203                    .expect("Every package should be pinned")
3204                    .index()
3205            })
3206            .flatten();
3207        (url, index)
3208    }
3209
3210    fn into_resolution(self) -> Resolution {
3211        let solution: FxHashMap<_, _> = self.pubgrub.partial_solution.extract_solution().collect();
3212        let edge_count: usize = solution
3213            .keys()
3214            .map(|package| self.pubgrub.incompatibilities[package].len())
3215            .sum();
3216        let mut edges: Vec<ResolutionDependencyEdge> = Vec::with_capacity(edge_count);
3217        for (package, self_version) in &solution {
3218            for id in &self.pubgrub.incompatibilities[package] {
3219                let pubgrub::Kind::FromDependencyOf(
3220                    self_package,
3221                    ref self_range,
3222                    dependency_package,
3223                    ref dependency_range,
3224                ) = self.pubgrub.incompatibility_store[*id].kind
3225                else {
3226                    continue;
3227                };
3228                if *package != self_package {
3229                    continue;
3230                }
3231                if !self_range.contains(self_version) {
3232                    continue;
3233                }
3234                let Some(dependency_version) = solution.get(&dependency_package) else {
3235                    continue;
3236                };
3237                if !dependency_range.contains(dependency_version) {
3238                    continue;
3239                }
3240
3241                let self_package = &self.pubgrub.package_store[self_package];
3242                let dependency_package = &self.pubgrub.package_store[dependency_package];
3243
3244                let (self_name, self_extra, self_group) = match &**self_package {
3245                    PubGrubPackageInner::Package {
3246                        name: self_name,
3247                        extra: self_extra,
3248                        group: self_group,
3249                        marker: _,
3250                    } => (Some(self_name), self_extra.as_ref(), self_group.as_ref()),
3251
3252                    PubGrubPackageInner::Root(_) => (None, None, None),
3253
3254                    _ => continue,
3255                };
3256
3257                let (self_url, self_index) = self_name
3258                    .map(|self_name| self.source(self_name, self_version))
3259                    .unwrap_or((None, None));
3260
3261                match **dependency_package {
3262                    PubGrubPackageInner::Package {
3263                        name: ref dependency_name,
3264                        extra: ref dependency_extra,
3265                        group: ref dependency_dev,
3266                        marker: ref dependency_marker,
3267                    } => {
3268                        debug_assert!(
3269                            dependency_extra.is_none(),
3270                            "Packages should depend on an extra proxy"
3271                        );
3272                        debug_assert!(
3273                            dependency_dev.is_none(),
3274                            "Packages should depend on a group proxy"
3275                        );
3276
3277                        // Ignore self-dependencies (e.g., `tensorflow-macos` depends on `tensorflow-macos`),
3278                        // but allow groups to depend on other groups, or on the package itself.
3279                        if self_group.is_none() {
3280                            if self_name == Some(dependency_name) {
3281                                continue;
3282                            }
3283                        }
3284
3285                        let (to_url, to_index) = self.source(dependency_name, dependency_version);
3286
3287                        let edge = ResolutionDependencyEdge {
3288                            from: self_name.cloned(),
3289                            from_version: self_version.clone(),
3290                            from_url: self_url.cloned(),
3291                            from_index: self_index.cloned(),
3292                            from_extra: self_extra.cloned(),
3293                            from_group: self_group.cloned(),
3294                            to: dependency_name.clone(),
3295                            to_version: dependency_version.clone(),
3296                            to_url: to_url.cloned(),
3297                            to_index: to_index.cloned(),
3298                            to_extra: dependency_extra.clone(),
3299                            to_group: dependency_dev.clone(),
3300                            marker: *dependency_marker,
3301                        };
3302                        edges.push(edge);
3303                    }
3304
3305                    PubGrubPackageInner::Marker {
3306                        name: ref dependency_name,
3307                        marker: ref dependency_marker,
3308                    } => {
3309                        // Ignore self-dependencies (e.g., `tensorflow-macos` depends on `tensorflow-macos`),
3310                        // but allow groups to depend on other groups, or on the package itself.
3311                        if self_group.is_none() {
3312                            if self_name == Some(dependency_name) {
3313                                continue;
3314                            }
3315                        }
3316
3317                        let (to_url, to_index) = self.source(dependency_name, dependency_version);
3318
3319                        let edge = ResolutionDependencyEdge {
3320                            from: self_name.cloned(),
3321                            from_version: self_version.clone(),
3322                            from_url: self_url.cloned(),
3323                            from_index: self_index.cloned(),
3324                            from_extra: self_extra.cloned(),
3325                            from_group: self_group.cloned(),
3326                            to: dependency_name.clone(),
3327                            to_version: dependency_version.clone(),
3328                            to_url: to_url.cloned(),
3329                            to_index: to_index.cloned(),
3330                            to_extra: None,
3331                            to_group: None,
3332                            marker: *dependency_marker,
3333                        };
3334                        edges.push(edge);
3335                    }
3336
3337                    PubGrubPackageInner::Extra {
3338                        name: ref dependency_name,
3339                        extra: ref dependency_extra,
3340                        marker: ref dependency_marker,
3341                    } => {
3342                        if self_group.is_none() {
3343                            debug_assert!(
3344                                self_name != Some(dependency_name),
3345                                "Extras should be flattened"
3346                            );
3347                        }
3348                        let (to_url, to_index) = self.source(dependency_name, dependency_version);
3349
3350                        // Insert an edge from the dependent package to the extra package.
3351                        let edge = ResolutionDependencyEdge {
3352                            from: self_name.cloned(),
3353                            from_version: self_version.clone(),
3354                            from_url: self_url.cloned(),
3355                            from_index: self_index.cloned(),
3356                            from_extra: self_extra.cloned(),
3357                            from_group: self_group.cloned(),
3358                            to: dependency_name.clone(),
3359                            to_version: dependency_version.clone(),
3360                            to_url: to_url.cloned(),
3361                            to_index: to_index.cloned(),
3362                            to_extra: Some(dependency_extra.clone()),
3363                            to_group: None,
3364                            marker: *dependency_marker,
3365                        };
3366                        edges.push(edge);
3367
3368                        // Insert an edge from the dependent package to the base package.
3369                        let edge = ResolutionDependencyEdge {
3370                            from: self_name.cloned(),
3371                            from_version: self_version.clone(),
3372                            from_url: self_url.cloned(),
3373                            from_index: self_index.cloned(),
3374                            from_extra: self_extra.cloned(),
3375                            from_group: self_group.cloned(),
3376                            to: dependency_name.clone(),
3377                            to_version: dependency_version.clone(),
3378                            to_url: to_url.cloned(),
3379                            to_index: to_index.cloned(),
3380                            to_extra: None,
3381                            to_group: None,
3382                            marker: *dependency_marker,
3383                        };
3384                        edges.push(edge);
3385                    }
3386
3387                    PubGrubPackageInner::Group {
3388                        name: ref dependency_name,
3389                        group: ref dependency_group,
3390                        marker: ref dependency_marker,
3391                    } => {
3392                        debug_assert!(
3393                            self_name != Some(dependency_name),
3394                            "Groups should be flattened"
3395                        );
3396
3397                        let (to_url, to_index) = self.source(dependency_name, dependency_version);
3398
3399                        // Add an edge from the dependent package to the dev package, but _not_ the
3400                        // base package.
3401                        let edge = ResolutionDependencyEdge {
3402                            from: self_name.cloned(),
3403                            from_version: self_version.clone(),
3404                            from_url: self_url.cloned(),
3405                            from_index: self_index.cloned(),
3406                            from_extra: self_extra.cloned(),
3407                            from_group: self_group.cloned(),
3408                            to: dependency_name.clone(),
3409                            to_version: dependency_version.clone(),
3410                            to_url: to_url.cloned(),
3411                            to_index: to_index.cloned(),
3412                            to_extra: None,
3413                            to_group: Some(dependency_group.clone()),
3414                            marker: *dependency_marker,
3415                        };
3416                        edges.push(edge);
3417                    }
3418
3419                    _ => {}
3420                }
3421            }
3422        }
3423
3424        let nodes = solution
3425            .into_iter()
3426            .filter_map(|(package, version)| {
3427                if let PubGrubPackageInner::Package {
3428                    name,
3429                    extra,
3430                    group,
3431                    marker: MarkerTree::TRUE,
3432                } = &*self.pubgrub.package_store[package]
3433                {
3434                    let (url, index) = self.source(name, &version);
3435                    Some((
3436                        ResolutionPackage {
3437                            name: name.clone(),
3438                            extra: extra.clone(),
3439                            dev: group.clone(),
3440                            url: url.cloned(),
3441                            index: index.cloned(),
3442                        },
3443                        version,
3444                    ))
3445                } else {
3446                    None
3447                }
3448            })
3449            .collect();
3450
3451        Resolution {
3452            nodes,
3453            edges,
3454            pins: self.pins,
3455            env: self.env,
3456        }
3457    }
3458}
3459
3460/// The resolution from a single fork including the virtual packages and the edges between them.
3461#[derive(Debug)]
3462pub(crate) struct Resolution {
3463    pub(crate) nodes: FxHashMap<ResolutionPackage, Version>,
3464    /// The directed connections between the nodes, where the marker is the edge weight. We don't
3465    /// store the requirement itself, but it can be retrieved from the package metadata.
3466    pub(crate) edges: Vec<ResolutionDependencyEdge>,
3467    /// Map each (package name, version) tuple from `nodes` to a distribution.
3468    pub(crate) pins: FilePins,
3469    /// The environment setting this resolution was found under.
3470    pub(crate) env: ResolverEnvironment,
3471}
3472
3473/// Package representation used during resolution, in which each extra and each dependency
3474/// group is modeled as its own package.
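///
/// As a hypothetical illustration, `flask`, `flask[dotenv]`, and the `dev` group of `flask`
/// are three distinct [`ResolutionPackage`] values during resolution.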
3475#[derive(Clone, Debug, Eq, Hash, PartialEq)]
3476pub(crate) struct ResolutionPackage {
3477    pub(crate) name: PackageName,
3478    pub(crate) extra: Option<ExtraName>,
3479    pub(crate) dev: Option<GroupName>,
3480    /// For registry packages, this is `None`; otherwise, the direct URL of the distribution.
3481    pub(crate) url: Option<VerbatimParsedUrl>,
3482    /// For URL packages, this is `None`; otherwise, the index URL of the distribution.
3483    pub(crate) index: Option<IndexUrl>,
3484}
3485
3486/// The `from_` fields and the `to_` fields map to the originating and target
3487/// [`ResolutionPackage`], respectively. The `marker` is the edge weight.
3488#[derive(Clone, Debug, Eq, Hash, PartialEq)]
3489pub(crate) struct ResolutionDependencyEdge {
3490    /// This value is `None` if the dependency comes from the root package.
3491    pub(crate) from: Option<PackageName>,
3492    pub(crate) from_version: Version,
3493    pub(crate) from_url: Option<VerbatimParsedUrl>,
3494    pub(crate) from_index: Option<IndexUrl>,
3495    pub(crate) from_extra: Option<ExtraName>,
3496    pub(crate) from_group: Option<GroupName>,
3497    pub(crate) to: PackageName,
3498    pub(crate) to_version: Version,
3499    pub(crate) to_url: Option<VerbatimParsedUrl>,
3500    pub(crate) to_index: Option<IndexUrl>,
3501    pub(crate) to_extra: Option<ExtraName>,
3502    pub(crate) to_group: Option<GroupName>,
3503    pub(crate) marker: MarkerTree,
3504}
3505
3506impl ResolutionDependencyEdge {
3507    pub(crate) fn universal_marker(&self) -> UniversalMarker {
3508        // We specifically do not account for conflict
3509        // markers here. Instead, those are computed via
3510        // a traversal on the resolution graph.
3511        UniversalMarker::new(self.marker, ConflictMarker::TRUE)
3512    }
3513}
3514
3515/// Fetch the metadata for an item
3516#[derive(Debug)]
3517#[allow(clippy::large_enum_variant)]
3518pub(crate) enum Request {
3519    /// A request to fetch the metadata for a package.
3520    Package(PackageName, Option<IndexMetadata>),
3521    /// A request to fetch the metadata for a built or source distribution.
3522    Dist(Dist),
3523    /// A request to fetch the metadata from an already-installed distribution.
3524    Installed(InstalledDist),
3525    /// A request to pre-fetch the metadata for a package and the best-guess distribution.
3526    Prefetch(PackageName, Range<Version>, PythonRequirement),
3527}
3528
3529impl<'a> From<ResolvedDistRef<'a>> for Request {
3530    fn from(dist: ResolvedDistRef<'a>) -> Self {
3531        // N.B. This is almost identical to `ResolvedDistRef::to_owned`, but
3532        // creates a `Request` instead of a `ResolvedDist`. There's probably
3533        // some room for DRYing this up a bit. The obvious way would be to
3534        // add a method to create a `Dist`, but a `Dist` cannot be represented
3535        // as an installed dist.
3536        match dist {
3537            ResolvedDistRef::InstallableRegistrySourceDist { sdist, prioritized } => {
3538                // This is okay because we're only here if the prioritized dist
3539                // has an sdist, so this always succeeds.
3540                let source = prioritized.source_dist().expect("a source distribution");
3541                assert_eq!(
3542                    (&sdist.name, &sdist.version),
3543                    (&source.name, &source.version),
3544                    "expected chosen sdist to match prioritized sdist"
3545                );
3546                Self::Dist(Dist::Source(SourceDist::Registry(source)))
3547            }
3548            ResolvedDistRef::InstallableRegistryBuiltDist {
3549                wheel, prioritized, ..
3550            } => {
3551                assert_eq!(
3552                    Some(&wheel.filename),
3553                    prioritized.best_wheel().map(|(wheel, _)| &wheel.filename),
3554                    "expected chosen wheel to match best wheel"
3555                );
3556                // This is okay because we're only here if the prioritized dist
3557                // has at least one wheel, so this always succeeds.
3558                let built = prioritized.built_dist().expect("at least one wheel");
3559                Self::Dist(Dist::Built(BuiltDist::Registry(built)))
3560            }
3561            ResolvedDistRef::Installed { dist } => Self::Installed(dist.clone()),
3562        }
3563    }
3564}
3565
3566impl Display for Request {
3567    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
3568        match self {
3569            Self::Package(package_name, _) => {
3570                write!(f, "Versions {package_name}")
3571            }
3572            Self::Dist(dist) => {
3573                write!(f, "Metadata {dist}")
3574            }
3575            Self::Installed(dist) => {
3576                write!(f, "Installed metadata {dist}")
3577            }
3578            Self::Prefetch(package_name, range, _) => {
3579                write!(f, "Prefetch {package_name} {range}")
3580            }
3581        }
3582    }
3583}
3584
3585#[derive(Debug)]
3586#[allow(clippy::large_enum_variant)]
3587enum Response {
3588    /// The returned metadata for a package hosted on a registry.
3589    Package(PackageName, Option<IndexUrl>, VersionsResponse),
3590    /// The returned metadata for a distribution.
3591    Dist {
3592        dist: Dist,
3593        metadata: MetadataResponse,
3594    },
3595    /// The returned metadata for an already-installed distribution.
3596    Installed {
3597        dist: InstalledDist,
3598        metadata: MetadataResponse,
3599    },
3600}
3601
3602/// Information about the dependencies for a particular package.
3603///
3604/// This effectively distills the dependency metadata of a package down into
3605/// its pubgrub-specific constituent parts: each dependency package has a range
3606/// of possible versions.
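///
/// As an illustrative sketch (requirement is hypothetical), `requests >=2.31` becomes a
/// [`PubGrubDependency`] on the `requests` package with the version range `>=2.31`.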
3607enum Dependencies {
3608    /// Package dependencies are not available.
3609    Unavailable(UnavailableVersion),
3610    /// Container for all available package versions.
3611    ///
3612    /// Note that in universal mode, it is possible and allowed for multiple
3613    /// `PubGrubPackage` values in this list to have the same package name.
3614    /// These conflicts are resolved via `Dependencies::fork`.
3615    Available(Vec<PubGrubDependency>),
3616    /// Dependencies that should never result in a fork.
3617    ///
3618    /// For example, the dependencies of a `Marker` package will have the
3619    /// same name and version, but differ according to marker expressions.
3620    /// But we never want this to result in a fork.
3621    Unforkable(Vec<PubGrubDependency>),
3622}
3623
3624impl Dependencies {
3625    /// Turn this flat list of dependencies into a potential set of forked
3626    /// groups of dependencies.
3627    ///
3628    /// A fork *only* occurs when there are multiple dependencies with the same
3629    /// name *and* those dependency specifications have corresponding marker
3630    /// expressions that are completely disjoint with one another.
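    ///
    /// For example (specifications are illustrative):
    ///
    /// ```text
    /// numpy >=1.26 ; python_version >= "3.9"
    /// numpy <1.26  ; python_version <  "3.9"
    /// ```
    ///
    /// These share a name and have disjoint markers, so they yield two forks; had the markers
    /// overlapped, no fork would occur.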
3631    fn fork(
3632        self,
3633        env: &ResolverEnvironment,
3634        python_requirement: &PythonRequirement,
3635        conflicts: &Conflicts,
3636    ) -> ForkedDependencies {
3637        let deps = match self {
3638            Self::Available(deps) => deps,
3639            Self::Unforkable(deps) => return ForkedDependencies::Unforked(deps),
3640            Self::Unavailable(err) => return ForkedDependencies::Unavailable(err),
3641        };
3642        let mut name_to_deps: BTreeMap<PackageName, Vec<PubGrubDependency>> = BTreeMap::new();
3643        for dep in deps {
3644            let name = dep
3645                .package
3646                .name()
3647                .expect("dependency always has a name")
3648                .clone();
3649            name_to_deps.entry(name).or_default().push(dep);
3650        }
3651        let Forks {
3652            mut forks,
3653            diverging_packages,
3654        } = Forks::new(name_to_deps, env, python_requirement, conflicts);
3655        if forks.is_empty() {
3656            ForkedDependencies::Unforked(vec![])
3657        } else if forks.len() == 1 {
3658            ForkedDependencies::Unforked(forks.pop().unwrap().dependencies)
3659        } else {
3660            ForkedDependencies::Forked {
3661                forks,
3662                diverging_packages: diverging_packages.into_iter().collect(),
3663            }
3664        }
3665    }
3666}
3667
3668/// Information about the (possibly forked) dependencies for a particular
3669/// package.
3670///
3671/// This is like `Dependencies` but with an extra variant that only occurs when
3672/// a `Dependencies` list has multiple dependency specifications with the same
3673/// name and non-overlapping marker expressions (i.e., a fork occurs).
3674#[derive(Debug)]
3675enum ForkedDependencies {
3676    /// Package dependencies are not available.
3677    Unavailable(UnavailableVersion),
3678    /// No forking occurred.
3679    ///
3680    /// This is the same as `Dependencies::Available`.
3681    Unforked(Vec<PubGrubDependency>),
3682    /// Forked containers for all available package versions.
3683    ///
3684    /// Note that there are always at least two forks. If there would
3685    /// be fewer than two forks, then there is no fork at all and the
3686    /// `Unforked` variant is used instead.
3687    Forked {
3688        forks: Vec<Fork>,
3689        /// The package(s) with different requirements for disjoint markers.
3690        diverging_packages: Vec<PackageName>,
3691    },
3692}
3693
3694/// A list of forks determined from the dependencies of a single package.
3695///
3696/// Any time a marker expression is seen that is not true for all possible
3697/// marker environments, it is possible for it to introduce a new fork.
3698#[derive(Debug, Default)]
3699struct Forks {
3700    /// The forks discovered among the dependencies.
3701    forks: Vec<Fork>,
3702    /// The package(s) that provoked at least one additional fork.
3703    diverging_packages: BTreeSet<PackageName>,
3704}
3705
3706impl Forks {
3707    fn new(
3708        name_to_deps: BTreeMap<PackageName, Vec<PubGrubDependency>>,
3709        env: &ResolverEnvironment,
3710        python_requirement: &PythonRequirement,
3711        conflicts: &Conflicts,
3712    ) -> Self {
3713        let python_marker = python_requirement.to_marker_tree();
3714
3715        let mut forks = vec![Fork::new(env.clone())];
3716        let mut diverging_packages = BTreeSet::new();
3717        for (name, mut deps) in name_to_deps {
3718            assert!(!deps.is_empty(), "every name has at least one dependency");
3719            // We never fork if there's only one dependency
3720            // specification for a given package name. This particular
3721            // strategy results in a "conservative" approach to forking
3722            // that gives up correctness in some cases in exchange for
3723            // more limited forking. More limited forking results in
3724            // simpler-and-easier-to-understand lock files and faster
3725            // resolving. The correctness we give up manifests when
3726            // two transitive non-sibling dependencies conflict. In
3727            // that case, we don't detect the fork ahead of time (at
3728            // present).
3729            if let [dep] = deps.as_slice() {
3730                // There's one exception: if the requirement increases the minimum-supported Python
3731                // version, we also fork in order to respect that minimum in the subsequent
3732                // resolution.
3733                //
3734                // For example, given `requires-python = ">=3.7"` and `uv ; python_version >= "3.8"`,
3735                // where uv itself only supports Python 3.8 and later, we need to fork to ensure
3736                // that the resolution can find a solution.
3737                if marker::requires_python(dep.package.marker())
3738                    .is_none_or(|bound| !python_requirement.raises(&bound))
3739                {
3740                    let dep = deps.pop().unwrap();
3741                    let marker = dep.package.marker();
3742                    for fork in &mut forks {
3743                        if fork.env.included_by_marker(marker) {
3744                            fork.add_dependency(dep.clone());
3745                        }
3746                    }
3747                    continue;
3748                }
3749            } else {
3750                // If all dependencies have the same markers, we should also avoid forking.
3751                if let Some(dep) = deps.first() {
3752                    let marker = dep.package.marker();
3753                    if deps.iter().all(|dep| marker == dep.package.marker()) {
3754                        // Unless that "same marker" is a Python requirement that is stricter than
3755                        // the current Python requirement. In that case, we need to fork to respect
3756                        // the stricter requirement.
3757                        if marker::requires_python(marker)
3758                            .is_none_or(|bound| !python_requirement.raises(&bound))
3759                        {
3760                            for dep in deps {
3761                                for fork in &mut forks {
3762                                    if fork.env.included_by_marker(marker) {
3763                                        fork.add_dependency(dep.clone());
3764                                    }
3765                                }
3766                            }
3767                            continue;
3768                        }
3769                    }
3770                }
3771            }
3772            for dep in deps {
3773                let mut forker = match ForkingPossibility::new(env, &dep) {
3774                    ForkingPossibility::Possible(forker) => forker,
3775                    ForkingPossibility::DependencyAlwaysExcluded => {
3776                        // If the markers can never be satisfied by the parent
3777                        // fork, then we can drop this dependency unceremoniously.
3778                        continue;
3779                    }
3780                    ForkingPossibility::NoForkingPossible => {
3781                        // Or, if the markers are always true, then we just
3782                        // add the dependency to every fork unconditionally.
3783                        for fork in &mut forks {
3784                            fork.add_dependency(dep.clone());
3785                        }
3786                        continue;
3787                    }
3788                };
3789                // Otherwise, we will most likely need to add a new fork...
3790                diverging_packages.insert(name.clone());
3791
3792                let mut new = vec![];
3793                for fork in std::mem::take(&mut forks) {
3794                    let Some((remaining_forker, envs)) = forker.fork(&fork.env) else {
3795                        new.push(fork);
3796                        continue;
3797                    };
3798                    forker = remaining_forker;
3799
3800                    for fork_env in envs {
3801                        let mut new_fork = fork.clone();
3802                        new_fork.set_env(fork_env);
3803                        // We only add the dependency to this fork if it
3804                        // satisfies the fork's markers. Some forks are
3805                        // specifically created to exclude this dependency,
3806                        // so this isn't always true!
3807                        if forker.included(&new_fork.env) {
3808                            new_fork.add_dependency(dep.clone());
3809                        }
3810                        // Filter out any forks we created that are disjoint with our
3811                        // Python requirement.
3812                        if new_fork.env.included_by_marker(python_marker) {
3813                            new.push(new_fork);
3814                        }
3815                    }
3816                }
3817                forks = new;
3818            }
3819        }
3820        // When there is a conflicting group configuration, we need
3821        // to potentially add more forks. Each fork added contains an
3822        // exclusion list of conflicting groups where dependencies with
3823        // the corresponding package and extra name are forcefully
3824        // excluded from that group.
3825        //
3826        // We specifically iterate on conflicting groups and
3827        // potentially re-generate all forks for each one. We do it
3828        // this way in case there are multiple sets of conflicting
3829        // groups that impact the forks here.
3830        //
3831        // For example, if we have conflicting groups {x1, x2} and {x3,
3832        // x4}, we need to make sure the forks generated from one set
3833        // also account for the other set.
3834        for set in conflicts.iter() {
3835            let mut new = vec![];
3836            for fork in std::mem::take(&mut forks) {
3837                let mut has_conflicting_dependency = false;
3838                for item in set.iter() {
3839                    if fork.contains_conflicting_item(item.as_ref()) {
3840                        has_conflicting_dependency = true;
3841                        diverging_packages.insert(item.package().clone());
3842                        break;
3843                    }
3844                }
3845                if !has_conflicting_dependency {
3846                    new.push(fork);
3847                    continue;
3848                }
3849
3850                // Create a fork that excludes ALL conflicts.
3851                if let Some(fork_none) = fork.clone().filter(set.iter().cloned().map(Err)) {
3852                    new.push(fork_none);
3853                }
3854
3855                // Now create a fork for each conflicting group, where
3856                // that fork excludes every *other* conflicting group.
3857                //
3858                // So if we have conflicting extras foo, bar and baz,
3859                // then this creates three forks: one that excludes
3860                // {foo, bar}, one that excludes {foo, baz} and one
3861                // that excludes {bar, baz}.
3862                for (i, _) in set.iter().enumerate() {
3863                    let fork_allows_group = fork.clone().filter(
3864                        set.iter()
3865                            .cloned()
3866                            .enumerate()
3867                            .map(|(j, group)| if i == j { Ok(group) } else { Err(group) }),
3868                    );
3869                    if let Some(fork_allows_group) = fork_allows_group {
3870                        new.push(fork_allows_group);
3871                    }
3872                }
3873            }
3874            forks = new;
3875        }
3876        Self {
3877            forks,
3878            diverging_packages,
3879        }
3880    }
3881}
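
// A minimal sketch of the conflict-set fork generation above, using plain strings in place of
// `ConflictItem`: for a conflicting set {foo, bar, baz}, one fork excludes every item, and each
// remaining fork allows exactly one item while excluding the rest, mirroring the `Ok`/`Err`
// rules passed to `Fork::filter`. The names and types here are hypothetical.
#[cfg(test)]
mod conflict_fork_sketch {
    #[test]
    fn one_fork_per_allowed_item_plus_one_excluding_all() {
        let set = ["foo", "bar", "baz"];

        // Each fork is described by its rules: `Ok` allows an item, `Err` excludes it.
        let mut forks: Vec<Vec<Result<&str, &str>>> = vec![
            // The fork that excludes ALL conflicting items.
            set.iter().map(|item| Err(*item)).collect(),
        ];
        // One fork per item, excluding every *other* item in the set.
        for (i, _) in set.iter().enumerate() {
            forks.push(
                set.iter()
                    .enumerate()
                    .map(|(j, item)| if i == j { Ok(*item) } else { Err(*item) })
                    .collect(),
            );
        }

        assert_eq!(forks.len(), 4);
        // The fork that allows `bar` excludes `foo` and `baz`.
        assert_eq!(forks[2], vec![Err("foo"), Ok("bar"), Err("baz")]);
    }
}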
3882
3883/// A single fork in a list of dependencies.
3884///
3885/// A fork corresponds to the full list of dependencies for a package,
3886/// but with any conflicting dependency specifications omitted. For
3887/// example, if we have `a<2 ; sys_platform == 'foo'` and `a>=2 ;
3888/// sys_platform == 'bar'`, then because the dependency specifications
3889/// have the same name and because the marker expressions are disjoint,
3890/// a fork occurs. One fork will contain `a<2` but not `a>=2`, while
3891/// the other fork will contain `a>=2` but not `a<2`.
3892#[derive(Clone, Debug)]
3893struct Fork {
3894    /// The list of dependencies for this fork, guaranteed to be conflict
3895    /// free. (i.e., There are no two packages with the same name with
3896    /// non-overlapping marker expressions.)
3897    ///
3898    /// Note that callers shouldn't mutate this sequence directly. Instead,
3899    /// they should use `add_dependency`. Namely,
3900    /// it should be impossible for a package with a marker expression that is
3901    /// disjoint from the marker expression on this fork to be added.
3902    dependencies: Vec<PubGrubDependency>,
3903    /// The conflicting groups in this fork.
3904    ///
3905    /// This exists to make some access patterns more efficient. Namely,
3906    /// it makes it easy to check whether there's a dependency with a
3907    /// particular conflicting group in this fork.
3908    conflicts: crate::FxHashbrownSet<ConflictItem>,
3909    /// The resolver environment for this fork.
3910    ///
3911    /// Principally, this corresponds to the markers in this fork. So in the
3912    /// example above, the `a<2` fork would have `sys_platform == 'foo'`, while
3913    /// the `a>=2` fork would have `sys_platform == 'bar'`.
3914    ///
3915    /// If this fork was generated from another fork, then this *includes*
3916    /// the criteria from its parent. i.e., Its marker expression represents
3917    /// the intersection of the marker expression from its parent and any
3918    /// additional marker expression generated by further forking based on
3919    /// conflicting dependency specifications.
3920    env: ResolverEnvironment,
3921}
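
// A minimal sketch of the invariant documented above, with the marker algebra reduced to sets of
// `sys_platform` values (a hypothetical simplification of `MarkerTree`/`ResolverEnvironment`):
// a dependency is only added to a fork whose environment overlaps its marker, so the
// `a<2 ; sys_platform == 'foo'` specification never lands in the `sys_platform == 'bar'` fork.
// `Marker` and `included` are illustrative names, not resolver APIs.
#[cfg(test)]
mod fork_env_sketch {
    use std::collections::BTreeSet;

    type Marker = BTreeSet<&'static str>;

    /// A dependency is included if its marker is satisfiable within the fork's environment.
    fn included(fork_env: &Marker, dep_marker: &Marker) -> bool {
        fork_env.intersection(dep_marker).next().is_some()
    }

    #[test]
    fn disjoint_markers_split_across_forks() {
        let fork_foo: Marker = ["foo"].into_iter().collect();
        let fork_bar: Marker = ["bar"].into_iter().collect();
        let a_lt_2: Marker = ["foo"].into_iter().collect();
        let a_ge_2: Marker = ["bar"].into_iter().collect();

        assert!(included(&fork_foo, &a_lt_2));
        assert!(!included(&fork_foo, &a_ge_2));
        assert!(included(&fork_bar, &a_ge_2));
        assert!(!included(&fork_bar, &a_lt_2));
    }
}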
3922
3923impl Fork {
3924    /// Create a new fork with no dependencies with the given resolver
3925    /// environment.
3926    fn new(env: ResolverEnvironment) -> Self {
3927        Self {
3928            dependencies: vec![],
3929            conflicts: crate::FxHashbrownSet::default(),
3930            env,
3931        }
3932    }
3933
3934    /// Add a dependency to this fork.
3935    fn add_dependency(&mut self, dep: PubGrubDependency) {
3936        if let Some(conflicting_item) = dep.conflicting_item() {
3937            self.conflicts.insert(conflicting_item.to_owned());
3938        }
3939        self.dependencies.push(dep);
3940    }
3941
3942    /// Sets the resolver environment to the one given.
3943    ///
3944    /// Any dependency in this fork that does not satisfy the given environment
3945    /// is removed.
3946    fn set_env(&mut self, env: ResolverEnvironment) {
3947        self.env = env;
3948        self.dependencies.retain(|dep| {
3949            let marker = dep.package.marker();
3950            if self.env.included_by_marker(marker) {
3951                return true;
3952            }
3953            if let Some(conflicting_item) = dep.conflicting_item() {
3954                self.conflicts.remove(&conflicting_item);
3955            }
3956            false
3957        });
3958    }
3959
3960    /// Returns true if this fork contains a dependency with the
3961    /// given package and extra values.
3962    fn contains_conflicting_item(&self, item: ConflictItemRef<'_>) -> bool {
3963        self.conflicts.contains(&item)
3964    }
3965
3966    /// Include or exclude the given groups in this fork.
3967    ///
3968    /// This removes all dependencies matching the conflicting groups that the rules exclude.
3969    ///
3970    /// If the exclusion rules would result in a fork with an unsatisfiable
3971    /// resolver environment, then this returns `None`.
3972    fn filter(
3973        mut self,
3974        rules: impl IntoIterator<Item = Result<ConflictItem, ConflictItem>>,
3975    ) -> Option<Self> {
3976        self.env = self.env.filter_by_group(rules)?;
3977        self.dependencies.retain(|dep| {
3978            let Some(conflicting_item) = dep.conflicting_item() else {
3979                return true;
3980            };
3981            if self.env.included_by_group(conflicting_item) {
3982                return true;
3983            }
3984            match conflicting_item.kind() {
3985                // We should not filter entire projects unless they're a top-level dependency
3986                // Otherwise, we'll fail to solve for children of the project, like extras
3987                ConflictKindRef::Project => {
3988                    if dep.parent.is_some() {
3989                        return true;
3990                    }
3991                }
3992                ConflictKindRef::Group(_) => {}
3993                ConflictKindRef::Extra(_) => {}
3994            }
3995            self.conflicts.remove(&conflicting_item);
3996            false
3997        });
3998        Some(self)
3999    }
4000
4001    /// Compare forks, preferring forks with higher `requires-python` requirements.
4002    fn cmp_requires_python(&self, other: &Self) -> Ordering {
4003        // A higher `requires-python` requirement indicates a _higher-priority_ fork.
4004        //
4005        // This ordering ensures that we prefer choosing the highest version for each fork based on
4006        // its `requires-python` requirement.
4007        //
4008        // The reverse would prefer choosing fewer versions, at the cost of using older package
4009        // versions on newer Python versions. For example, if reversed, we'd prefer to solve `<3.7`
4010        // before solving `>=3.7`, since the resolution produced by the former might work for the
4011        // latter, but the inverse is unlikely to be true.
4012        let self_bound = self.env.requires_python().unwrap_or_default();
4013        let other_bound = other.env.requires_python().unwrap_or_default();
4014        self_bound.lower().cmp(other_bound.lower())
4015    }
4016
4017    /// Compare forks, preferring forks with upper bounds.
4018    fn cmp_upper_bounds(&self, other: &Self) -> Ordering {
4019        // We'd prefer to solve `numpy <= 2` before solving `numpy >= 1`, since the resolution
4020        // produced by the former might work for the latter, but the inverse is unlikely to be true
4021        // due to maximum version selection. (Selecting `numpy==2.0.0` would satisfy both forks, but
4022        // selecting the latest `numpy` would not.)
4023        let self_upper_bounds = self
4024            .dependencies
4025            .iter()
4026            .filter(|dep| {
4027                dep.version
4028                    .bounding_range()
4029                    .is_some_and(|(_, upper)| !matches!(upper, Bound::Unbounded))
4030            })
4031            .count();
4032        let other_upper_bounds = other
4033            .dependencies
4034            .iter()
4035            .filter(|dep| {
4036                dep.version
4037                    .bounding_range()
4038                    .is_some_and(|(_, upper)| !matches!(upper, Bound::Unbounded))
4039            })
4040            .count();
4041
4042        self_upper_bounds.cmp(&other_upper_bounds)
4043    }
4044}
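
// A minimal sketch of the ordering heuristic in `cmp_upper_bounds` above, with each dependency
// reduced to an optional upper bound on its version. The `u32` versions and the function names
// are hypothetical stand-ins for `PubGrubDependency` and its version range; the point is that
// the fork with more upper-bounded requirements (e.g. `numpy <= 2`) compares as `Greater` than
// one with only lower bounds (e.g. `numpy >= 1`).
#[cfg(test)]
mod fork_ordering_sketch {
    use std::cmp::Ordering;
    use std::ops::Bound;

    /// Count how many requirements carry a finite upper bound.
    fn count_upper_bounds(upper_bounds: &[Bound<u32>]) -> usize {
        upper_bounds
            .iter()
            .filter(|upper| !matches!(upper, Bound::Unbounded))
            .count()
    }

    #[test]
    fn prefer_the_fork_with_more_upper_bounds() {
        // One fork pins `numpy <= 2`; the other only has `numpy >= 1` (no upper bound).
        let bounded: Vec<Bound<u32>> = vec![Bound::Included(2)];
        let unbounded: Vec<Bound<u32>> = vec![Bound::Unbounded];

        let ordering = count_upper_bounds(&bounded).cmp(&count_upper_bounds(&unbounded));
        assert_eq!(ordering, Ordering::Greater);
    }
}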
4045
4046impl Eq for Fork {}
4047
4048impl PartialEq for Fork {
4049    fn eq(&self, other: &Self) -> bool {
4050        self.dependencies == other.dependencies && self.env == other.env
4051    }
4052}
4053
4054#[derive(Debug, Clone)]
4055pub(crate) struct VersionFork {
4056    /// The environment to use in the fork.
4057    env: ResolverEnvironment,
4058    /// The initial package to select in the fork.
4059    id: Id<PubGrubPackage>,
4060    /// The initial version to set for the selected package in the fork.
4061    version: Option<Version>,
4062}
4063
4064/// Enrich a [`ResolveError`] with additional information about why a given package was included.
4065fn enrich_dependency_error(
4066    error: ResolveError,
4067    id: Id<PubGrubPackage>,
4068    version: &Version,
4069    pubgrub: &State<UvDependencyProvider>,
4070) -> ResolveError {
4071    let Some(name) = pubgrub.package_store[id].name_no_root() else {
4072        return error;
4073    };
4074    let chain = DerivationChainBuilder::from_state(id, version, pubgrub).unwrap_or_default();
4075    ResolveError::Dependencies(Box::new(error), name.clone(), version.clone(), chain)
4076}
4077
4078/// Compute the set of markers for which a package is known to be relevant.
4079fn find_environments(id: Id<PubGrubPackage>, state: &State<UvDependencyProvider>) -> MarkerTree {
4080    let package = &state.package_store[id];
4081    if package.is_root() {
4082        return MarkerTree::TRUE;
4083    }
4084
4085    // Retrieve the incompatibilities for the current package.
4086    let Some(incompatibilities) = state.incompatibilities.get(&id) else {
4087        return MarkerTree::FALSE;
4088    };
4089
4090    // Find all dependencies on the current package.
4091    let mut marker = MarkerTree::FALSE;
4092    for index in incompatibilities {
4093        let incompat = &state.incompatibility_store[*index];
4094        if let Kind::FromDependencyOf(id1, _, id2, _) = &incompat.kind {
4095            if id == *id2 {
4096                marker.or({
4097                    let mut marker = package.marker();
4098                    marker.and(find_environments(*id1, state));
4099                    marker
4100                });
4101            }
4102        }
4103    }
4104    marker
4105}
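
// A minimal sketch of the traversal in `find_environments` above, with marker expressions
// reduced to sets of `sys_platform` values and the pubgrub state reduced to a plain, acyclic
// edge list (all names and types here are hypothetical simplifications): a package is relevant
// in the union, over every edge pointing at it, of that edge's marker intersected with the
// environments in which its parent is relevant.
#[cfg(test)]
mod find_environments_sketch {
    use std::collections::BTreeSet;

    type Env = BTreeSet<&'static str>;

    /// `edges` is a list of `(parent, child, marker)` entries; the root is relevant everywhere.
    fn relevant(package: &str, root: &str, all: &Env, edges: &[(&str, &str, Env)]) -> Env {
        if package == root {
            return all.clone();
        }
        let mut result = Env::new();
        for (parent, child, marker) in edges {
            if *child == package {
                let parent_envs = relevant(parent, root, all, edges);
                result.extend(marker.intersection(&parent_envs).copied());
            }
        }
        result
    }

    #[test]
    fn markers_narrow_along_the_dependency_chain() {
        let all: Env = ["linux", "darwin", "win32"].into_iter().collect();
        let edges: Vec<(&str, &str, Env)> = vec![
            ("root", "a", ["linux", "darwin"].into_iter().collect()),
            ("a", "b", ["linux", "win32"].into_iter().collect()),
        ];
        // `b` is only relevant where both edges apply, i.e. on Linux.
        let expected: Env = ["linux"].into_iter().collect();
        assert_eq!(relevant("b", "root", &all, &edges), expected);
    }
}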
4106
4107#[derive(Debug, Default, Clone)]
4108struct ConflictTracker {
4109    /// How often a decision for this package was discarded due to a conflict with a package decided earlier.
4110    affected: FxHashMap<Id<PubGrubPackage>, usize>,
4111    /// Package(s) to be prioritized after the next unit propagation
4112    ///
4113    /// Distilled from `affected` for fast checking in the hot loop.
4114    prioritize: Vec<Id<PubGrubPackage>>,
4115    /// How often a package was decided earlier and caused another package to be discarded.
4116    culprit: FxHashMap<Id<PubGrubPackage>, usize>,
4117    /// Package(s) to be de-prioritized after the next unit propagation
4118    ///
4119    /// Distilled from `culprit` for fast checking in the hot loop.
4120    deprioritize: Vec<Id<PubGrubPackage>>,
4121}
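
// A minimal sketch of how the counters above can be distilled into the fast `prioritize` /
// `deprioritize` lists. The `u32` package ids stand in for `Id<PubGrubPackage>`, and
// `CONFLICT_THRESHOLD` is a hypothetical cutoff; the real threshold and call sites live
// elsewhere in the resolver.
#[cfg(test)]
mod conflict_tracker_sketch {
    use std::collections::HashMap;

    const CONFLICT_THRESHOLD: usize = 5;

    #[derive(Default)]
    struct TrackerSketch {
        affected: HashMap<u32, usize>,
        prioritize: Vec<u32>,
    }

    impl TrackerSketch {
        /// Record that a decision for `package` was discarded because of an earlier decision.
        fn record_affected(&mut self, package: u32) {
            let count = self.affected.entry(package).or_default();
            *count += 1;
            // Once a package has been thrown back often enough, queue it so the next unit
            // propagation can pick it up from the fast `prioritize` list instead of rescanning
            // the full counter map.
            if *count == CONFLICT_THRESHOLD {
                self.prioritize.push(package);
            }
        }
    }

    #[test]
    fn repeated_conflicts_promote_a_package() {
        let mut tracker = TrackerSketch::default();
        for _ in 0..CONFLICT_THRESHOLD {
            tracker.record_affected(7);
        }
        assert_eq!(tracker.prioritize, vec![7]);
    }
}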