pubgrub/internal/core.rs
// SPDX-License-Identifier: MPL-2.0

//! Core model and functions
//! to write a functional PubGrub algorithm.

use std::collections::HashSet as Set;
use std::sync::Arc;

use crate::internal::{
    Arena, DecisionLevel, HashArena, Id, IncompDpId, IncompId, Incompatibility, PartialSolution,
    Relation, SatisfierSearch, SmallVec,
};
use crate::{DependencyProvider, DerivationTree, Map, NoSolutionError, VersionSet};

/// Current state of the PubGrub algorithm.
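///
/// A rough sketch of how a driver loop might use this state, assuming a
/// `DependencyProvider` implementation `MyProvider` and hypothetical helpers
/// `solution_is_complete` and `choose_next_version` that are not part of this module:
///
/// ```ignore
/// let mut state: State<MyProvider> = State::init(root_package, root_version);
/// let mut next = state.root_package;
/// loop {
///     // Propagate everything that follows from the latest decision.
///     state.unit_propagation(next)?;
///     if solution_is_complete(&state) {
///         break;
///     }
///     // Pick the next package version and register its dependencies.
///     let (package, version, dependencies) = choose_next_version(&state);
///     state.add_package_version_dependencies(package, version, dependencies);
///     next = package;
/// }
/// ```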
#[derive(Clone)]
pub struct State<DP: DependencyProvider> {
    /// The root package and version.
    pub root_package: Id<DP::P>,
    root_version: DP::V,

    /// All incompatibilities indexed by package.
    #[allow(clippy::type_complexity)]
    pub incompatibilities: Map<Id<DP::P>, Vec<IncompDpId<DP>>>,

    /// As an optimization, store the ids of incompatibilities that are already contradicted.
    ///
    /// For each one keep track of the decision level when it was found to be contradicted.
    /// These will stay contradicted until we have backtracked beyond its associated decision level.
    contradicted_incompatibilities: Map<IncompDpId<DP>, DecisionLevel>,

    /// All incompatibilities expressing dependencies,
    /// with common dependents merged.
    #[allow(clippy::type_complexity)]
    merged_dependencies: Map<(Id<DP::P>, Id<DP::P>), SmallVec<IncompDpId<DP>>>,

    /// Partial solution.
    pub partial_solution: PartialSolution<DP>,

    /// The store is the reference storage for all incompatibilities.
    pub incompatibility_store: Arena<Incompatibility<DP::P, DP::VS, DP::M>>,

    /// The store is the reference storage for all packages.
    pub package_store: HashArena<DP::P>,

    /// This is a stack of work to be done in `unit_propagation`.
    /// It can definitely be a local variable to that method, but
    /// this way we can reuse the same allocation for better performance.
    unit_propagation_buffer: SmallVec<Id<DP::P>>,
}

impl<DP: DependencyProvider> State<DP> {
    /// Initialization of PubGrub state.
    pub fn init(root_package: DP::P, root_version: DP::V) -> Self {
        let mut incompatibility_store = Arena::new();
        let mut package_store = HashArena::new();
        let root_package = package_store.alloc(root_package);
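        // The "not root" incompatibility forbids every state in which the root package is not
        // selected at exactly `root_version`; it seeds unit propagation with the first derivation.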
        let not_root_id = incompatibility_store.alloc(Incompatibility::not_root(
            root_package,
            root_version.clone(),
        ));
        let mut incompatibilities = Map::default();
        incompatibilities.insert(root_package, vec![not_root_id]);
        Self {
            root_package,
            root_version,
            incompatibilities,
            contradicted_incompatibilities: Map::default(),
            partial_solution: PartialSolution::empty(),
            incompatibility_store,
            package_store,
            unit_propagation_buffer: SmallVec::Empty,
            merged_dependencies: Map::default(),
        }
    }

    /// Add the dependencies for the current version of the current package as incompatibilities.
    pub fn add_package_version_dependencies(
        &mut self,
        package: Id<DP::P>,
        version: DP::V,
        dependencies: impl IntoIterator<Item = (DP::P, DP::VS)>,
    ) -> Option<IncompId<DP::P, DP::VS, DP::M>> {
        let dep_incompats =
            self.add_incompatibility_from_dependencies(package, version.clone(), dependencies);
        self.partial_solution.add_package_version_incompatibilities(
            package,
            version.clone(),
            dep_incompats,
            &self.incompatibility_store,
        )
    }

    /// Add an incompatibility to the state.
    pub fn add_incompatibility(&mut self, incompat: Incompatibility<DP::P, DP::VS, DP::M>) {
        let id = self.incompatibility_store.alloc(incompat);
        self.merge_incompatibility(id);
    }

    /// Add a single custom incompatibility that requires the base package and the proxy package
    /// to share the same version range.
    ///
    /// This is intended for cases where proxy packages (also known as virtual packages) are used.
    /// Without this information, pubgrub does not know that these packages have to be at the same
    /// version. In cases where the base package is already at an incompatible version, this avoids
    /// going through all versions of the proxy package. In cases where there are two incompatible
    /// proxy packages, it avoids trying versions for both of them. Both improve performance (we
    /// don't need to check all versions when there is a conflict) and error messages (report a
    /// conflict of version ranges instead of enumerating the conflicting versions).
    ///
    /// Using this method requires that each version of the proxy package depends on the exact
    /// version of the base package.
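    ///
    /// A hypothetical sketch, assuming `proxy_id` and `base_id` were obtained from the package
    /// store and `same_versions` is whatever `DP::VS` range both packages must stay within:
    ///
    /// ```ignore
    /// // Whenever the proxy is selected inside `same_versions`,
    /// // the base package must be selected inside `same_versions` as well.
    /// state.add_proxy_package(proxy_id, base_id, same_versions);
    /// ```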
    pub fn add_proxy_package(
        &mut self,
        proxy_package: Id<DP::P>,
        base_package: Id<DP::P>,
        versions: DP::VS,
    ) {
        let incompat = Incompatibility::from_dependency(
            proxy_package,
            versions.clone(),
            (base_package, versions),
        );
        let id = self
            .incompatibility_store
            .alloc_iter([incompat].into_iter());
        for id in IncompDpId::<DP>::range_to_iter(id) {
            self.merge_incompatibility(id);
        }
    }

    /// Add incompatibilities expressing the dependencies of the given package version to the state.
    #[cold]
    pub(crate) fn add_incompatibility_from_dependencies(
        &mut self,
        package: Id<DP::P>,
        version: DP::V,
        deps: impl IntoIterator<Item = (DP::P, DP::VS)>,
    ) -> std::ops::Range<IncompDpId<DP>> {
        // Create incompatibilities and allocate them in the store.
        let new_incompats_id_range =
            self.incompatibility_store
                .alloc_iter(deps.into_iter().map(|(dep_p, dep_vs)| {
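                    // Each dependency becomes one incompatibility of the shape
                    // { package == version, not (dep_p in dep_vs) }.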
                    let dep_pid = self.package_store.alloc(dep_p);
                    Incompatibility::from_dependency(
                        package,
                        <DP::VS as VersionSet>::singleton(version.clone()),
                        (dep_pid, dep_vs),
                    )
                }));
        // Merge the newly created incompatibilities with the older ones.
        for id in IncompDpId::<DP>::range_to_iter(new_incompats_id_range.clone()) {
            self.merge_incompatibility(id);
        }
        new_incompats_id_range
    }

    /// Unit propagation is the core mechanism of the solving algorithm.
    /// CF <https://github.com/dart-lang/pub/blob/master/doc/solver.md#unit-propagation>
    ///
    /// For each package with a satisfied incompatibility, returns the package and the root cause
    /// incompatibility.
    #[cold]
    #[allow(clippy::type_complexity)] // Type definitions don't support impl trait.
    pub fn unit_propagation(
        &mut self,
        package: Id<DP::P>,
    ) -> Result<SmallVec<(Id<DP::P>, IncompDpId<DP>)>, NoSolutionError<DP>> {
        let mut satisfier_causes = SmallVec::default();
        self.unit_propagation_buffer.clear();
        self.unit_propagation_buffer.push(package);
        while let Some(current_package) = self.unit_propagation_buffer.pop() {
            // Iterate over incompatibilities in reverse order
            // to evaluate first the newest incompatibilities.
            let mut conflict_id = None;
            // We only care about incompatibilities if they contain the current package.
            for &incompat_id in self.incompatibilities[&current_package].iter().rev() {
                if self
                    .contradicted_incompatibilities
                    .contains_key(&incompat_id)
                {
                    continue;
                }
                let current_incompat = &self.incompatibility_store[incompat_id];
                match self.partial_solution.relation(current_incompat) {
                    // If the partial solution satisfies the incompatibility
                    // we must perform conflict resolution.
                    Relation::Satisfied => {
                        log::info!(
                            "Start conflict resolution because incompat satisfied:\n {}",
                            current_incompat.display(&self.package_store)
                        );
                        conflict_id = Some(incompat_id);
                        break;
                    }
                    Relation::AlmostSatisfied(package_almost) => {
                        // Add `package_almost` to the `unit_propagation_buffer` set.
                        // Putting items in `unit_propagation_buffer` more than once wastes cycles,
                        // but so does allocating a hash map and hashing each item.
                        // In practice `unit_propagation_buffer` is small enough that we can just do a linear scan.
                        if !self.unit_propagation_buffer.contains(&package_almost) {
                            self.unit_propagation_buffer.push(package_almost);
                        }
                        // Add (not term) to the partial solution with incompat as cause.
                        self.partial_solution.add_derivation(
                            package_almost,
                            incompat_id,
                            &self.incompatibility_store,
                        );
                        // With the partial solution updated, the incompatibility is now contradicted.
                        self.contradicted_incompatibilities
                            .insert(incompat_id, self.partial_solution.current_decision_level());
                    }
                    Relation::Contradicted(_) => {
                        self.contradicted_incompatibilities
                            .insert(incompat_id, self.partial_solution.current_decision_level());
                    }
                    _ => {}
                }
            }
            if let Some(incompat_id) = conflict_id {
                let (package_almost, root_cause) = self
                    .conflict_resolution(incompat_id, &mut satisfier_causes)
                    .map_err(|terminal_incompat_id| {
                        self.build_derivation_tree(terminal_incompat_id)
                    })?;
                self.unit_propagation_buffer.clear();
                self.unit_propagation_buffer.push(package_almost);
                // Add to the partial solution with incompat as cause.
                self.partial_solution.add_derivation(
                    package_almost,
                    root_cause,
                    &self.incompatibility_store,
                );
                // After conflict resolution and the partial solution update,
                // the root cause incompatibility is now contradicted.
                self.contradicted_incompatibilities
                    .insert(root_cause, self.partial_solution.current_decision_level());
            }
        }
        // If there are no more changed packages, unit propagation is done.
        Ok(satisfier_causes)
    }

    /// Return the root cause or the terminal incompatibility. CF
    /// <https://github.com/dart-lang/pub/blob/master/doc/solver.md#unit-propagation>
    ///
    /// When we find a conflict, we want to learn as much as possible from it, to avoid making (or
    /// keeping) decisions that will be rejected. Say we found that the dependency requirements on X and the
    /// dependency requirements on Y are incompatible. We may find that the decisions on earlier packages B and C
    /// require us to make incompatible requirements on X and Y, so we backtrack until either B or C
    /// can be revisited. To make it practical, we really only need one of the terms to be a
    /// decision. We may as well leave the other terms general. Something like "the dependency on
    /// the package X is incompatible with the decision on C" tends to work out pretty well. Then if
    /// A turns out to also have a dependency on X the resulting root cause is still useful.
    /// (`unit_propagation` will ensure we don't try that version of C.)
    /// Of course, this is more heuristics than science. If the output is too general, then
    /// `unit_propagation` will handle the confusion by calling us again with the next most specific
    /// conflict it comes across. If the output is too specific, then the outer `solver` loop will
    /// eventually end up calling us again until all possibilities are enumerated.
    ///
    /// To end up with a more useful incompatibility, this function combines incompatibilities into
    /// derivations. Fulfilling this derivation implies the later conflict. By banning it, we
    /// prevent the intermediate steps from occurring again, at least in the exact same way.
    /// However, the statistics collected for `prioritize` may want to analyze those intermediate
    /// steps. For example, we might start with "there is no version 1 of Z", and
    /// `conflict_resolution` may be able to determine that "that was inevitable when we picked
    /// version 1 of X", which was inevitable when we picked W and so on, until version 1 of B, which
    /// was depended on by version 1 of A. Therefore the root cause may simplify all the way down to
    /// "we cannot pick version 1 of A". This will prevent us from going down this path again. However,
    /// when we start looking at version 2 of A, and discover that it depends on version 2 of B, we
    /// will want to prioritize the chain of intermediate steps to check if it has a problem with
    /// the same shape. The `satisfier_causes` argument keeps track of these intermediate steps so
    /// that the caller can use them for prioritization.
    #[allow(clippy::type_complexity)]
    #[cold]
    fn conflict_resolution(
        &mut self,
        incompatibility: IncompDpId<DP>,
        satisfier_causes: &mut SmallVec<(Id<DP::P>, IncompDpId<DP>)>,
    ) -> Result<(Id<DP::P>, IncompDpId<DP>), IncompDpId<DP>> {
        let mut current_incompat_id = incompatibility;
        let mut current_incompat_changed = false;
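        // `current_incompat_changed` records whether `current_incompat_id` now points at a newly
        // derived incompatibility (a prior cause) that still has to be merged into the state when
        // we backtrack.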
        loop {
            if self.incompatibility_store[current_incompat_id]
                .is_terminal(self.root_package, &self.root_version)
            {
                return Err(current_incompat_id);
            } else {
                let (package, satisfier_search_result) = self.partial_solution.satisfier_search(
                    &self.incompatibility_store[current_incompat_id],
                    &self.incompatibility_store,
                );
                match satisfier_search_result {
                    SatisfierSearch::DifferentDecisionLevels {
                        previous_satisfier_level,
                    } => {
                        self.backtrack(
                            current_incompat_id,
                            current_incompat_changed,
                            previous_satisfier_level,
                        );
                        log::info!("backtrack to {previous_satisfier_level:?}");
                        satisfier_causes.push((package, current_incompat_id));
                        return Ok((package, current_incompat_id));
                    }
                    SatisfierSearch::SameDecisionLevels { satisfier_cause } => {
                        let prior_cause = Incompatibility::prior_cause(
                            current_incompat_id,
                            satisfier_cause,
                            package,
                            &self.incompatibility_store,
                        );
                        log::info!("prior cause: {}", prior_cause.display(&self.package_store));
                        current_incompat_id = self.incompatibility_store.alloc(prior_cause);
                        satisfier_causes.push((package, current_incompat_id));
                        current_incompat_changed = true;
                    }
                }
            }
        }
    }

    /// After a conflict occurred, backtrack the partial solution to a given decision level, and add
    /// the incompatibility if it was new.
    fn backtrack(
        &mut self,
        incompat: IncompDpId<DP>,
        incompat_changed: bool,
        decision_level: DecisionLevel,
    ) {
        self.partial_solution.backtrack(decision_level);
        // Remove contradicted incompatibilities that depend on decisions we just backtracked away.
        self.contradicted_incompatibilities
            .retain(|_, dl| *dl <= decision_level);
        if incompat_changed {
            self.merge_incompatibility(incompat);
        }
    }

    /// Manually backtrack to before the given package was selected.
    ///
    /// This can be used to switch the order of packages if the previous prioritization was bad.
    ///
    /// Returns the number of decisions that were backtracked, or `None` if the package was not
    /// decided on yet.
    pub fn backtrack_package(&mut self, package: Id<DP::P>) -> Option<u32> {
        let base_decision_level = self.partial_solution.current_decision_level();
        let new_decision_level = self.partial_solution.backtrack_package(package).ok()?;
        // Remove contradicted incompatibilities that depend on decisions we just backtracked away.
        self.contradicted_incompatibilities
            .retain(|_, dl| *dl <= new_decision_level);
        Some(base_decision_level.0 - new_decision_level.0)
    }

    /// Add this incompatibility into the set of all incompatibilities.
    ///
    /// PubGrub collapses identical dependencies from adjacent package versions
    /// into individual incompatibilities.
    /// This substantially reduces the total number of incompatibilities
    /// and makes it much easier for PubGrub to reason about multiple versions of packages at once.
    ///
    /// For example, rather than representing
    /// foo 1.0.0 depends on bar ^1.0.0 and
    /// foo 1.1.0 depends on bar ^1.0.0
    /// as two separate incompatibilities,
    /// they are collapsed together into the single incompatibility {foo ^1.0.0, not bar ^1.0.0}
    /// (provided that no other version of foo exists between 1.0.0 and 2.0.0).
    /// We could collapse them into { foo (1.0.0 ∪ 1.1.0), not bar ^1.0.0 }
    /// without having to check the existence of other versions though.
    fn merge_incompatibility(&mut self, mut id: IncompDpId<DP>) {
        if let Some((p1, p2)) = self.incompatibility_store[id].as_dependency() {
            // If we are a dependency, there's a good chance we can be merged with a previous dependency
            let deps_lookup = self.merged_dependencies.entry((p1, p2)).or_default();
            if let Some((past, merged)) = deps_lookup.as_mut_slice().iter_mut().find_map(|past| {
                self.incompatibility_store[id]
                    .merge_dependents(&self.incompatibility_store[*past])
                    .map(|m| (past, m))
            }) {
                let new = self.incompatibility_store.alloc(merged);
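                // The merged incompatibility supersedes the one it was merged with, so drop the
                // superseded id from the per-package index before registering the new one below.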
                for (pkg, _) in self.incompatibility_store[new].iter() {
                    self.incompatibilities
                        .entry(pkg)
                        .or_default()
                        .retain(|id| id != past);
                }
                *past = new;
                id = new;
            } else {
                deps_lookup.push(id);
            }
        }
        for (pkg, term) in self.incompatibility_store[id].iter() {
            if cfg!(debug_assertions) {
                assert_ne!(term, &crate::term::Term::any());
            }
            self.incompatibilities.entry(pkg).or_default().push(id);
        }
    }

    // Error reporting #########################################################

    fn build_derivation_tree(
        &self,
        incompat: IncompDpId<DP>,
    ) -> DerivationTree<DP::P, DP::VS, DP::M> {
        let mut all_ids: Set<IncompDpId<DP>> = Set::default();
        let mut shared_ids = Set::default();
        let mut stack = vec![incompat];
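        // Walk the DAG of causes. A derived incompatibility reached along more than one path is
        // recorded as shared, so the final report can factor it out instead of repeating it.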
        while let Some(i) = stack.pop() {
            if let Some((id1, id2)) = self.incompatibility_store[i].causes() {
                if all_ids.contains(&i) {
                    shared_ids.insert(i);
                } else {
                    stack.push(id1);
                    stack.push(id2);
                }
            }
            all_ids.insert(i);
        }
        // To avoid recursion we need to generate trees in topological order.
        // That is to say we need to ensure that the causes are processed before the incompatibility they affect.
        // It happens to be that sorting by their ID maintains this property.
        let mut sorted_ids = all_ids.into_iter().collect::<Vec<_>>();
        sorted_ids.sort_unstable_by_key(|id| id.into_raw());
        let mut precomputed = Map::default();
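        // Because causes always have smaller ids than the incompatibilities derived from them,
        // every cause is already in `precomputed` by the time we build the tree that uses it.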
        for id in sorted_ids {
            let tree = Incompatibility::build_derivation_tree(
                id,
                &shared_ids,
                &self.incompatibility_store,
                &self.package_store,
                &precomputed,
            );
            precomputed.insert(id, Arc::new(tree));
        }
        // Now the user can refer to the entire tree from its root.
        Arc::into_inner(precomputed.remove(&incompat).unwrap()).unwrap()
    }
}