// pubgrub/internal/core.rs
1// SPDX-License-Identifier: MPL-2.0
2
3//! Core model and functions
4//! to write a functional PubGrub algorithm.
5
6use std::collections::HashSet as Set;
7use std::sync::Arc;
8
9use crate::internal::{
10 Arena, DecisionLevel, HashArena, Id, IncompDpId, IncompId, Incompatibility, PartialSolution,
11 Relation, SatisfierSearch, SmallVec,
12};
13use crate::{DependencyProvider, DerivationTree, Map, NoSolutionError, VersionSet};
14
/// Current state of the PubGrub algorithm.
#[derive(Clone)]
pub(crate) struct State<DP: DependencyProvider> {
    /// Interned id of the root package, i.e. the package whose dependencies we are solving.
    pub root_package: Id<DP::P>,
    /// The version of the root package that the solution must select.
    root_version: DP::V,

    /// All incompatibilities known to the solver, indexed by every package
    /// appearing in their terms (see `merge_incompatibility`).
    #[allow(clippy::type_complexity)]
    incompatibilities: Map<Id<DP::P>, Vec<IncompDpId<DP>>>,

    /// As an optimization, store the ids of incompatibilities that are already contradicted.
    ///
    /// For each one keep track of the decision level when it was found to be contradicted.
    /// These will stay contradicted until we have backtracked beyond its associated decision level.
    contradicted_incompatibilities: Map<IncompDpId<DP>, DecisionLevel>,

    /// All incompatibilities expressing dependencies,
    /// with common dependents merged.
    ///
    /// Keyed by the two package ids of the dependency edge, as returned by
    /// `Incompatibility::as_dependency`.
    #[allow(clippy::type_complexity)]
    merged_dependencies: Map<(Id<DP::P>, Id<DP::P>), SmallVec<IncompDpId<DP>>>,

    /// Partial solution.
    /// TODO: remove pub.
    pub(crate) partial_solution: PartialSolution<DP>,

    /// The store is the reference storage for all incompatibilities.
    pub(crate) incompatibility_store: Arena<Incompatibility<DP::P, DP::VS, DP::M>>,

    /// The store is the reference storage for all packages.
    pub(crate) package_store: HashArena<DP::P>,

    /// This is a stack of work to be done in `unit_propagation`.
    /// It can definitely be a local variable to that method, but
    /// this way we can reuse the same allocation for better performance.
    unit_propagation_buffer: SmallVec<Id<DP::P>>,
}
50
51impl<DP: DependencyProvider> State<DP> {
52 /// Initialization of PubGrub state.
53 pub(crate) fn init(root_package: DP::P, root_version: DP::V) -> Self {
54 let mut incompatibility_store = Arena::new();
55 let mut package_store = HashArena::new();
56 let root_package = package_store.alloc(root_package);
57 let not_root_id = incompatibility_store.alloc(Incompatibility::not_root(
58 root_package,
59 root_version.clone(),
60 ));
61 let mut incompatibilities = Map::default();
62 incompatibilities.insert(root_package, vec![not_root_id]);
63 Self {
64 root_package,
65 root_version,
66 incompatibilities,
67 contradicted_incompatibilities: Map::default(),
68 partial_solution: PartialSolution::empty(),
69 incompatibility_store,
70 package_store,
71 unit_propagation_buffer: SmallVec::Empty,
72 merged_dependencies: Map::default(),
73 }
74 }
75
76 /// Add the dependencies for the current version of the current package as incompatibilities.
77 pub(crate) fn add_package_version_dependencies(
78 &mut self,
79 package: Id<DP::P>,
80 version: DP::V,
81 dependencies: impl IntoIterator<Item = (DP::P, DP::VS)>,
82 ) -> Option<IncompId<DP::P, DP::VS, DP::M>> {
83 let dep_incompats =
84 self.add_incompatibility_from_dependencies(package, version.clone(), dependencies);
85 self.partial_solution.add_package_version_incompatibilities(
86 package,
87 version.clone(),
88 dep_incompats,
89 &self.incompatibility_store,
90 )
91 }
92
93 /// Add an incompatibility to the state.
94 pub(crate) fn add_incompatibility(&mut self, incompat: Incompatibility<DP::P, DP::VS, DP::M>) {
95 let id = self.incompatibility_store.alloc(incompat);
96 self.merge_incompatibility(id);
97 }
98
    /// Create one incompatibility per dependency of `package @ version`,
    /// store them, and merge each with previously recorded dependency
    /// incompatibilities where possible.
    ///
    /// Returns the range of ids of the newly allocated incompatibilities
    /// in the incompatibility store.
    #[cold]
    pub(crate) fn add_incompatibility_from_dependencies(
        &mut self,
        package: Id<DP::P>,
        version: DP::V,
        deps: impl IntoIterator<Item = (DP::P, DP::VS)>,
    ) -> std::ops::Range<IncompDpId<DP>> {
        // Create incompatibilities and allocate them in the store.
        let new_incompats_id_range =
            self.incompatibility_store
                .alloc_iter(deps.into_iter().map(|(dep_p, dep_vs)| {
                    // Intern the dependency package name before building the incompatibility.
                    let dep_pid = self.package_store.alloc(dep_p);
                    Incompatibility::from_dependency(
                        package,
                        <DP::VS as VersionSet>::singleton(version.clone()),
                        (dep_pid, dep_vs),
                    )
                }));
        // Merge the newly created incompatibilities with the older ones.
        for id in IncompDpId::<DP>::range_to_iter(new_incompats_id_range.clone()) {
            self.merge_incompatibility(id);
        }
        new_incompats_id_range
    }
124
    /// Unit propagation is the core mechanism of the solving algorithm.
    /// CF <https://github.com/dart-lang/pub/blob/master/doc/solver.md#unit-propagation>
    ///
    /// For each package with a satisfied incompatibility, returns the package and the root cause
    /// incompatibility.
    #[cold]
    #[allow(clippy::type_complexity)] // Type definitions don't support impl trait.
    pub(crate) fn unit_propagation(
        &mut self,
        package: Id<DP::P>,
    ) -> Result<SmallVec<(Id<DP::P>, IncompDpId<DP>)>, NoSolutionError<DP>> {
        // (package, root cause) pairs produced by conflict resolution,
        // handed back to the caller (e.g. for prioritization).
        let mut satisfier_causes = SmallVec::default();
        self.unit_propagation_buffer.clear();
        self.unit_propagation_buffer.push(package);
        while let Some(current_package) = self.unit_propagation_buffer.pop() {
            // Iterate over incompatibilities in reverse order
            // to evaluate first the newest incompatibilities.
            let mut conflict_id = None;
            // We only care about incompatibilities if it contains the current package.
            for &incompat_id in self.incompatibilities[&current_package].iter().rev() {
                // Skip incompatibilities already known to be contradicted
                // at the current decision level (cached optimization).
                if self
                    .contradicted_incompatibilities
                    .contains_key(&incompat_id)
                {
                    continue;
                }
                let current_incompat = &self.incompatibility_store[incompat_id];
                match self.partial_solution.relation(current_incompat) {
                    // If the partial solution satisfies the incompatibility
                    // we must perform conflict resolution.
                    Relation::Satisfied => {
                        log::info!(
                            "Start conflict resolution because incompat satisfied:\n   {}",
                            current_incompat.display(&self.package_store)
                        );
                        conflict_id = Some(incompat_id);
                        break;
                    }
                    Relation::AlmostSatisfied(package_almost) => {
                        // Add `package_almost` to the `unit_propagation_buffer` set.
                        // Putting items in `unit_propagation_buffer` more than once waste cycles,
                        // but so does allocating a hash map and hashing each item.
                        // In practice `unit_propagation_buffer` is small enough that we can just do a linear scan.
                        if !self.unit_propagation_buffer.contains(&package_almost) {
                            self.unit_propagation_buffer.push(package_almost);
                        }
                        // Add (not term) to the partial solution with incompat as cause.
                        self.partial_solution.add_derivation(
                            package_almost,
                            incompat_id,
                            &self.incompatibility_store,
                        );
                        // With the partial solution updated, the incompatibility is now contradicted.
                        self.contradicted_incompatibilities
                            .insert(incompat_id, self.partial_solution.current_decision_level());
                    }
                    Relation::Contradicted(_) => {
                        // Cache the contradiction so this incompatibility is skipped
                        // until we backtrack past the current decision level.
                        self.contradicted_incompatibilities
                            .insert(incompat_id, self.partial_solution.current_decision_level());
                    }
                    // Any other relation requires no action here.
                    _ => {}
                }
            }
            if let Some(incompat_id) = conflict_id {
                let (package_almost, root_cause) = self
                    .conflict_resolution(incompat_id, &mut satisfier_causes)
                    .map_err(|terminal_incompat_id| {
                        self.build_derivation_tree(terminal_incompat_id)
                    })?;
                // Conflict resolution backtracked; previously queued work is stale.
                self.unit_propagation_buffer.clear();
                self.unit_propagation_buffer.push(package_almost);
                // Add to the partial solution with incompat as cause.
                self.partial_solution.add_derivation(
                    package_almost,
                    root_cause,
                    &self.incompatibility_store,
                );
                // After conflict resolution and the partial solution update,
                // the root cause incompatibility is now contradicted.
                self.contradicted_incompatibilities
                    .insert(root_cause, self.partial_solution.current_decision_level());
            }
        }
        // If there are no more changed packages, unit propagation is done.
        Ok(satisfier_causes)
    }
211
    /// Return the root cause or the terminal incompatibility. CF
    /// <https://github.com/dart-lang/pub/blob/master/doc/solver.md#unit-propagation>
    ///
    /// When we found a conflict, we want to learn as much as possible from it, to avoid making (or
    /// keeping) decisions that will be rejected. Say we found that the dependency requirements on X and the
    /// dependency requirements on Y are incompatible. We may find that the decisions on earlier packages B and C
    /// require us to make incompatible requirements on X and Y, so we backtrack until either B or C
    /// can be revisited. To make it practical, we really only need one of the terms to be a
    /// decision. We may as well leave the other terms general. Something like "the dependency on
    /// the package X is incompatible with the decision on C" tends to work out pretty well. Then if
    /// A turns out to also have a dependency on X the resulting root cause is still useful.
    /// (`unit_propagation` will ensure we don't try that version of C.)
    /// Of course, this is more heuristics than science. If the output is too general, then
    /// `unit_propagation` will handle the confusion by calling us again with the next most specific
    /// conflict it comes across. If the output is too specific, then the outer `solver` loop will
    /// eventually end up calling us again until all possibilities are enumerated.
    ///
    /// To end up with a more useful incompatibility, this function combines incompatibilities into
    /// derivations. Fulfilling this derivation implies the later conflict. By banning it, we
    /// prevent the intermediate steps from occurring again, at least in the exact same way.
    /// However, the statistics collected for `prioritize` may want to analyze those intermediate
    /// steps. For example we might start with "there is no version 1 of Z", and
    /// `conflict_resolution` may be able to determine that "that was inevitable when we picked
    /// version 1 of X" which was inevitable when we picked W and so on, until version 1 of B, which
    /// was depended on by version 1 of A. Therefore the root cause may simplify all the way down to
    /// "we cannot pick version 1 of A". This will prevent us going down this path again. However
    /// when we start looking at version 2 of A, and discover that it depends on version 2 of B, we
    /// will want to prioritize the chain of intermediate steps to check if it has a problem with
    /// the same shape. The `satisfier_causes` argument keeps track of these intermediate steps so
    /// that the caller can use them for prioritization.
    #[allow(clippy::type_complexity)]
    #[cold]
    fn conflict_resolution(
        &mut self,
        incompatibility: IncompDpId<DP>,
        satisfier_causes: &mut SmallVec<(Id<DP::P>, IncompDpId<DP>)>,
    ) -> Result<(Id<DP::P>, IncompDpId<DP>), IncompDpId<DP>> {
        let mut current_incompat_id = incompatibility;
        // Tracks whether `current_incompat_id` was replaced by a prior cause,
        // so `backtrack` knows to (re-)register it.
        let mut current_incompat_changed = false;
        loop {
            if self.incompatibility_store[current_incompat_id]
                .is_terminal(self.root_package, &self.root_version)
            {
                // The conflict reaches back to the root package itself:
                // no solution exists; report the terminal incompatibility.
                return Err(current_incompat_id);
            } else {
                let (package, satisfier_search_result) = self.partial_solution.satisfier_search(
                    &self.incompatibility_store[current_incompat_id],
                    &self.incompatibility_store,
                );
                match satisfier_search_result {
                    SatisfierSearch::DifferentDecisionLevels {
                        previous_satisfier_level,
                    } => {
                        // The satisfiers live at different decision levels:
                        // backtrack to the earlier one and return the current
                        // incompatibility as the root cause.
                        self.backtrack(
                            current_incompat_id,
                            current_incompat_changed,
                            previous_satisfier_level,
                        );
                        log::info!("backtrack to {:?}", previous_satisfier_level);
                        satisfier_causes.push((package, current_incompat_id));
                        return Ok((package, current_incompat_id));
                    }
                    SatisfierSearch::SameDecisionLevels { satisfier_cause } => {
                        // Combine the current incompatibility with the satisfier's
                        // cause and continue resolving with the resulting prior cause.
                        let prior_cause = Incompatibility::prior_cause(
                            current_incompat_id,
                            satisfier_cause,
                            package,
                            &self.incompatibility_store,
                        );
                        log::info!("prior cause: {}", prior_cause.display(&self.package_store));
                        current_incompat_id = self.incompatibility_store.alloc(prior_cause);
                        satisfier_causes.push((package, current_incompat_id));
                        current_incompat_changed = true;
                    }
                }
            }
        }
    }
290
291 /// Backtracking.
292 fn backtrack(
293 &mut self,
294 incompat: IncompDpId<DP>,
295 incompat_changed: bool,
296 decision_level: DecisionLevel,
297 ) {
298 self.partial_solution.backtrack(decision_level);
299 // Remove contradicted incompatibilities that depend on decisions we just backtracked away.
300 self.contradicted_incompatibilities
301 .retain(|_, dl| *dl <= decision_level);
302 if incompat_changed {
303 self.merge_incompatibility(incompat);
304 }
305 }
306
    /// Add this incompatibility into the set of all incompatibilities.
    ///
    /// PubGrub collapses identical dependencies from adjacent package versions
    /// into individual incompatibilities.
    /// This substantially reduces the total number of incompatibilities
    /// and makes it much easier for PubGrub to reason about multiple versions of packages at once.
    ///
    /// For example, rather than representing
    /// foo 1.0.0 depends on bar ^1.0.0 and
    /// foo 1.1.0 depends on bar ^1.0.0
    /// as two separate incompatibilities,
    /// they are collapsed together into the single incompatibility {foo ^1.0.0, not bar ^1.0.0}
    /// (provided that no other version of foo exists between 1.0.0 and 2.0.0).
    /// We could collapse them into { foo (1.0.0 ∪ 1.1.0), not bar ^1.0.0 }
    /// without having to check the existence of other versions though.
    fn merge_incompatibility(&mut self, mut id: IncompDpId<DP>) {
        if let Some((p1, p2)) = self.incompatibility_store[id].as_dependency() {
            // If we are a dependency, there's a good chance we can be merged with a previous dependency
            let deps_lookup = self.merged_dependencies.entry((p1, p2)).or_default();
            // Look for any previously stored dependency incompatibility on the
            // same (p1, p2) edge that `merge_dependents` can combine with ours.
            if let Some((past, merged)) = deps_lookup.as_mut_slice().iter_mut().find_map(|past| {
                self.incompatibility_store[id]
                    .merge_dependents(&self.incompatibility_store[*past])
                    .map(|m| (past, m))
            }) {
                let new = self.incompatibility_store.alloc(merged);
                // The merged incompatibility supersedes the old one: remove the
                // old id from the per-package buckets it was indexed under...
                for (pkg, _) in self.incompatibility_store[new].iter() {
                    self.incompatibilities
                        .entry(pkg)
                        .or_default()
                        .retain(|id| id != past);
                }
                // ...and record the merged id both in the dependency lookup and
                // as the id to index below.
                *past = new;
                id = new;
            } else {
                deps_lookup.push(id);
            }
        }
        // Index the (possibly merged) incompatibility under every package
        // appearing in its terms.
        for (pkg, term) in self.incompatibility_store[id].iter() {
            if cfg!(debug_assertions) {
                // Invariant: stored incompatibilities never contain an `any` term.
                assert_ne!(term, &crate::term::Term::any());
            }
            self.incompatibilities.entry(pkg).or_default().push(id);
        }
    }
351
352 // Error reporting #########################################################
353
    /// Build the derivation tree explaining the conflict rooted at `incompat`,
    /// for error reporting.
    fn build_derivation_tree(
        &self,
        incompat: IncompDpId<DP>,
    ) -> DerivationTree<DP::P, DP::VS, DP::M> {
        // First pass: collect every incompatibility transitively reachable from
        // `incompat` through its causes, and note the ones reachable more than
        // once (shared between branches of the tree).
        let mut all_ids: Set<IncompDpId<DP>> = Set::default();
        let mut shared_ids = Set::default();
        let mut stack = vec![incompat];
        while let Some(i) = stack.pop() {
            if let Some((id1, id2)) = self.incompatibility_store[i].causes() {
                if all_ids.contains(&i) {
                    shared_ids.insert(i);
                } else {
                    stack.push(id1);
                    stack.push(id2);
                }
            }
            all_ids.insert(i);
        }
        // To avoid recursion we need to generate trees in topological order.
        // That is to say we need to ensure that the causes are processed before the incompatibility they effect.
        // It happens to be that sorting by their ID maintains this property.
        let mut sorted_ids = all_ids.into_iter().collect::<Vec<_>>();
        sorted_ids.sort_unstable_by_key(|id| id.into_raw());
        let mut precomputed = Map::default();
        for id in sorted_ids {
            let tree = Incompatibility::build_derivation_tree(
                id,
                &shared_ids,
                &self.incompatibility_store,
                &self.package_store,
                &precomputed,
            );
            precomputed.insert(id, Arc::new(tree));
        }
        // Now the user can refer to the entire tree from its root.
        // `incompat` was collected above, so `remove` succeeds; the root is not
        // a cause of any other collected incompatibility, so its `Arc` should
        // hold the only strong reference and `into_inner` succeeds.
        Arc::into_inner(precomputed.remove(&incompat).unwrap()).unwrap()
    }
391}