nickel_lang_core/eval/merge.rs
1//! Evaluation of the merge operator.
2//!
//! Merge is a primitive operation of Nickel, which recursively combines records. Together with
//! field metadata, it allows writing and mixing contracts with standard records.
5//!
6//! # Operational semantics
7//!
8//! ## On records
9//!
10//! When records `r1` and `r2` are merged, the result is a new record with the following fields:
11//! - All the fields of `r1` that are not in `r2`
12//! - All the fields of `r2` that are not in `r1`
13//! - Fields that are both in `r1` and `r2` are recursively merged: for a field `f`, the result
14//! contains the binding `f = r1.f & r2.f`
15//!
16//! As fields are recursively merged, merge needs to operate on any value, not only on records:
17//!
18//! - *function*: merging a function with anything else fails
//! - *values*: merging any other values succeeds if and only if these two values are equal, in
//! which case it evaluates to this common value.
21//!
22//! ## Metadata
23//!
24//! One can think of merge to be defined on metadata as well. When merging two fields, the
25//! resulting metadata is the result of merging the two original field's metadata. The semantics
26//! depend on each metadata.
27
28use nickel_lang_parser::ast::MergeKind;
29
30use super::*;
31use crate::{
32 closurize::Closurize,
33 combine::Combine,
34 error::IllegalPolymorphicTailAction,
35 label::{Label, MergeLabel},
36 position::PosIdx,
37 term::{
38 BinaryOp, IndexMap, Term, TypeAnnotation, make as mk_term,
39 record::{self, Field, FieldDeps, FieldMetadata, RecordAttrs, RecordData},
40 },
41};
42
/// Merging mode. Merging is used both to combine standard data and to apply contracts defined as
/// records.
///
/// In [MergeMode::Contract] mode, the merge operator acts like a custom contract. Instead of
/// returning the result directly, it either returns `'Ok result`, or `'Error {..}` if there were
/// some unexpected extra fields.
#[derive(Clone, PartialEq, Debug)]
pub enum MergeMode {
    /// Standard merging, for combining data. The attached [MergeLabel] is used to report merge
    /// errors (incompatible arguments).
    Standard(MergeLabel),
    /// Merging to apply a record contract to a value, with the associated label.
    Contract(Label),
}
56
57impl From<MergeMode> for MergeLabel {
58 /// Either takes the inner merge label if the mode is `Standard`, or converts a contract label
59 /// to a merge label if the mode is `Contract`.
60 fn from(mode: MergeMode) -> Self {
61 match mode {
62 MergeMode::Standard(merge_label) => merge_label,
63 MergeMode::Contract(label) => label.into(),
64 }
65 }
66}
67
impl<'ctxt, R: ImportResolver, C: Cache> VirtualMachine<'ctxt, R, C> {
    /// Compute the merge of two evaluated operands. Support both standard merging and record contract
    /// application.
    ///
    /// # Mode
    ///
    /// In [`MergeMode::Contract`] mode, `t1` must be the value and `t2` must be the contract. It is
    /// important as `merge` is not commutative in this mode.
    ///
    /// # Errors
    ///
    /// Produces [EvalErrorKind::MergeIncompatibleArgs] when the operands can't be merged (unequal
    /// constants, mismatched types, functions, ...), and
    /// [EvalErrorKind::IllegalPolymorphicTailAccess] when merging records with a sealed tail.
    // TODO: Is it worth to pack the inputs in an ad-hoc struct?
    #[allow(clippy::too_many_arguments)]
    pub fn merge(
        &mut self,
        v1: NickelValue,
        env1: Environment,
        v2: NickelValue,
        env2: Environment,
        pos_op: PosIdx,
        mode: MergeMode,
    ) -> Result<Closure, ErrorKind> {
        // Positions of the original operands, used to point at each argument when reporting
        // incompatible arguments.
        let pos1 = v1.pos_idx();
        let pos2 = v2.pos_idx();
        let pos_op_inh = pos_op.to_inherited();

        // Determines if we need to wrap the result in `'Ok` upon successful merging, which is the case
        // when in contract merge mode. We're going to move out of `mode` at some point, so we need to
        // save this information now.
        let wrap_in_ok = matches!(mode, MergeMode::Contract(_));

        // Dispatch on the content of both operands, which are expected to be already evaluated.
        let result = match (v1.content_ref(), v2.content_ref()) {
            // Merge is idempotent on basic terms
            (ValueContentRef::Null, ValueContentRef::Null) => {
                Ok(NickelValue::null().with_pos_idx(pos_op_inh))
            }
            (ValueContentRef::Bool(b1), ValueContentRef::Bool(b2)) if b1 == b2 => {
                Ok(NickelValue::bool_value(b1, pos_op_inh))
            }
            (ValueContentRef::Number(n1), ValueContentRef::Number(n2)) => {
                if n1 == n2 {
                    Ok(NickelValue::number(n1.clone(), pos_op_inh))
                } else {
                    Err(Box::new(EvalErrorKind::MergeIncompatibleArgs {
                        left_arg: NickelValue::number(n1.clone(), pos1),
                        right_arg: NickelValue::number(n2.clone(), pos2),
                        merge_label: mode.into(),
                    }))
                }
            }
            (ValueContentRef::String(s1), ValueContentRef::String(s2)) => {
                if s1 == s2 {
                    Ok(NickelValue::string(s1, pos_op_inh))
                } else {
                    Err(Box::new(EvalErrorKind::MergeIncompatibleArgs {
                        left_arg: NickelValue::string(s1, pos1),
                        right_arg: NickelValue::string(s2, pos2),
                        merge_label: mode.into(),
                    }))
                }
            }
            (ValueContentRef::Label(label1), ValueContentRef::Label(label2)) => {
                if label1 == label2 {
                    Ok(NickelValue::label(label1.clone(), pos_op_inh))
                } else {
                    Err(Box::new(EvalErrorKind::MergeIncompatibleArgs {
                        left_arg: NickelValue::label(label1.clone(), pos1),
                        right_arg: NickelValue::label(label2.clone(), pos2),
                        merge_label: mode.into(),
                    }))
                }
            }
            (ValueContentRef::EnumVariant(enum1), ValueContentRef::EnumVariant(enum2)) => {
                match (enum1, enum2) {
                    // Two bare enum tags merge iff they carry the same tag.
                    (
                        EnumVariantData {
                            tag: tag1,
                            arg: None,
                        },
                        EnumVariantData {
                            tag: tag2,
                            arg: None,
                        },
                    ) if tag1 == tag2 => Ok(NickelValue::enum_tag(*tag1, pos_op_inh)),
                    // Two variants with the same tag merge their arguments recursively. The
                    // resulting argument is a lazy merge expression, closurized with an empty
                    // environment since its operands are closurized themselves.
                    (
                        EnumVariantData {
                            tag: tag1,
                            arg: Some(arg1),
                        },
                        EnumVariantData {
                            tag: tag2,
                            arg: Some(arg2),
                        },
                    ) if tag1 == tag2 => {
                        let arg = NickelValue::term_posless(Term::op2(
                            BinaryOp::Merge(mode.into()),
                            arg1.clone().closurize(&mut self.context.cache, env1),
                            arg2.clone().closurize(&mut self.context.cache, env2),
                        ))
                        .closurize(&mut self.context.cache, Environment::new());

                        Ok(NickelValue::enum_variant(*tag1, Some(arg), pos_op_inh))
                    }
                    // Different tags, or a bare tag on one side and a variant with an argument
                    // on the other: incompatible.
                    (enum1, enum2) => Err(Box::new(EvalErrorKind::MergeIncompatibleArgs {
                        left_arg: NickelValue::enum_variant(enum1.tag, enum1.arg.clone(), pos1),
                        right_arg: NickelValue::enum_variant(enum2.tag, enum2.arg.clone(), pos2),
                        merge_label: mode.into(),
                    })),
                }
            }
            // There are several different (and valid) ways of merging arrays. We don't want to choose
            // for the user, so future custom merge functions will provide a way to overload the native
            // merging function. For the time being, we still need to be idempotent: thus we rewrite
            // `array1 & array2` to `contract.Equal array1 array2`, so that we extend merge in the
            // minimum way such that it is idempotent.
            (ValueContentRef::Array(_), ValueContentRef::Array(_)) => {
                use crate::{mk_app, stdlib, typ::TypeF};
                use std::rc::Rc;

                let v1 = v1.closurize(&mut self.context.cache, env1);
                let v2 = v2.closurize(&mut self.context.cache, env2);

                // We reconstruct the contract we apply later on just to fill the label. This will be
                // printed out when reporting the error.
                let contract_for_display = mk_app!(
                    mk_term::op1(
                        UnaryOp::RecordAccess("Equal".into()),
                        Term::Var("contract".into()),
                    ),
                    // We would need to substitute variables inside `t1` to make it useful to print,
                    // but currently we don't want to do it preventively at each array merging, so we
                    // just print `contract.Equal some_array`.
                    //
                    // If the error reporting proves to be insufficient, consider substituting the
                    // variables inside `t1`, but be aware that it might (or might not) have a
                    // noticeable impact on performance.
                    mk_term::var("some_array")
                );

                let merge_label: MergeLabel = mode.into();
                let mut notes = vec!["\
                    This equality contract was auto-generated from a merge operation on two arrays. \
                    Arrays can only be merged if they are equal.".into()];
                // Piecewise definitions (the same field defined twice in a record literal) are a
                // common accidental source of array merges: add a dedicated hint.
                if merge_label.kind == MergeKind::PiecewiseDef {
                    notes.push(
                        "\
                        The arrays were merged because they were assigned to the same record field \
                        piecewise. This is likely to have been a mistake. Check for duplicate \
                        definitions of the same record field."
                        .into(),
                    );
                }

                let label = Label {
                    typ: Rc::new(TypeF::Contract(contract_for_display).into()),
                    span: merge_label.span,
                    ..Default::default()
                }
                .with_diagnostic_message("cannot merge unequal arrays")
                .with_diagnostic_notes(notes);

                // We don't actually use `contract.Equal` directly, because `contract` could have been
                // locally redefined. We rather use the internal `$stdlib_contract_equal`, which is
                // exactly the same, but can't be shadowed.
                let eq_contract = mk_app!(stdlib::internals::stdlib_contract_equal(), v1);

                Ok(mk_app!(
                    mk_term::op2(
                        BinaryOp::ContractApply,
                        eq_contract,
                        NickelValue::label_posless(label)
                    ),
                    v2
                )
                .with_pos_idx(pos_op_inh))
            }
            // The empty record is the neutral element for merging. We treat this case specifically
            // for performance reasons: to avoid allocation, recomputation of fixpoint, etc.
            (ValueContentRef::Record(_), ValueContentRef::Record(Container::Empty))
            | (ValueContentRef::Record(Container::Empty), ValueContentRef::Record(_)) => {
                // NOTE(review): assumes `is_inline_empty_record()` agrees with the
                // `Container::Empty` pattern matched above — confirm both notions of "empty
                // record" coincide.
                let non_empty = if v1.is_inline_empty_record() { v2 } else { v1 };

                // In merge contract mode, we need to maintain the position of the first argument,
                // which is the scrutinized value, to maintain good contract error messages.
                //
                // Otherwise, since one of the argument is the neutral element for merging, it's
                // better to keep the position of the other argument (instead of the position of
                // the merge), since the result will inherit everything from it.
                let final_pos = if let MergeMode::Standard(_) = mode {
                    non_empty.pos_idx()
                } else {
                    pos1.to_inherited()
                };

                Ok(non_empty
                    // In `MergeMode::Contract(_)` mode, we want to maintain the position of the
                    // scrutinized value (the first argument), per the comment above.
                    .with_pos_idx(final_pos))
            }
            // Merge put together the fields of records, and recursively merge
            // fields that are present in both terms
            (
                ValueContentRef::Record(Container::Alloc(r1)),
                ValueContentRef::Record(Container::Alloc(r2)),
            ) => {
                // While it wouldn't be impossible to merge records with sealed tails,
                // working out how to do so in a "sane" way that preserves parametricity
                // is non-trivial. It's also not entirely clear that this is something
                // users will generally have reason to do, so in the meantime we've
                // decided to just prevent this entirely
                if let Some(record::SealedTail { label, .. }) = r1
                    .sealed_tail
                    .as_ref()
                    .or(r2.sealed_tail.as_ref())
                    .map(|rc| &**rc)
                {
                    return Err(Box::new(EvalErrorKind::IllegalPolymorphicTailAccess {
                        action: IllegalPolymorphicTailAction::Merge,
                        evaluated_arg: label.get_evaluated_arg(&self.context.cache),
                        label: label.clone(),
                    }));
                }

                // Partition the fields: `left` is only in `r1`, `right` only in `r2`, and
                // `center` holds the fields present in both, which must be recursively merged.
                let split::SplitResult {
                    left,
                    center,
                    right,
                } = split::split_ref(&r1.fields, &r2.fields);

                match mode {
                    // A closed record contract (`!r2.attrs.open`) forbids fields that appear
                    // only in the checked value (`left`).
                    MergeMode::Contract(_) if !r2.attrs.open && !left.is_empty() => {
                        let fields: Vec<String> =
                            left.keys().map(|field| format!("`{field}`")).collect();
                        let plural = if fields.len() == 1 { "" } else { "s" };
                        let fields_list = fields.join(", ");

                        let error_data = [
                            (
                                "message".into(),
                                NickelValue::string_posless(format!(
                                    "extra field{plural} {fields_list}"
                                )),
                            ),
                            (
                                "notes".into(),
                                NickelValue::array_posless(
                                    [
                                        NickelValue::string_posless("Have you misspelled a field?"),
                                        NickelValue::string_posless(
                                            "The record contract might also be too strict. By default, \
                                            record contracts exclude any field which is not listed.\n\
                                            Append `, ..` at the end of the record contract, as in \
                                            `{some_field | SomeContract, ..}`, to make it accept extra fields.",
                                        ),
                                    ]
                                    .into_iter()
                                    .collect(),
                                    Default::default(),
                                ),
                            ),
                        ];

                        // The presence of extra fields is an immediate contract error. Thus, instead
                        // of raising a blame error as for a delayed contract error, which can't be
                        // caught in user-code, we return an `'Error {..}` value instead.
                        return Ok(mk_term::enum_variant(
                            "Error",
                            NickelValue::record_posless(RecordData::with_field_values(error_data)),
                        )
                        .into());
                    }
                    _ => (),
                };

                let final_pos = if let MergeMode::Standard(_) = mode {
                    pos_op_inh
                } else {
                    pos1.to_inherited()
                };

                let merge_label = MergeLabel::from(mode);

                // The complete list of field names of the resulting record, needed for saturation
                // of the fields merged in the center part.
                let field_names: Vec<_> = left
                    .keys()
                    .chain(center.keys())
                    .chain(right.keys())
                    .copied()
                    .collect();
                let mut m = IndexMap::with_capacity(left.len() + center.len() + right.len());

                // Merging recursive records is the one operation that may override recursive fields. To
                // have the recursive fields depend on the updated values, we need to revert the
                // corresponding elements in the cache to their original expression.
                //
                // We do that for the left and the right part.
                //
                // The fields in the intersection (center) need a slightly more general treatment to
                // correctly propagate the recursive values down each field: saturation. See
                // [crate::eval::cache::Cache::saturate()].
                m.extend(
                    left.into_iter()
                        .map(|(id, field)| (id, field.revert_closurize(&mut self.context.cache))),
                );

                m.extend(
                    right
                        .into_iter()
                        .map(|(id, field)| (id, field.revert_closurize(&mut self.context.cache))),
                );

                for (id, (field1, field2)) in center.into_iter() {
                    m.insert(
                        id,
                        merge_fields(
                            &mut self.context.cache,
                            merge_label,
                            field1,
                            field2,
                            field_names.iter(),
                        )?,
                    );
                }

                let attrs = RecordAttrs::combine(r1.attrs, r2.attrs);

                Ok(NickelValue::term(
                    // We don't have to provide RecordDeps, which are required in a previous stage
                    // of program transformations. At this point, the interpreter doesn't care
                    // about them anymore, and dependencies are stored at the level of revertible
                    // cache elements directly.
                    //
                    // Include expressions are transformed to normal fields the very first time
                    // they are seen, and there's no way back. We always set them to empty here.
                    //
                    // The result is already closurized, so we set `closurized` to true.
                    Term::rec_record(
                        RecordData::new(m, attrs, None),
                        Vec::new(),
                        Vec::new(),
                        None,
                        true,
                    ),
                    final_pos,
                ))
            }
            // Any other combination of contents (functions, mismatched types, ...) can't be
            // merged.
            _ => Err(Box::new(EvalErrorKind::MergeIncompatibleArgs {
                left_arg: v1,
                right_arg: v2,
                merge_label: mode.into(),
            })),
        };

        // In contract mode, wrap a successful result in `'Ok` (the `'Error {..}` case
        // early-returns above and is deliberately not wrapped).
        result.map(|value| {
            if wrap_in_ok {
                let pos = value.pos_idx();
                NickelValue::enum_variant(
                    "Ok",
                    Some(value.closurize(&mut self.context.cache, Environment::new())),
                    pos,
                )
                .into()
            } else {
                value.into()
            }
        })
    }
}
431
/// Take two record fields in their respective environment and combine both their metadata and
/// values. Apply the required saturate, revert or closurize operation, including on the final
/// field returned.
///
/// The resulting value is picked by priority: when priorities are equal, both values are merged
/// recursively; otherwise the value with the highest priority wins. Metadata are combined
/// point-wise (see the [FieldMetadata] construction at the end).
#[allow(clippy::too_many_arguments)]
fn merge_fields<'a, C: Cache, I: DoubleEndedIterator<Item = &'a LocIdent> + Clone>(
    cache: &mut C,
    merge_label: MergeLabel,
    field1: Field,
    field2: Field,
    fields: I,
) -> Result<Field, ErrorKind> {
    let Field {
        metadata: metadata1,
        value: value1,
        pending_contracts: pending_contracts1,
    } = field1;
    let Field {
        metadata: metadata2,
        value: value2,
        pending_contracts: pending_contracts2,
    } = field2;

    let metadata1 = metadata1.into_inner();
    let metadata2 = metadata2.into_inner();

    // Selecting either meta1's value, meta2's value, or the merge of the two values,
    // depending on which is defined and respective priorities.
    let (value, priority) = match (value1, value2) {
        // Equal priorities: recursively merge both values.
        //
        // NOTE(review): `fields_merge_closurize` returns a `Result`, but the error is discarded
        // by `unwrap()` here even though this function returns a `Result` itself. If the call can
        // actually fail at runtime, the error should be propagated instead — confirm.
        (Some(t1), Some(t2)) if metadata1.priority == metadata2.priority => (
            Some(fields_merge_closurize(cache, merge_label, t1, t2, fields).unwrap()),
            metadata1.priority,
        ),
        (Some(t1), _) if metadata1.priority > metadata2.priority => {
            (Some(t1.revert_closurize(cache)), metadata1.priority)
        }
        (Some(t1), None) => (Some(t1.revert_closurize(cache)), metadata1.priority),
        (_, Some(t2)) if metadata2.priority > metadata1.priority => {
            (Some(t2.revert_closurize(cache)), metadata2.priority)
        }
        (None, Some(t2)) => (Some(t2.revert_closurize(cache)), metadata2.priority),
        (None, None) => (None, Default::default()),
        // Unreachable: every (Some, Some) combination is covered by the equal, greater and
        // smaller priority arms above, and the (Some, None)/(None, Some)/(None, None) cases are
        // all handled explicitly.
        _ => unreachable!(),
    };

    // Since contracts are closurized, they don't need another local environment
    let empty = Environment::new();

    let pending_contracts = RuntimeContract::combine_dedup(
        pending_contracts1.revert_closurize(cache),
        &empty,
        pending_contracts2.revert_closurize(cache),
        &empty,
    );

    Ok(Field {
        metadata: FieldMetadata {
            doc: merge_doc(metadata1.doc, metadata2.doc),
            annotation: TypeAnnotation::combine_dedup(metadata1.annotation, metadata2.annotation),
            // If one of the record requires this field, then it mustn't be optional. The
            // resulting field is optional iff both are.
            opt: metadata1.opt && metadata2.opt,
            not_exported: metadata1.not_exported || metadata2.not_exported,
            priority,
        }
        .into(),
        value,
        pending_contracts,
    })
}
501
/// Merge two optional documentations.
///
/// The left documentation takes precedence: the result is `doc1` when it is defined, and `doc2`
/// otherwise.
///
/// This function is parametrized temporarily to accommodate both the mainline Nickel AST
/// ([crate::term::Term]) where documentation is represented as a `String`, and the new bytecode
/// AST where documentation is represented as an `Rc<str>`.
//FIXME: remove the type parameter `D` once we've moved evaluation to the new bytecode VM.
//Currently we need to handle both the old representation `D=String` and the new one `D=Rc<str>`.
pub(crate) fn merge_doc<D>(doc1: Option<D>, doc2: Option<D>) -> Option<D> {
    //FIXME: how to merge documentation? Just concatenate?
    match doc1 {
        Some(doc) => Some(doc),
        None => doc2,
    }
}
513
/// See [crate::eval::cache::Cache::saturate]. Saturation is a transformation on recursive cache
/// elements that is used when we must combine different values with different recursive
/// dependencies (say, the two values of fields being merged) into one expression.
///
/// Saturation is first and foremost a transformation of terms, but like
/// [crate::transform::Closurizable], it can be applied to other types that contain terms, hence
/// the trait.
trait Saturate: Sized {
    /// Take the content of a record field, and saturate the potential revertible element with the
    /// given fields. See [crate::eval::cache::Cache::saturate].
    ///
    /// If the expression is not a variable referring to an element in the cache (this can happen
    /// e.g. for numeric constants), we just return the term as it is, which falls into the zero
    /// dependencies special case.
    ///
    /// `fields` is the complete list of field names of the record being built, forwarded to the
    /// cache when saturating.
    fn saturate<'a, I: DoubleEndedIterator<Item = &'a LocIdent> + Clone, C: Cache>(
        self,
        cache: &mut C,
        fields: I,
    ) -> Result<Self, EvalError>;
}
534
535impl Saturate for NickelValue {
536 fn saturate<'a, I: DoubleEndedIterator<Item = &'a LocIdent> + Clone, C: Cache>(
537 self,
538 cache: &mut C,
539 fields: I,
540 ) -> Result<NickelValue, EvalError> {
541 let pos_idx = self.pos_idx();
542
543 match self.try_into_thunk() {
544 Ok(idx) => Ok(cache
545 .saturate(idx.clone(), fields.map(LocIdent::ident))
546 .with_pos_idx(pos_idx)),
547 // It's possible for constants, or arbitrary deserialized data since the introduction
548 // of the compact value representation (`NickelValue`), to not be closurized.
549 Err(this) => Ok(this),
550 }
551 }
552}
553
554/// Return the dependencies of a field when represented as a `NickelValue`.
555fn field_deps<C: Cache>(cache: &C, value: &NickelValue) -> Result<FieldDeps, EvalError> {
556 if let Some(idx) = value.as_thunk() {
557 Ok(cache.deps(idx).unwrap_or_else(FieldDeps::empty))
558 } else {
559 Ok(FieldDeps::empty())
560 }
561}
562
563/// Take the current environment, two fields with their local environment, and return a term which
564/// is the merge of the two fields, closurized in the provided final environment.
565///
566/// The element in the cache allocated for the result is revertible if and only if at least one of
567/// the original elements is (if one of the original values is overridable, then so is the merge of
568/// the two). In this case, the field dependencies are the union of the dependencies of each field.
569///
570/// The fields are saturated (see [saturate]) to properly propagate recursive dependencies down to
571/// `t1` and `t2` in the final, merged record.
572#[allow(clippy::too_many_arguments)]
573fn fields_merge_closurize<'a, I: DoubleEndedIterator<Item = &'a LocIdent> + Clone, C: Cache>(
574 cache: &mut C,
575 merge_label: MergeLabel,
576 t1: NickelValue,
577 t2: NickelValue,
578 fields: I,
579) -> Result<NickelValue, EvalError> {
580 let combined_deps = field_deps(cache, &t1)?.union(field_deps(cache, &t2)?);
581 let body = NickelValue::from(Term::op2(
582 BinaryOp::Merge(merge_label),
583 t1.saturate(cache, fields.clone())?,
584 t2.saturate(cache, fields)?,
585 ));
586
587 // We closurized the final result with appropriate dependencies, so we can return a closure
588 // with an empty environment.
589 let idx = cache.add(body.into(), BindingType::Revertible(combined_deps));
590
591 Ok(idx.into())
592}
593
/// Same as [Closurizable], but also revert the element if the term is a closure.
///
/// Reverting restores the original expression of a revertible cache element, so that recursive
/// fields are recomputed from the updated values after a merge (see
/// [crate::eval::cache::Cache]).
pub(super) trait RevertClosurize {
    /// Revert the element at the index inside the term (if any)
    fn revert_closurize<C: Cache>(self, cache: &mut C) -> Self;
}
599
600impl RevertClosurize for NickelValue {
601 fn revert_closurize<C: Cache>(self, _cache: &mut C) -> NickelValue {
602 if let Some(thunk) = self.as_thunk() {
603 thunk.revert().into()
604 } else {
605 // It's possible for constants, or arbitrary deserialized data since the introduction
606 // of the compact value representation (`NickelValue`), to not be closurized.
607 self
608 }
609 }
610}
611
612impl RevertClosurize for Field {
613 fn revert_closurize<C: Cache>(self, cache: &mut C) -> Field {
614 let value = self.value.map(|value| value.revert_closurize(cache));
615 let pending_contracts = self.pending_contracts.revert_closurize(cache);
616
617 Field {
618 metadata: self.metadata,
619 value,
620 pending_contracts,
621 }
622 }
623}
624
625impl RevertClosurize for RuntimeContract {
626 fn revert_closurize<C: Cache>(self, cache: &mut C) -> RuntimeContract {
627 self.map_contract(|ctr| ctr.revert_closurize(cache))
628 }
629}
630
631impl RevertClosurize for Vec<RuntimeContract> {
632 fn revert_closurize<C: Cache>(self, cache: &mut C) -> Vec<RuntimeContract> {
633 self.into_iter()
634 .map(|pending_contract| pending_contract.revert_closurize(cache))
635 .collect()
636 }
637}
638
639pub mod split {
640 use crate::term::IndexMap;
641
    /// The result of splitting the key spaces of two maps: bindings whose key appears only in the
    /// first map (`left`), keys present in both maps paired with their two values (`center`), and
    /// bindings whose key appears only in the second map (`right`).
    pub struct SplitResult<K, V1, V2> {
        pub left: IndexMap<K, V1>,
        pub center: IndexMap<K, (V1, V2)>,
        pub right: IndexMap<K, V2>,
    }
647
648 /// Split two maps m1 and m2 in three parts (left,center,right), where left holds bindings
649 /// `(key,value)` where key is not in `m2.keys()`, right is the dual (keys of m2 that are not
650 /// in m1), and center holds bindings for keys that are both in m1 and m2.
651 pub fn split<K, V1, V2>(m1: IndexMap<K, V1>, m2: IndexMap<K, V2>) -> SplitResult<K, V1, V2>
652 where
653 K: std::hash::Hash + Eq,
654 {
655 let mut left = IndexMap::new();
656 let mut center = IndexMap::new();
657 let mut right = m2;
658
659 for (key, value) in m1 {
660 // We don't perserve the ordering of the right part. However, note that currently, what
661 // matters is that the iteration order on hashmap is _deterministic_. It's a bonus that
662 // it corresponds to insertion order for record literal, but we don't make any
663 // guarantee on the result of merging or other operations. Exporting will sort the
664 // result anyway.
665 if let Some(v2) = right.swap_remove(&key) {
666 center.insert(key, (value, v2));
667 } else {
668 left.insert(key, value);
669 }
670 }
671
672 SplitResult {
673 left,
674 center,
675 right,
676 }
677 }
678
679 /// Same as [split], but takes the maps by reference, and clone data as needed.
680 pub fn split_ref<K, V1: Clone, V2: Clone>(
681 m1: &IndexMap<K, V1>,
682 m2: &IndexMap<K, V2>,
683 ) -> SplitResult<K, V1, V2>
684 where
685 K: std::hash::Hash + Eq + Clone,
686 {
687 let mut center = IndexMap::new();
688 // We reuse the hashmap structure to remember what has been already put in the center. To
689 // do so, we clone of the map and remove elements as found in the other. To limit the work
690 // done here, we clone the smallest of the two maps.
691 //
692 // We don't perserve the ordering of the non-cloned part. However, note that currently,
693 // what matters is that the iteration order on hashmap is _deterministic_. It's a bonus
694 // that it corresponds to insertion order for record literal, but we don't make any
695 // guarantee on the result of merging or other operations. Exporting will sort the result
696 // anyway.
697 if m1.len() < m2.len() {
698 let mut left = m1.clone();
699 let mut right = IndexMap::new();
700
701 for (key, v2) in m2.iter() {
702 if let Some(v1) = left.swap_remove(key) {
703 center.insert(key.clone(), (v1, v2.clone()));
704 } else {
705 right.insert(key.clone(), v2.clone());
706 }
707 }
708
709 SplitResult {
710 left,
711 center,
712 right,
713 }
714 } else {
715 let mut left = IndexMap::new();
716 let mut right = m2.clone();
717
718 for (key, v1) in m1.iter() {
719 if let Some(v2) = right.swap_remove(key) {
720 center.insert(key.clone(), (v1.clone(), v2));
721 } else {
722 left.insert(key.clone(), v1.clone());
723 }
724 }
725
726 SplitResult {
727 left,
728 center,
729 right,
730 }
731 }
732 }
733
734 #[cfg(test)]
735 mod tests {
736 use super::*;
737
738 #[test]
739 fn all_left() -> Result<(), String> {
740 let mut m1 = IndexMap::new();
741 let m2 = IndexMap::<isize, isize>::new();
742
743 m1.insert(1, 1);
744 let SplitResult {
745 mut left,
746 center,
747 right,
748 } = split(m1, m2);
749
750 if left.swap_remove(&1) == Some(1)
751 && left.is_empty()
752 && center.is_empty()
753 && right.is_empty()
754 {
755 Ok(())
756 } else {
757 Err(String::from("Expected all elements to be in the left part"))
758 }
759 }
760
761 #[test]
762 fn all_right() -> Result<(), String> {
763 let m1 = IndexMap::<isize, isize>::new();
764 let mut m2 = IndexMap::new();
765
766 m2.insert(1, 1);
767 let SplitResult {
768 left,
769 center,
770 mut right,
771 } = split(m1, m2);
772
773 if right.swap_remove(&1) == Some(1)
774 && right.is_empty()
775 && left.is_empty()
776 && center.is_empty()
777 {
778 Ok(())
779 } else {
780 Err(String::from(
781 "Expected all elements to be in the right part",
782 ))
783 }
784 }
785
786 #[test]
787 fn all_center() -> Result<(), String> {
788 let mut m1 = IndexMap::new();
789 let mut m2 = IndexMap::new();
790
791 m1.insert(1, 1);
792 m2.insert(1, 2);
793 let SplitResult {
794 left,
795 mut center,
796 right,
797 } = split(m1, m2);
798
799 if center.swap_remove(&1) == Some((1, 2))
800 && center.is_empty()
801 && left.is_empty()
802 && right.is_empty()
803 {
804 Ok(())
805 } else {
806 Err(String::from(
807 "Expected all elements to be in the center part",
808 ))
809 }
810 }
811
812 #[test]
813 fn mixed() -> Result<(), String> {
814 let mut m1 = IndexMap::new();
815 let mut m2 = IndexMap::new();
816
817 m1.insert(1, 1);
818 m1.insert(2, 1);
819 m2.insert(1, -1);
820 m2.insert(3, -1);
821 let SplitResult {
822 mut left,
823 mut center,
824 mut right,
825 } = split(m1, m2);
826
827 if left.swap_remove(&2) == Some(1)
828 && center.swap_remove(&1) == Some((1, -1))
829 && right.swap_remove(&3) == Some(-1)
830 && left.is_empty()
831 && center.is_empty()
832 && right.is_empty()
833 {
834 Ok(())
835 } else {
836 Err(String::from(
837 "Expected all elements to be in the center part",
838 ))
839 }
840 }
841 }
842}