// mlt_core/encoder/writer.rs

1use std::collections::HashMap;
2use std::{io, mem};
3
4use fsst::Compressor;
5use integer_encoding::VarIntWriter as _;
6
7use crate::decoder::{ColumnType, Morton};
8use crate::encoder::model::{CurveParams, ExplicitEncoder, StrEncoding, StreamCtx};
9use crate::encoder::{EncoderConfig, IntEncoder, VertexBufferType};
10use crate::utils::BinarySerializer as _;
11use crate::{MltError, MltResult};
12
13/// Stateful encoder that accumulates encoded layer bytes.
14///
15/// Logical temporary buffers live in `Codecs` and are passed alongside
16/// the encoder while a stream is being transformed and serialized. Physical
17/// encoders live here with their own scratch buffers, then copy complete
18/// payloads into [`data`](Encoder::data).
19///
20/// # Buffer layout
21///
22/// The MLT layer wire format is:
23///
24/// ```text
25/// [varint(body_len + 1)] [tag = 1]
26/// [name: string] [extent: varint] [column_count: varint]   <- hdr
27/// [col_type₁] [col_type₂] … [col_typeN]                    <- meta
28/// [col₁ stream data] [col₂ stream data] … [colN stream data] <- data
29/// ```
30///
31/// The three sections are accumulated into separate buffers so they can be
32/// combined at the end *without* any in-place insertion or extra copies:
33///
34/// * [`hdr`] – layer header (name, extent, `column_count`).
35/// * [`meta`] – column-type bytes (one byte + optional name per column).
36/// * [`data`] – encoded stream data; also the target of [`impl Write`].
37///
38/// # Sort-strategy trialing
39///
40/// Create one `Encoder` per sort-strategy trial, encode the layer into it,
41/// and keep the one whose `total_len()` is smallest:
42///
43/// ```rust,ignore
44/// let mut codecs = Codecs::default();
45/// let mut best: Option<Encoder> = None;
46/// for strategy in strategies {
47///     let mut enc = Encoder::new(cfg);
48///     layer.write_to(&mut enc, &mut codecs)?;
49///     if best.as_ref().is_none_or(|b| enc.total_len() < b.total_len()) {
50///         best = Some(enc);
51///     }
52/// }
53/// return best.unwrap().into_layer_bytes();
54/// ```
55///
56/// # Stream-level encoding alternatives
57///
58/// Use [`Encoder::try_alternatives`] to open a competition,
59/// then submit each candidate via `AltSession::with`.  The guard's `Drop`
60/// impl finalises the competition automatically:
61///
62/// ```rust,ignore
63/// let mut alt = enc.try_alternatives();
64/// alt.with(|enc| write_stream_as_varint(data, enc))?;
65/// alt.with(|enc| write_stream_as_fastpfor(data, enc))?;
66/// // alt drops → keeps whichever was shorter
67/// ```
68///
69/// [`hdr`]: Encoder::hdr
70/// [`meta`]: Encoder::meta
71/// [`data`]: Encoder::data
72/// [`impl Write`]: Encoder#impl-Write
#[derive(Default)]
pub struct Encoder {
    /// Encoding configuration: controls which optimization strategies are tried
    /// (sort orders, compression algorithms, etc.).
    ///
    /// Set once at construction time via [`Encoder::new`]; propagated
    /// automatically to all sub-encoders so individual encode methods do not
    /// need a separate `cfg` argument.
    pub cfg: EncoderConfig,

    /// When [`Some`], property / ID / geometry encoders use `ExplicitEncoder`
    /// callbacks instead of trying candidate encodings. When [`None`], the
    /// automatic optimization path runs.
    pub(crate) explicit: Option<ExplicitEncoder>,

    /// Layer header bytes: `name`, `extent`, `column_count`.
    ///
    /// Written to `hdr` via [`Encoder::write_header`].  This section comes
    /// first in the wire format and is never subject to alternatives.
    pub hdr: Vec<u8>,

    /// Column-type metadata bytes.
    ///
    /// Each column contributes one type byte (plus a name string for property
    /// columns).  Written by the `write_columns_meta_to` methods, which write
    /// directly to `enc.meta`.  This section comes second in the wire format
    /// and is never subject to alternatives (column types are fixed).
    pub meta: Vec<u8>,

    /// Encoded stream data.
    ///
    /// All stream counts, per-stream encoding-metadata bytes, and encoded
    /// data bytes land here via [`impl Write`].  This section comes last in
    /// the wire format and is where stream-level alternatives compete.
    ///
    /// [`impl Write`]: Encoder#impl-Write
    pub data: Vec<u8>,

    /// Morton parameters for this layer's vertex set; `None` if the extent
    /// exceeds 16 bits per axis (Morton encoding is unusable in that case).
    /// Pre-populated by [`StagedLayer::encode_into`](crate::encoder::StagedLayer::encode_into).
    pub(crate) morton_cache: Option<Morton>,

    /// Hilbert curve parameters for this layer's vertex set. Pre-populated by
    /// [`StagedLayer::encode_into`](crate::encoder::StagedLayer::encode_into).
    pub(crate) hilbert_cache: Option<CurveParams>,

    /// Cached FSST compressor per string column, keyed by column name.
    /// `None` means training found FSST not viable for that column.
    /// Trained on deduplicated values on the first sort trial, reused on subsequent trials.
    pub(crate) fsst_cache: HashMap<String, Option<Compressor>>,

    // -----------------------------------------------------------------------
    // Alternatives state — a stack that supports nested competitions.
    //
    // Invariant between candidates at any level:
    //   data.len() == level.data_start + level.best_data.unwrap_or(0)
    //   meta.len() == level.meta_start + level.best_meta.unwrap_or(0)
    //
    // (i.e. the buffers end exactly where the current best candidate ends.)
    //
    // Empty stack ↔ no competition in progress.
    // -----------------------------------------------------------------------
    /// Stack of active encoding competitions, innermost last.
    ///
    /// Empty while no [`Encoder::try_alternatives`] session
    /// is in progress.
    alt_stack: Vec<AltLevel>,
}
140
141impl Encoder {
142    /// Create a new encoder with the given [`EncoderConfig`].
143    ///
144    /// Use [`Encoder::default()`] when the default configuration is sufficient.
145    #[inline]
146    #[must_use]
147    pub fn new(cfg: EncoderConfig) -> Self {
148        Self {
149            cfg,
150            ..Self::default()
151        }
152    }
153
154    /// Like [`Self::new`] but with the explicit encoder set for deterministic encoding
155    /// (tests, synthetics). Use with `StagedLayer::encode_explicit`.
156    #[inline]
157    #[must_use]
158    pub fn with_explicit(cfg: EncoderConfig, explicit: ExplicitEncoder) -> Self {
159        Self {
160            cfg,
161            explicit: Some(explicit),
162            ..Self::default()
163        }
164    }
165
166    /// Ensure this encoder is in the good state, and moves results to a new instance.
167    /// This allows current instance to be reused for other experiment, avoiding repeat of some operations.
168    #[must_use]
169    pub(crate) fn preserve_results(&mut self) -> Self {
170        assert_eq!(self.alt_stack.len(), 0, "Alternatives stack is not empty");
171        Self {
172            cfg: EncoderConfig::default(),
173            explicit: None,
174            hdr: mem::take(&mut self.hdr),
175            meta: mem::take(&mut self.meta),
176            data: mem::take(&mut self.data),
177            morton_cache: None,
178            hilbert_cache: None,
179            fsst_cache: HashMap::new(),
180            alt_stack: vec![],
181        }
182    }
183
184    #[inline]
185    pub(crate) fn write_column_type(&mut self, column_type: ColumnType) -> MltResult<()> {
186        column_type.write_to(&mut self.meta).map_err(MltError::from)
187    }
188
189    #[inline]
190    pub(crate) fn write_column_name(&mut self, name: &str) -> MltResult<()> {
191        self.meta.write_string(name).map_err(MltError::from)
192    }
193
194    #[inline]
195    pub(crate) fn write_column_header(
196        &mut self,
197        column_type: ColumnType,
198        name: &str,
199    ) -> MltResult<()> {
200        self.write_column_type(column_type)?;
201        self.write_column_name(name)
202    }
203
204    /// Write the layer header (`name`, `extent`, `column_count`) to [`hdr`].
205    ///
206    /// Must be called exactly once per layer, after all column meta and data.
207    ///
208    /// [`hdr`]: Encoder::hdr
209    #[hotpath::measure]
210    pub fn write_header(&mut self, name: &str, extent: u32, column_count: usize) -> MltResult<()> {
211        debug_assert!(
212            self.alt_stack.is_empty(),
213            "write_header called with an open alternatives session"
214        );
215        let name_len = u32::try_from(name.len())?;
216        let column_count = u32::try_from(column_count)?;
217        self.hdr.write_varint(name_len).map_err(MltError::from)?;
218        self.hdr.extend_from_slice(name.as_bytes());
219        self.hdr.write_varint(extent).map_err(MltError::from)?;
220        self.hdr
221            .write_varint(column_count)
222            .map_err(MltError::from)?;
223        Ok(())
224    }
225
226    /// When [`Self::explicit`] is [`Some`], returns the callback-chosen [`IntEncoder`].
227    /// [`None`] means run automatic candidate selection for that stream.
228    #[inline]
229    pub(crate) fn override_int_enc(&self, ctx: &StreamCtx<'_>) -> Option<IntEncoder> {
230        self.explicit.as_ref().map(|e| (e.get_int_encoder)(ctx))
231    }
232
233    /// When [`Self::explicit`] is [`Some`], returns the callback-chosen [`StrEncoding`].
234    /// [`None`] means run automatic string / shared-dict corpus selection.
235    #[inline]
236    pub(crate) fn override_str_enc(&self, name: &str) -> Option<StrEncoding> {
237        self.explicit.as_ref().map(|e| (e.get_str_encoding)(name))
238    }
239
240    /// Pinned vertex layout when an explicit encoder is active.
241    #[inline]
242    #[allow(clippy::unused_self)]
243    pub(crate) fn override_vertex_buffer_type(&self) -> Option<VertexBufferType> {
244        self.explicit.as_ref().map(|e| e.vertex_buffer_type)
245    }
246
247    /// Whether to force writing a geometry stream even when its data is empty.
248    ///
249    /// Delegates to [`ExplicitEncoder::force_stream`]; returns `false` when no explicit
250    /// encoder is active (the default "skip empty streams" behavior).
251    #[inline]
252    pub(crate) fn force_stream(&self, ctx: &StreamCtx<'_>) -> bool {
253        self.explicit
254            .as_ref()
255            .is_some_and(|e| (e.force_stream)(ctx))
256    }
257
258    /// Total encoded bytes across all three sections (`hdr + meta + data`).
259    #[inline]
260    #[must_use]
261    pub fn total_len(&self) -> usize {
262        self.hdr.len() + self.meta.len() + self.data.len()
263    }
264
265    /// Concatenate `hdr + meta + data` into a single buffer **without** a
266    /// tag/size prefix.
267    ///
268    /// Use this when the caller expects raw layer body bytes (without the size/tag framing)
269    /// rather than a complete framed wire record — see [`Self::into_layer_bytes`] for the framed form.
270    #[must_use]
271    pub fn into_raw_bytes(mut self) -> Vec<u8> {
272        let mut out = Vec::with_capacity(self.hdr.len() + self.meta.len() + self.data.len());
273        out.append(&mut self.hdr);
274        out.append(&mut self.meta);
275        out.append(&mut self.data);
276        out
277    }
278
279    /// Assemble the complete Tag-01 layer record.
280    pub fn into_layer_bytes(self) -> MltResult<Vec<u8>> {
281        self.into_layer_bytes_with_tag(1)
282    }
283
284    /// Assemble a complete layer record for the given `tag`:
285    /// `[varint(body_len + 1)][tag][hdr][meta][data]`.
286    fn into_layer_bytes_with_tag(mut self, tag: u8) -> MltResult<Vec<u8>> {
287        debug_assert!(
288            self.alt_stack.is_empty(),
289            "into_layer_bytes_with_tag called with an open alternatives session"
290        );
291        let body_len = self.hdr.len() + self.meta.len() + self.data.len();
292        let size = u32::try_from(body_len + 1)?; // +1 for the tag byte
293        let mut out = Vec::with_capacity(5 + 1 + body_len);
294        out.write_varint(size).map_err(MltError::from)?;
295        out.push(tag);
296        out.append(&mut self.hdr);
297        out.append(&mut self.meta);
298        out.append(&mut self.data);
299        Ok(out)
300    }
301
302    /// Begin a new encoding competition.
303    ///
304    /// Returns an `AltSession` guard.  Submit each candidate via
305    /// `AltSession::with`; the guard's `Drop` impl finalises
306    /// the competition and retains the shortest candidate automatically.
307    ///
308    /// Nesting is supported: calling `try_alternatives` inside a
309    /// `with` closure opens an inner competition on the same stack,
310    /// resolved before the outer candidate is committed.
311    ///
312    /// # Example
313    ///
314    /// ```rust,ignore
315    /// let mut alt = enc.try_alternatives();
316    /// for cand in candidates {
317    ///     alt.with(|enc| write_candidate(cand, enc))?;
318    /// }
319    /// // alt drops → finalises the competition
320    /// ```
321    pub fn try_alternatives(&mut self) -> AltSession<'_> {
322        self.alt_stack.push(AltLevel {
323            data_start: self.data.len(),
324            meta_start: self.meta.len(),
325            best_data: None,
326            best_meta: None,
327        });
328        AltSession { enc: self }
329    }
330
331    /// Commit the current candidate at the innermost competition level.
332    ///
333    /// Compares bytes written since the last commit against the running best
334    /// by **total** (`data + meta`) size; keeps the shorter one.
335    ///
336    /// Called internally by `AltSession::with` on `Ok`.
337    fn alt_commit(&mut self) {
338        debug_assert!(
339            !self.alt_stack.is_empty(),
340            "alt_commit called outside an active AltSession"
341        );
342        let (data, meta, stack) = (&mut self.data, &mut self.meta, &mut self.alt_stack);
343        let level = stack.last_mut().unwrap();
344        Self::close_candidate(data, meta, level);
345    }
346
347    /// Finalize the innermost competition and pop it from the stack.
348    ///
349    /// Any bytes written since the last `alt_commit` are evaluated as a
350    /// final candidate; if no pending bytes exist and a best is already
351    /// recorded this is a cheap stack-pop.
352    fn alt_pop(&mut self) {
353        debug_assert!(
354            !self.alt_stack.is_empty(),
355            "alt_pop called outside an active AltSession"
356        );
357        {
358            let (data, meta, stack) = (&mut self.data, &mut self.meta, &mut self.alt_stack);
359            let level = stack.last_mut().unwrap();
360            let data_pending = data.len() - (level.data_start + level.best_data.unwrap_or(0));
361            let meta_pending = meta.len() - (level.meta_start + level.best_meta.unwrap_or(0));
362            if data_pending > 0 || meta_pending > 0 || level.best_data.is_none() {
363                Self::close_candidate(data, meta, level);
364            }
365        }
366        self.alt_stack.pop();
367    }
368
369    /// Shared compare-and-keep logic used by both `alt_commit` and `alt_pop`.
370    ///
371    /// Compares the bytes written since the last committed candidate against
372    /// the current best by **total** (`data + meta`) size.
373    /// Keeps the shorter one; ties preserve the existing best.
374    fn close_candidate(data: &mut Vec<u8>, meta: &mut Vec<u8>, level: &mut AltLevel) {
375        let best_data_end = level.data_start + level.best_data.unwrap_or(0);
376        let best_meta_end = level.meta_start + level.best_meta.unwrap_or(0);
377        let cand_data = data.len() - best_data_end;
378        let cand_meta = meta.len() - best_meta_end;
379        let cand_total = cand_data + cand_meta;
380        let best_total = level.best_data.unwrap_or(0) + level.best_meta.unwrap_or(0);
381        if level.best_data.is_none_or(|_| cand_total < best_total) {
382            // New best: shift data candidate bytes to data_start.
383            if level.best_data.is_some() {
384                data.copy_within(best_data_end..best_data_end + cand_data, level.data_start);
385                meta.copy_within(best_meta_end..best_meta_end + cand_meta, level.meta_start);
386            }
387            data.truncate(level.data_start + cand_data);
388            meta.truncate(level.meta_start + cand_meta);
389            level.best_data = Some(cand_data);
390            level.best_meta = Some(cand_meta);
391        } else {
392            // Not an improvement: discard.
393            data.truncate(best_data_end);
394            meta.truncate(best_meta_end);
395        }
396    }
397}
398
/// State for one level of an encoding competition.
///
/// Tracks the starting position in both the [`data`](Encoder::data) and
/// [`meta`](Encoder::meta) buffers, and the byte count of the best candidate
/// committed so far.
///
/// Candidates are compared by **total** bytes (`data + meta`); the shorter one
/// wins, with ties resolved in favor of the earlier candidate.
#[derive(Debug, Default, Clone)]
struct AltLevel {
    /// Length of `Encoder::data` when this competition level was opened.
    data_start: usize,
    /// Length of `Encoder::meta` when this competition level was opened.
    meta_start: usize,
    /// Byte count appended to `data` by the current best candidate.
    /// `None` until the first candidate is committed.
    best_data: Option<usize>,
    /// Byte count appended to `meta` by the current best candidate.
    /// `None` until the first candidate is committed.
    best_meta: Option<usize>,
}
416
/// RAII guard for a stream-encoding competition opened by [`Encoder::try_alternatives`].
///
/// Submit each candidate via [`with`](AltSession::with); on `Ok` the candidate is
/// committed (compared against the running best and kept if shorter); on `Err`
/// the partial write is rolled back and the error propagates.  The guard's
/// `Drop` impl finalises the competition automatically, so the [`Encoder`] is
/// always left in a consistent state even when an error exits the loop early.
///
/// Nesting is allowed: calling [`Encoder::try_alternatives`] inside a
/// `with` closure opens an inner competition that is fully
/// resolved before the outer candidate is committed.
#[must_use = "AltSession must be used; drop it to finalise the competition"]
pub struct AltSession<'a> {
    /// Borrow of the encoder whose innermost `alt_stack` level this guard owns.
    enc: &'a mut Encoder,
}
432
433impl AltSession<'_> {
434    /// Encode one candidate.
435    ///
436    /// - **`Ok`** — commits the candidate; replaces the running best if shorter.
437    /// - **`Err`** — truncates the partial write back to the pre-call checkpoint
438    ///   and returns the error.  The guard's `Drop` still finalises the
439    ///   competition cleanly using whichever candidates succeeded so far.
440    #[hotpath::measure]
441    pub fn with<F>(&mut self, f: F) -> MltResult<()>
442    where
443        F: FnOnce(&mut Encoder) -> MltResult<()>,
444    {
445        let data_cp = self.enc.data.len();
446        let meta_cp = self.enc.meta.len();
447        match f(self.enc) {
448            Ok(()) => {
449                self.enc.alt_commit();
450                Ok(())
451            }
452            Err(e) => {
453                self.enc.data.truncate(data_cp);
454                self.enc.meta.truncate(meta_cp);
455                Err(e)
456            }
457        }
458    }
459}
460
impl Drop for AltSession<'_> {
    fn drop(&mut self) {
        // Finalise the innermost competition: any uncommitted bytes are
        // evaluated as a final candidate, then the level is popped.  Running
        // this in `Drop` keeps the encoder consistent even on early exits.
        self.enc.alt_pop();
    }
}
466
467/// Writes bytes to [`Encoder::data`].
468///
469/// This blanket implementation makes `Encoder` compatible with all
470/// `BinarySerializer`, `VarIntWriter`, and other `Write`-based utilities so that
471/// stream-data methods do not need a separate code path.
472impl io::Write for Encoder {
473    #[inline]
474    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
475        self.data.write(buf)
476    }
477
478    #[inline]
479    fn flush(&mut self) -> io::Result<()> {
480        Ok(())
481    }
482
483    #[inline]
484    fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
485        self.data.write_all(buf)
486    }
487}
488
#[cfg(test)]
mod tests {
    use super::*;

    /// Helper: directly extend `enc.data` with raw bytes (simulates a stream write).
    fn push(enc: &mut Encoder, bytes: &[u8]) {
        enc.data.extend_from_slice(bytes);
    }

    // ── basic single-level behavior ──────────────────────────────────────

    /// The shortest candidate wins.
    #[test]
    fn alternatives_keeps_shortest() {
        let mut enc = Encoder::default();
        push(&mut enc, b"prefix");

        let mut alt = enc.try_alternatives();
        alt.with(|enc| {
            push(enc, b"longer");
            Ok(())
        })
        .unwrap(); // 6 bytes
        alt.with(|enc| {
            push(enc, b"ab");
            Ok(())
        })
        .unwrap(); // 2 bytes — shortest
        alt.with(|enc| {
            push(enc, b"xyz");
            Ok(())
        })
        .unwrap(); // 3 bytes
        drop(alt);

        // Prefix untouched; only the 2-byte winner survives.
        assert_eq!(enc.data, b"prefixab");
    }

    /// On a tie the first candidate is kept (strict `<`, not `<=`).
    #[test]
    fn alternatives_tie_keeps_first() {
        let mut enc = Encoder::default();

        let mut alt = enc.try_alternatives();
        alt.with(|enc| {
            push(enc, b"aaa");
            Ok(())
        })
        .unwrap(); // 3 bytes
        alt.with(|enc| {
            push(enc, b"bbb");
            Ok(())
        })
        .unwrap(); // 3 bytes — equal
        drop(alt);

        assert_eq!(enc.data, b"aaa");
    }

    /// A single candidate is unconditionally the winner.
    #[test]
    fn alternatives_single_candidate() {
        let mut enc = Encoder::default();

        let mut alt = enc.try_alternatives();
        alt.with(|enc| {
            push(enc, b"only");
            Ok(())
        })
        .unwrap();
        drop(alt);

        assert_eq!(enc.data, b"only");
    }

    /// Bytes written before `try_alternatives` are left intact throughout.
    #[test]
    fn prefix_bytes_are_preserved() {
        let mut enc = Encoder::default();
        push(&mut enc, b"HDR");

        let mut alt = enc.try_alternatives();
        alt.with(|enc| {
            push(enc, b"long_encoding");
            Ok(())
        })
        .unwrap(); // 13 bytes
        alt.with(|enc| {
            push(enc, b"short");
            Ok(())
        })
        .unwrap(); // 5 bytes — winner
        drop(alt);

        assert_eq!(&enc.data[..3], b"HDR");
        assert_eq!(&enc.data[3..], b"short");
    }

    /// Dropping the guard after all candidates are committed is a cheap stack-pop.
    #[test]
    fn drop_after_all_committed_is_noop() {
        let mut enc = Encoder::default();

        let mut alt = enc.try_alternatives();
        alt.with(|enc| {
            push(enc, b"best");
            Ok(())
        })
        .unwrap();
        drop(alt); // all candidates committed; drop just pops the stack

        assert!(enc.alt_stack.is_empty(), "stack empty after drop");
        assert_eq!(enc.data, b"best");
    }

    // ── nesting ───────────────────────────────────────────────────────────

    /// An inner competition is resolved before the outer candidate is committed.
    #[test]
    fn nested_alternatives() {
        let mut enc = Encoder::default();

        let mut outer = enc.try_alternatives();

        // Outer candidate A: header bytes + inner competition.
        outer
            .with(|enc| {
                push(enc, b"A:");
                let mut inner = enc.try_alternatives(); // inner level pushed
                inner.with(|enc| {
                    push(enc, b"long_inner");
                    Ok(())
                })?; // 10 bytes
                inner.with(|enc| {
                    push(enc, b"in");
                    Ok(())
                })?; // 2 bytes — inner winner
                drop(inner); // inner done; enc = b"A:in"
                push(enc, b"!");
                Ok(())
            })
            .unwrap(); // outer candidate A = b"A:in!" (5 bytes)

        // Outer candidate B: shorter overall.
        outer
            .with(|enc| {
                push(enc, b"B");
                Ok(())
            })
            .unwrap(); // 1 byte — winner
        drop(outer);

        assert_eq!(enc.data, b"B");
    }

    /// Stack depth tracks nesting level; inner guard drops before outer closure returns.
    #[test]
    fn nesting_depth_reflected_in_stack() {
        let mut enc = Encoder::default();

        assert_eq!(enc.alt_stack.len(), 0);
        let mut outer = enc.try_alternatives();

        outer
            .with(|enc| {
                assert_eq!(enc.alt_stack.len(), 1); // outer level on stack
                let mut inner = enc.try_alternatives();
                inner.with(|enc| {
                    assert_eq!(enc.alt_stack.len(), 2); // both levels on stack
                    push(enc, b"x");
                    Ok(())
                })?;
                drop(inner); // inner popped
                assert_eq!(enc.alt_stack.len(), 1);
                push(enc, b"y");
                Ok(())
            })
            .unwrap();

        drop(outer); // outer popped
        assert_eq!(enc.alt_stack.len(), 0);
    }

    // ── meta buffer tracking ──────────────────────────────────────────────

    /// Writes to both `data` and `meta` are rolled back for the losing
    /// candidate and kept for the winner, measured by total bytes.
    #[test]
    fn alternatives_tracks_meta_and_data() {
        let mut enc = Encoder::default();
        enc.data.extend_from_slice(b"D");
        enc.meta.extend_from_slice(b"M");

        let mut alt = enc.try_alternatives();
        // Candidate A: 4 data + 2 meta = 6 total
        alt.with(|enc| {
            push(enc, b"DDDD");
            enc.meta.extend_from_slice(b"mm");
            Ok(())
        })
        .unwrap();
        // Candidate B: 1 data + 1 meta = 2 total — winner
        alt.with(|enc| {
            push(enc, b"d");
            enc.meta.extend_from_slice(b"n");
            Ok(())
        })
        .unwrap();
        drop(alt);

        assert_eq!(enc.data, b"Dd");
        assert_eq!(enc.meta, b"Mn");
    }

    // ── error rollback ────────────────────────────────────────────────────

    /// A failing candidate is rolled back; prior best is preserved.
    #[test]
    fn error_candidate_is_rolled_back() {
        let mut enc = Encoder::default();

        let mut alt = enc.try_alternatives();
        alt.with(|enc| {
            push(enc, b"ok");
            Ok(())
        })
        .unwrap();
        let _ = alt.with(|enc| {
            push(enc, b"partial");
            Err(MltError::IntegerOverflow) // simulated failure
        });
        drop(alt);

        assert_eq!(enc.data, b"ok"); // "partial" was rolled back; "ok" kept
    }
}