regex_automata/dfa/
sparse.rs

1/*!
2Types and routines specific to sparse DFAs.
3
4This module is the home of [`sparse::DFA`](DFA).
5
6Unlike the [`dense`] module, this module does not contain a builder or
7configuration specific for sparse DFAs. Instead, the intended way to build a
8sparse DFA is either by using a default configuration with its constructor
9[`sparse::DFA::new`](DFA::new), or by first configuring the construction of a
10dense DFA with [`dense::Builder`] and then calling [`dense::DFA::to_sparse`].
11For example, this configures a sparse DFA to do an overlapping search:
12
13```
14use regex_automata::{
15    dfa::{Automaton, OverlappingState, dense},
16    HalfMatch, Input, MatchKind,
17};
18
19let dense_re = dense::Builder::new()
20    .configure(dense::Config::new().match_kind(MatchKind::All))
21    .build(r"Samwise|Sam")?;
22let sparse_re = dense_re.to_sparse()?;
23
24// Setup our haystack and initial start state.
25let input = Input::new("Samwise");
26let mut state = OverlappingState::start();
27
28// First, 'Sam' will match.
29sparse_re.try_search_overlapping_fwd(&input, &mut state)?;
30assert_eq!(Some(HalfMatch::must(0, 3)), state.get_match());
31
32// And now 'Samwise' will match.
33sparse_re.try_search_overlapping_fwd(&input, &mut state)?;
34assert_eq!(Some(HalfMatch::must(0, 7)), state.get_match());
35# Ok::<(), Box<dyn std::error::Error>>(())
36```
37*/
38
39#[cfg(feature = "dfa-build")]
40use core::iter;
41use core::{fmt, mem::size_of};
42
43#[cfg(feature = "dfa-build")]
44use alloc::{vec, vec::Vec};
45
46#[cfg(feature = "dfa-build")]
47use crate::dfa::dense::{self, BuildError};
48use crate::{
49    dfa::{
50        automaton::{fmt_state_indicator, Automaton, StartError},
51        dense::Flags,
52        special::Special,
53        StartKind, DEAD,
54    },
55    util::{
56        alphabet::{ByteClasses, ByteSet},
57        escape::DebugByte,
58        int::{Pointer, Usize, U16, U32},
59        prefilter::Prefilter,
60        primitives::{PatternID, StateID},
61        search::Anchored,
62        start::{self, Start, StartByteMap},
63        wire::{self, DeserializeError, Endian, SerializeError},
64    },
65};
66
// Serialization metadata: a human-readable label identifying this wire format
// and its version number. (Presumably validated during deserialization; the
// routines that read/write these live later in this file — confirm there.)
const LABEL: &str = "rust-regex-automata-dfa-sparse";
const VERSION: u32 = 2;
69
70/// A sparse deterministic finite automaton (DFA) with variable sized states.
71///
72/// In contrast to a [dense::DFA], a sparse DFA uses a more space efficient
73/// representation for its transitions. Consequently, sparse DFAs may use much
74/// less memory than dense DFAs, but this comes at a price. In particular,
75/// reading the more space efficient transitions takes more work, and
76/// consequently, searching using a sparse DFA is typically slower than a dense
77/// DFA.
78///
79/// A sparse DFA can be built using the default configuration via the
80/// [`DFA::new`] constructor. Otherwise, one can configure various aspects of a
81/// dense DFA via [`dense::Builder`], and then convert a dense DFA to a sparse
82/// DFA using [`dense::DFA::to_sparse`].
83///
84/// In general, a sparse DFA supports all the same search operations as a dense
85/// DFA.
86///
87/// Making the choice between a dense and sparse DFA depends on your specific
88/// work load. If you can sacrifice a bit of search time performance, then a
89/// sparse DFA might be the best choice. In particular, while sparse DFAs are
90/// probably always slower than dense DFAs, you may find that they are easily
91/// fast enough for your purposes!
92///
93/// # Type parameters
94///
95/// A `DFA` has one type parameter, `T`, which is used to represent the parts
96/// of a sparse DFA. `T` is typically a `Vec<u8>` or a `&[u8]`.
97///
98/// # The `Automaton` trait
99///
100/// This type implements the [`Automaton`] trait, which means it can be used
101/// for searching. For example:
102///
103/// ```
104/// use regex_automata::{dfa::{Automaton, sparse::DFA}, HalfMatch, Input};
105///
106/// let dfa = DFA::new("foo[0-9]+")?;
107/// let expected = Some(HalfMatch::must(0, 8));
108/// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?);
109/// # Ok::<(), Box<dyn std::error::Error>>(())
110/// ```
#[derive(Clone)]
pub struct DFA<T> {
    // When compared to a dense DFA, a sparse DFA *looks* a lot simpler
    // representation-wise. In reality, it is perhaps more complicated. Namely,
    // in a dense DFA, all information needs to be very cheaply accessible
    // using only state IDs. In a sparse DFA however, each state uses a
    // variable amount of space because each state encodes more information
    // than just its transitions. Each state also includes an accelerator if
    // one exists, along with the matching pattern IDs if the state is a match
    // state.
    //
    // That is, a lot of the complexity is pushed down into how each state
    // itself is represented.
    //
    // The variable-width encoded states and their transitions.
    tt: Transitions<T>,
    // The table of starting states.
    st: StartTable<T>,
    // State ID metadata identifying "special" states, remapped from the
    // dense DFA this was built from (see `from_dense`).
    special: Special,
    // Optional prefilter, carried over from the dense DFA.
    pre: Option<Prefilter>,
    // The set of bytes that cause a search to quit, copied from the dense
    // DFA.
    quitset: ByteSet,
    // Miscellaneous flags copied from the dense DFA.
    flags: Flags,
}
131
#[cfg(feature = "dfa-build")]
impl DFA<Vec<u8>> {
    /// Build a sparse DFA for the given regular expression using a default
    /// configuration.
    ///
    /// For a non-default configuration, set one up with [`dense::Builder`],
    /// build a dense DFA, and then convert it with [`dense::DFA::to_sparse`].
    ///
    /// # Example
    ///
    /// ```
    /// use regex_automata::{dfa::{Automaton, sparse}, HalfMatch, Input};
    ///
    /// let dfa = sparse::DFA::new("foo[0-9]+bar")?;
    ///
    /// let expected = Some(HalfMatch::must(0, 11));
    /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345bar"))?);
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```
    #[cfg(feature = "syntax")]
    pub fn new(pattern: &str) -> Result<DFA<Vec<u8>>, BuildError> {
        // Build a dense DFA first, then compress it into a sparse one.
        let dense = dense::Builder::new().build(pattern)?;
        dense.to_sparse()
    }

    /// Build a multi-pattern sparse DFA for the given regular expressions
    /// using a default configuration.
    ///
    /// For a non-default configuration, set one up with [`dense::Builder`],
    /// build a dense DFA, and then convert it with [`dense::DFA::to_sparse`].
    ///
    /// # Example
    ///
    /// ```
    /// use regex_automata::{dfa::{Automaton, sparse}, HalfMatch, Input};
    ///
    /// let dfa = sparse::DFA::new_many(&["[0-9]+", "[a-z]+"])?;
    /// let expected = Some(HalfMatch::must(1, 3));
    /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345bar"))?);
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```
    #[cfg(feature = "syntax")]
    pub fn new_many<P: AsRef<str>>(
        patterns: &[P],
    ) -> Result<DFA<Vec<u8>>, BuildError> {
        // Same strategy as `new`: dense first, then sparsify.
        let dense = dense::Builder::new().build_many(patterns)?;
        dense.to_sparse()
    }
}
185
186#[cfg(feature = "dfa-build")]
187impl DFA<Vec<u8>> {
188    /// Create a new DFA that matches every input.
189    ///
190    /// # Example
191    ///
192    /// ```
193    /// use regex_automata::{
194    ///     dfa::{Automaton, sparse},
195    ///     HalfMatch, Input,
196    /// };
197    ///
198    /// let dfa = sparse::DFA::always_match()?;
199    ///
200    /// let expected = Some(HalfMatch::must(0, 0));
201    /// assert_eq!(expected, dfa.try_search_fwd(&Input::new(""))?);
202    /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo"))?);
203    /// # Ok::<(), Box<dyn std::error::Error>>(())
204    /// ```
205    pub fn always_match() -> Result<DFA<Vec<u8>>, BuildError> {
206        dense::DFA::always_match()?.to_sparse()
207    }
208
209    /// Create a new sparse DFA that never matches any input.
210    ///
211    /// # Example
212    ///
213    /// ```
214    /// use regex_automata::{dfa::{Automaton, sparse}, Input};
215    ///
216    /// let dfa = sparse::DFA::never_match()?;
217    /// assert_eq!(None, dfa.try_search_fwd(&Input::new(""))?);
218    /// assert_eq!(None, dfa.try_search_fwd(&Input::new("foo"))?);
219    /// # Ok::<(), Box<dyn std::error::Error>>(())
220    /// ```
221    pub fn never_match() -> Result<DFA<Vec<u8>>, BuildError> {
222        dense::DFA::never_match()?.to_sparse()
223    }
224
    /// The implementation for constructing a sparse DFA from a dense DFA.
    ///
    /// This copies over the start table, special state info, prefilter,
    /// quit set and flags from the dense DFA, re-encoding state IDs as byte
    /// offsets into the sparse transition representation along the way.
    pub(crate) fn from_dense<T: AsRef<[u32]>>(
        dfa: &dense::DFA<T>,
    ) -> Result<DFA<Vec<u8>>, BuildError> {
        // In order to build the transition table, we need to be able to write
        // state identifiers for each of the "next" transitions in each state.
        // Our state identifiers correspond to the byte offset in the
        // transition table at which the state is encoded. Therefore, we do not
        // actually know what the state identifiers are until we've allocated
        // exactly as much space as we need for each state. Thus, construction
        // of the transition table happens in two passes.
        //
        // In the first pass, we fill out the shell of each state, which
        // includes the transition length, the input byte ranges and
        // zero-filled space for the transitions and accelerators, if present.
        // In this first pass, we also build up a map from the state identifier
        // index of the dense DFA to the state identifier in this sparse DFA.
        //
        // In the second pass, we fill in the transitions based on the map
        // built in the first pass.

        // The capacity given here reflects a minimum. (Well, the true minimum
        // is likely even bigger, but hopefully this saves a few reallocs.)
        let mut sparse = Vec::with_capacity(StateID::SIZE * dfa.state_len());
        // This maps state indices from the dense DFA to StateIDs in the sparse
        // DFA. We build out this map on the first pass, and then use it in the
        // second pass to back-fill our transitions.
        let mut remap: Vec<StateID> = vec![DEAD; dfa.state_len()];
        for state in dfa.states() {
            // This state's ID in the sparse DFA is its byte offset in
            // `sparse`.
            let pos = sparse.len();

            remap[dfa.to_index(state.id())] = StateID::new(pos)
                .map_err(|_| BuildError::too_many_states())?;
            // zero-filled space for the transition length
            sparse.push(0);
            sparse.push(0);

            let mut transition_len = 0;
            for (unit1, unit2, _) in state.sparse_transitions() {
                match (unit1.as_u8(), unit2.as_u8()) {
                    (Some(b1), Some(b2)) => {
                        transition_len += 1;
                        sparse.push(b1);
                        sparse.push(b2);
                    }
                    (None, None) => {}
                    (Some(_), None) | (None, Some(_)) => {
                        // can never occur because sparse_transitions never
                        // groups EOI with any other transition.
                        unreachable!()
                    }
                }
            }
            // Add dummy EOI transition. This is never actually read while
            // searching, but having space equivalent to the total number
            // of transitions is convenient. Otherwise, we'd need to track
            // a different number of transitions for the byte ranges as for
            // the 'next' states.
            //
            // N.B. The loop above is not guaranteed to yield the EOI
            // transition, since it may point to a DEAD state. By putting
            // it here, we always write the EOI transition, and thus
            // guarantee that our transition length is >0. Why do we always
            // need the EOI transition? Because in order to implement
            // Automaton::next_eoi_state, this lets us just ask for the last
            // transition. There are probably other/better ways to do this.
            transition_len += 1;
            sparse.push(0);
            sparse.push(0);

            // Check some assumptions about transition length.
            assert_ne!(
                transition_len, 0,
                "transition length should be non-zero",
            );
            assert!(
                transition_len <= 257,
                "expected transition length {transition_len} to be <= 257",
            );

            // Fill in the transition length.
            // Since transition length is always <= 257, we use the most
            // significant bit to indicate whether this is a match state or
            // not.
            let ntrans = if dfa.is_match_state(state.id()) {
                transition_len | (1 << 15)
            } else {
                transition_len
            };
            wire::NE::write_u16(ntrans, &mut sparse[pos..]);

            // zero-fill the actual transitions.
            // Unwraps are OK since transition_length <= 257 and our minimum
            // support usize size is 16-bits.
            let zeros = usize::try_from(transition_len)
                .unwrap()
                .checked_mul(StateID::SIZE)
                .unwrap();
            sparse.extend(iter::repeat(0).take(zeros));

            // If this is a match state, write the pattern IDs matched by this
            // state.
            if dfa.is_match_state(state.id()) {
                let plen = dfa.match_pattern_len(state.id());
                // Write the actual pattern IDs with a u32 length prefix.
                // First, zero-fill space.
                let mut pos = sparse.len();
                // Unwraps are OK since it's guaranteed that plen <=
                // PatternID::LIMIT, which is in turn guaranteed to fit into a
                // u32.
                let zeros = size_of::<u32>()
                    .checked_mul(plen)
                    .unwrap()
                    .checked_add(size_of::<u32>())
                    .unwrap();
                sparse.extend(iter::repeat(0).take(zeros));

                // Now write the length prefix.
                wire::NE::write_u32(
                    // Will never fail since u32::MAX is invalid pattern ID.
                    // Thus, the number of pattern IDs is representable by a
                    // u32.
                    plen.try_into().expect("pattern ID length fits in u32"),
                    &mut sparse[pos..],
                );
                pos += size_of::<u32>();

                // Now write the pattern IDs.
                for &pid in dfa.pattern_id_slice(state.id()) {
                    pos += wire::write_pattern_id::<wire::NE>(
                        pid,
                        &mut sparse[pos..],
                    );
                }
            }

            // And now add the accelerator, if one exists. An accelerator is
            // at most 4 bytes and at least 1 byte. The first byte is the
            // length, N. N bytes follow the length. The set of bytes that
            // follow correspond (exhaustively) to the bytes that must be seen
            // to leave this state.
            let accel = dfa.accelerator(state.id());
            sparse.push(accel.len().try_into().unwrap());
            sparse.extend_from_slice(accel);
        }

        let mut new = DFA {
            tt: Transitions {
                sparse,
                classes: dfa.byte_classes().clone(),
                state_len: dfa.state_len(),
                pattern_len: dfa.pattern_len(),
            },
            st: StartTable::from_dense_dfa(dfa, &remap)?,
            special: dfa.special().remap(|id| remap[dfa.to_index(id)]),
            pre: dfa.get_prefilter().map(|p| p.clone()),
            quitset: dfa.quitset().clone(),
            flags: dfa.flags().clone(),
        };
        // And here's our second pass. Iterate over all of the dense states
        // again, and update the transitions in each of the states in the
        // sparse DFA.
        for old_state in dfa.states() {
            let new_id = remap[dfa.to_index(old_state.id())];
            let mut new_state = new.tt.state_mut(new_id);
            let sparse = old_state.sparse_transitions();
            for (i, (_, _, next)) in sparse.enumerate() {
                let next = remap[dfa.to_index(next)];
                new_state.set_next_at(i, next);
            }
        }
        // Drop any excess capacity left over from the incremental
        // construction above.
        new.tt.sparse.shrink_to_fit();
        new.st.table.shrink_to_fit();
        debug!(
            "created sparse DFA, memory usage: {} (dense memory usage: {})",
            new.memory_usage(),
            dfa.memory_usage(),
        );
        Ok(new)
    }
405}
406
407impl<T: AsRef<[u8]>> DFA<T> {
408    /// Cheaply return a borrowed version of this sparse DFA. Specifically, the
409    /// DFA returned always uses `&[u8]` for its transitions.
410    pub fn as_ref<'a>(&'a self) -> DFA<&'a [u8]> {
411        DFA {
412            tt: self.tt.as_ref(),
413            st: self.st.as_ref(),
414            special: self.special,
415            pre: self.pre.clone(),
416            quitset: self.quitset,
417            flags: self.flags,
418        }
419    }
420
421    /// Return an owned version of this sparse DFA. Specifically, the DFA
422    /// returned always uses `Vec<u8>` for its transitions.
423    ///
424    /// Effectively, this returns a sparse DFA whose transitions live on the
425    /// heap.
426    #[cfg(feature = "alloc")]
427    pub fn to_owned(&self) -> DFA<alloc::vec::Vec<u8>> {
428        DFA {
429            tt: self.tt.to_owned(),
430            st: self.st.to_owned(),
431            special: self.special,
432            pre: self.pre.clone(),
433            quitset: self.quitset,
434            flags: self.flags,
435        }
436    }
437
438    /// Returns the starting state configuration for this DFA.
439    ///
440    /// The default is [`StartKind::Both`], which means the DFA supports both
441    /// unanchored and anchored searches. However, this can generally lead to
442    /// bigger DFAs. Therefore, a DFA might be compiled with support for just
443    /// unanchored or anchored searches. In that case, running a search with
444    /// an unsupported configuration will panic.
445    pub fn start_kind(&self) -> StartKind {
446        self.st.kind
447    }
448
449    /// Returns true only if this DFA has starting states for each pattern.
450    ///
451    /// When a DFA has starting states for each pattern, then a search with the
452    /// DFA can be configured to only look for anchored matches of a specific
453    /// pattern. Specifically, APIs like [`Automaton::try_search_fwd`] can
454    /// accept a [`Anchored::Pattern`] if and only if this method returns true.
455    /// Otherwise, an error will be returned.
456    ///
457    /// Note that if the DFA is empty, this always returns false.
458    pub fn starts_for_each_pattern(&self) -> bool {
459        self.st.pattern_len.is_some()
460    }
461
462    /// Returns the equivalence classes that make up the alphabet for this DFA.
463    ///
464    /// Unless [`dense::Config::byte_classes`] was disabled, it is possible
465    /// that multiple distinct bytes are grouped into the same equivalence
466    /// class if it is impossible for them to discriminate between a match and
467    /// a non-match. This has the effect of reducing the overall alphabet size
468    /// and in turn potentially substantially reducing the size of the DFA's
469    /// transition table.
470    ///
471    /// The downside of using equivalence classes like this is that every state
472    /// transition will automatically use this map to convert an arbitrary
473    /// byte to its corresponding equivalence class. In practice this has a
474    /// negligible impact on performance.
475    pub fn byte_classes(&self) -> &ByteClasses {
476        &self.tt.classes
477    }
478
479    /// Returns the memory usage, in bytes, of this DFA.
480    ///
481    /// The memory usage is computed based on the number of bytes used to
482    /// represent this DFA.
483    ///
484    /// This does **not** include the stack size used up by this DFA. To
485    /// compute that, use `std::mem::size_of::<sparse::DFA>()`.
486    pub fn memory_usage(&self) -> usize {
487        self.tt.memory_usage() + self.st.memory_usage()
488    }
489}
490
491/// Routines for converting a sparse DFA to other representations, such as raw
492/// bytes suitable for persistent storage.
493impl<T: AsRef<[u8]>> DFA<T> {
494    /// Serialize this DFA as raw bytes to a `Vec<u8>` in little endian
495    /// format.
496    ///
497    /// The written bytes are guaranteed to be deserialized correctly and
498    /// without errors in a semver compatible release of this crate by a
499    /// `DFA`'s deserialization APIs (assuming all other criteria for the
500    /// deserialization APIs has been satisfied):
501    ///
502    /// * [`DFA::from_bytes`]
503    /// * [`DFA::from_bytes_unchecked`]
504    ///
505    /// Note that unlike a [`dense::DFA`]'s serialization methods, this does
506    /// not add any initial padding to the returned bytes. Padding isn't
507    /// required for sparse DFAs since they have no alignment requirements.
508    ///
509    /// # Example
510    ///
511    /// This example shows how to serialize and deserialize a DFA:
512    ///
513    /// ```
514    /// use regex_automata::{dfa::{Automaton, sparse::DFA}, HalfMatch, Input};
515    ///
516    /// // Compile our original DFA.
517    /// let original_dfa = DFA::new("foo[0-9]+")?;
518    ///
519    /// // N.B. We use native endianness here to make the example work, but
520    /// // using to_bytes_little_endian would work on a little endian target.
521    /// let buf = original_dfa.to_bytes_native_endian();
522    /// // Even if buf has initial padding, DFA::from_bytes will automatically
523    /// // ignore it.
524    /// let dfa: DFA<&[u8]> = DFA::from_bytes(&buf)?.0;
525    ///
526    /// let expected = Some(HalfMatch::must(0, 8));
527    /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?);
528    /// # Ok::<(), Box<dyn std::error::Error>>(())
529    /// ```
    #[cfg(feature = "dfa-build")]
    pub fn to_bytes_little_endian(&self) -> Vec<u8> {
        // Delegate to the endian-generic implementation, instantiated for
        // little endian byte order.
        self.to_bytes::<wire::LE>()
    }
534
535    /// Serialize this DFA as raw bytes to a `Vec<u8>` in big endian
536    /// format.
537    ///
538    /// The written bytes are guaranteed to be deserialized correctly and
539    /// without errors in a semver compatible release of this crate by a
540    /// `DFA`'s deserialization APIs (assuming all other criteria for the
541    /// deserialization APIs has been satisfied):
542    ///
543    /// * [`DFA::from_bytes`]
544    /// * [`DFA::from_bytes_unchecked`]
545    ///
546    /// Note that unlike a [`dense::DFA`]'s serialization methods, this does
547    /// not add any initial padding to the returned bytes. Padding isn't
548    /// required for sparse DFAs since they have no alignment requirements.
549    ///
550    /// # Example
551    ///
552    /// This example shows how to serialize and deserialize a DFA:
553    ///
554    /// ```
555    /// use regex_automata::{dfa::{Automaton, sparse::DFA}, HalfMatch, Input};
556    ///
557    /// // Compile our original DFA.
558    /// let original_dfa = DFA::new("foo[0-9]+")?;
559    ///
560    /// // N.B. We use native endianness here to make the example work, but
561    /// // using to_bytes_big_endian would work on a big endian target.
562    /// let buf = original_dfa.to_bytes_native_endian();
563    /// // Even if buf has initial padding, DFA::from_bytes will automatically
564    /// // ignore it.
565    /// let dfa: DFA<&[u8]> = DFA::from_bytes(&buf)?.0;
566    ///
567    /// let expected = Some(HalfMatch::must(0, 8));
568    /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?);
569    /// # Ok::<(), Box<dyn std::error::Error>>(())
570    /// ```
    #[cfg(feature = "dfa-build")]
    pub fn to_bytes_big_endian(&self) -> Vec<u8> {
        // Delegate to the endian-generic implementation, instantiated for
        // big endian byte order.
        self.to_bytes::<wire::BE>()
    }
575
576    /// Serialize this DFA as raw bytes to a `Vec<u8>` in native endian
577    /// format.
578    ///
579    /// The written bytes are guaranteed to be deserialized correctly and
580    /// without errors in a semver compatible release of this crate by a
581    /// `DFA`'s deserialization APIs (assuming all other criteria for the
582    /// deserialization APIs has been satisfied):
583    ///
584    /// * [`DFA::from_bytes`]
585    /// * [`DFA::from_bytes_unchecked`]
586    ///
587    /// Note that unlike a [`dense::DFA`]'s serialization methods, this does
588    /// not add any initial padding to the returned bytes. Padding isn't
589    /// required for sparse DFAs since they have no alignment requirements.
590    ///
591    /// Generally speaking, native endian format should only be used when
592    /// you know that the target you're compiling the DFA for matches the
593    /// endianness of the target on which you're compiling DFA. For example,
594    /// if serialization and deserialization happen in the same process or on
595    /// the same machine. Otherwise, when serializing a DFA for use in a
596    /// portable environment, you'll almost certainly want to serialize _both_
597    /// a little endian and a big endian version and then load the correct one
598    /// based on the target's configuration.
599    ///
600    /// # Example
601    ///
602    /// This example shows how to serialize and deserialize a DFA:
603    ///
604    /// ```
605    /// use regex_automata::{dfa::{Automaton, sparse::DFA}, HalfMatch, Input};
606    ///
607    /// // Compile our original DFA.
608    /// let original_dfa = DFA::new("foo[0-9]+")?;
609    ///
610    /// let buf = original_dfa.to_bytes_native_endian();
611    /// // Even if buf has initial padding, DFA::from_bytes will automatically
612    /// // ignore it.
613    /// let dfa: DFA<&[u8]> = DFA::from_bytes(&buf)?.0;
614    ///
615    /// let expected = Some(HalfMatch::must(0, 8));
616    /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?);
617    /// # Ok::<(), Box<dyn std::error::Error>>(())
618    /// ```
    #[cfg(feature = "dfa-build")]
    pub fn to_bytes_native_endian(&self) -> Vec<u8> {
        // Delegate to the endian-generic implementation, instantiated for
        // whichever byte order the current target uses.
        self.to_bytes::<wire::NE>()
    }
623
624    /// The implementation of the public `to_bytes` serialization methods,
625    /// which is generic over endianness.
626    #[cfg(feature = "dfa-build")]
627    fn to_bytes<E: Endian>(&self) -> Vec<u8> {
628        let mut buf = vec![0; self.write_to_len()];
629        // This should always succeed since the only possible serialization
630        // error is providing a buffer that's too small, but we've ensured that
631        // `buf` is big enough here.
632        self.write_to::<E>(&mut buf).unwrap();
633        buf
634    }
635
636    /// Serialize this DFA as raw bytes to the given slice, in little endian
637    /// format. Upon success, the total number of bytes written to `dst` is
638    /// returned.
639    ///
640    /// The written bytes are guaranteed to be deserialized correctly and
641    /// without errors in a semver compatible release of this crate by a
642    /// `DFA`'s deserialization APIs (assuming all other criteria for the
643    /// deserialization APIs has been satisfied):
644    ///
645    /// * [`DFA::from_bytes`]
646    /// * [`DFA::from_bytes_unchecked`]
647    ///
648    /// # Errors
649    ///
650    /// This returns an error if the given destination slice is not big enough
651    /// to contain the full serialized DFA. If an error occurs, then nothing
652    /// is written to `dst`.
653    ///
654    /// # Example
655    ///
656    /// This example shows how to serialize and deserialize a DFA without
657    /// dynamic memory allocation.
658    ///
659    /// ```
660    /// use regex_automata::{dfa::{Automaton, sparse::DFA}, HalfMatch, Input};
661    ///
662    /// // Compile our original DFA.
663    /// let original_dfa = DFA::new("foo[0-9]+")?;
664    ///
665    /// // Create a 4KB buffer on the stack to store our serialized DFA.
666    /// let mut buf = [0u8; 4 * (1<<10)];
667    /// // N.B. We use native endianness here to make the example work, but
668    /// // using write_to_little_endian would work on a little endian target.
669    /// let written = original_dfa.write_to_native_endian(&mut buf)?;
670    /// let dfa: DFA<&[u8]> = DFA::from_bytes(&buf[..written])?.0;
671    ///
672    /// let expected = Some(HalfMatch::must(0, 8));
673    /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?);
674    /// # Ok::<(), Box<dyn std::error::Error>>(())
675    /// ```
    pub fn write_to_little_endian(
        &self,
        dst: &mut [u8],
    ) -> Result<usize, SerializeError> {
        // Delegate to the endian-generic implementation, instantiated for
        // little endian byte order.
        self.write_to::<wire::LE>(dst)
    }
682
683    /// Serialize this DFA as raw bytes to the given slice, in big endian
684    /// format. Upon success, the total number of bytes written to `dst` is
685    /// returned.
686    ///
687    /// The written bytes are guaranteed to be deserialized correctly and
688    /// without errors in a semver compatible release of this crate by a
689    /// `DFA`'s deserialization APIs (assuming all other criteria for the
690    /// deserialization APIs has been satisfied):
691    ///
692    /// * [`DFA::from_bytes`]
693    /// * [`DFA::from_bytes_unchecked`]
694    ///
695    /// # Errors
696    ///
697    /// This returns an error if the given destination slice is not big enough
698    /// to contain the full serialized DFA. If an error occurs, then nothing
699    /// is written to `dst`.
700    ///
701    /// # Example
702    ///
703    /// This example shows how to serialize and deserialize a DFA without
704    /// dynamic memory allocation.
705    ///
706    /// ```
707    /// use regex_automata::{dfa::{Automaton, sparse::DFA}, HalfMatch, Input};
708    ///
709    /// // Compile our original DFA.
710    /// let original_dfa = DFA::new("foo[0-9]+")?;
711    ///
712    /// // Create a 4KB buffer on the stack to store our serialized DFA.
713    /// let mut buf = [0u8; 4 * (1<<10)];
714    /// // N.B. We use native endianness here to make the example work, but
715    /// // using write_to_big_endian would work on a big endian target.
716    /// let written = original_dfa.write_to_native_endian(&mut buf)?;
717    /// let dfa: DFA<&[u8]> = DFA::from_bytes(&buf[..written])?.0;
718    ///
719    /// let expected = Some(HalfMatch::must(0, 8));
720    /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?);
721    /// # Ok::<(), Box<dyn std::error::Error>>(())
722    /// ```
    pub fn write_to_big_endian(
        &self,
        dst: &mut [u8],
    ) -> Result<usize, SerializeError> {
        // Delegate to the endian-generic implementation, instantiated for
        // big endian byte order.
        self.write_to::<wire::BE>(dst)
    }
729
730    /// Serialize this DFA as raw bytes to the given slice, in native endian
731    /// format. Upon success, the total number of bytes written to `dst` is
732    /// returned.
733    ///
734    /// The written bytes are guaranteed to be deserialized correctly and
735    /// without errors in a semver compatible release of this crate by a
736    /// `DFA`'s deserialization APIs (assuming all other criteria for the
737    /// deserialization APIs has been satisfied):
738    ///
739    /// * [`DFA::from_bytes`]
740    /// * [`DFA::from_bytes_unchecked`]
741    ///
742    /// Generally speaking, native endian format should only be used when
743    /// you know that the target you're compiling the DFA for matches the
744    /// endianness of the target on which you're compiling DFA. For example,
745    /// if serialization and deserialization happen in the same process or on
746    /// the same machine. Otherwise, when serializing a DFA for use in a
747    /// portable environment, you'll almost certainly want to serialize _both_
748    /// a little endian and a big endian version and then load the correct one
749    /// based on the target's configuration.
750    ///
751    /// # Errors
752    ///
753    /// This returns an error if the given destination slice is not big enough
754    /// to contain the full serialized DFA. If an error occurs, then nothing
755    /// is written to `dst`.
756    ///
757    /// # Example
758    ///
759    /// This example shows how to serialize and deserialize a DFA without
760    /// dynamic memory allocation.
761    ///
762    /// ```
763    /// use regex_automata::{dfa::{Automaton, sparse::DFA}, HalfMatch, Input};
764    ///
765    /// // Compile our original DFA.
766    /// let original_dfa = DFA::new("foo[0-9]+")?;
767    ///
768    /// // Create a 4KB buffer on the stack to store our serialized DFA.
769    /// let mut buf = [0u8; 4 * (1<<10)];
770    /// let written = original_dfa.write_to_native_endian(&mut buf)?;
771    /// let dfa: DFA<&[u8]> = DFA::from_bytes(&buf[..written])?.0;
772    ///
773    /// let expected = Some(HalfMatch::must(0, 8));
774    /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?);
775    /// # Ok::<(), Box<dyn std::error::Error>>(())
776    /// ```
777    pub fn write_to_native_endian(
778        &self,
779        dst: &mut [u8],
780    ) -> Result<usize, SerializeError> {
781        self.write_to::<wire::NE>(dst)
782    }
783
784    /// The implementation of the public `write_to` serialization methods,
785    /// which is generic over endianness.
786    fn write_to<E: Endian>(
787        &self,
788        dst: &mut [u8],
789    ) -> Result<usize, SerializeError> {
790        let mut nw = 0;
791        nw += wire::write_label(LABEL, &mut dst[nw..])?;
792        nw += wire::write_endianness_check::<E>(&mut dst[nw..])?;
793        nw += wire::write_version::<E>(VERSION, &mut dst[nw..])?;
794        nw += {
795            // Currently unused, intended for future flexibility
796            E::write_u32(0, &mut dst[nw..]);
797            size_of::<u32>()
798        };
799        nw += self.flags.write_to::<E>(&mut dst[nw..])?;
800        nw += self.tt.write_to::<E>(&mut dst[nw..])?;
801        nw += self.st.write_to::<E>(&mut dst[nw..])?;
802        nw += self.special.write_to::<E>(&mut dst[nw..])?;
803        nw += self.quitset.write_to::<E>(&mut dst[nw..])?;
804        Ok(nw)
805    }
806
807    /// Return the total number of bytes required to serialize this DFA.
808    ///
809    /// This is useful for determining the size of the buffer required to pass
810    /// to one of the serialization routines:
811    ///
812    /// * [`DFA::write_to_little_endian`]
813    /// * [`DFA::write_to_big_endian`]
814    /// * [`DFA::write_to_native_endian`]
815    ///
816    /// Passing a buffer smaller than the size returned by this method will
817    /// result in a serialization error.
818    ///
819    /// # Example
820    ///
821    /// This example shows how to dynamically allocate enough room to serialize
822    /// a sparse DFA.
823    ///
824    /// ```
825    /// use regex_automata::{dfa::{Automaton, sparse::DFA}, HalfMatch, Input};
826    ///
827    /// // Compile our original DFA.
828    /// let original_dfa = DFA::new("foo[0-9]+")?;
829    ///
830    /// let mut buf = vec![0; original_dfa.write_to_len()];
831    /// let written = original_dfa.write_to_native_endian(&mut buf)?;
832    /// let dfa: DFA<&[u8]> = DFA::from_bytes(&buf[..written])?.0;
833    ///
834    /// let expected = Some(HalfMatch::must(0, 8));
835    /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?);
836    /// # Ok::<(), Box<dyn std::error::Error>>(())
837    /// ```
838    pub fn write_to_len(&self) -> usize {
839        wire::write_label_len(LABEL)
840        + wire::write_endianness_check_len()
841        + wire::write_version_len()
842        + size_of::<u32>() // unused, intended for future flexibility
843        + self.flags.write_to_len()
844        + self.tt.write_to_len()
845        + self.st.write_to_len()
846        + self.special.write_to_len()
847        + self.quitset.write_to_len()
848    }
849}
850
impl<'a> DFA<&'a [u8]> {
    /// Safely deserialize a sparse DFA with a specific state identifier
    /// representation. Upon success, this returns both the deserialized DFA
    /// and the number of bytes read from the given slice. Namely, the contents
    /// of the slice beyond the DFA are not read.
    ///
    /// Deserializing a DFA using this routine will never allocate heap memory.
    /// For safety purposes, the DFA's transitions will be verified such that
    /// every transition points to a valid state. If this verification is too
    /// costly, then a [`DFA::from_bytes_unchecked`] API is provided, which
    /// will always execute in constant time.
    ///
    /// The bytes given must be generated by one of the serialization APIs
    /// of a `DFA` using a semver compatible release of this crate. Those
    /// include:
    ///
    /// * [`DFA::to_bytes_little_endian`]
    /// * [`DFA::to_bytes_big_endian`]
    /// * [`DFA::to_bytes_native_endian`]
    /// * [`DFA::write_to_little_endian`]
    /// * [`DFA::write_to_big_endian`]
    /// * [`DFA::write_to_native_endian`]
    ///
    /// The `to_bytes` methods allocate and return a `Vec<u8>` for you. The
    /// `write_to` methods do not allocate and write to an existing slice
    /// (which may be on the stack). Since deserialization always uses the
    /// native endianness of the target platform, the serialization API you use
    /// should match the endianness of the target platform. (It's often a good
    /// idea to generate serialized DFAs for both forms of endianness and then
    /// load the correct one based on endianness.)
    ///
    /// # Errors
    ///
    /// Generally speaking, it's easier to state the conditions in which an
    /// error is _not_ returned. All of the following must be true:
    ///
    /// * The bytes given must be produced by one of the serialization APIs
    ///   on this DFA, as mentioned above.
    /// * The endianness of the target platform matches the endianness used to
    ///   serialize the provided DFA.
    ///
    /// If any of the above are not true, then an error will be returned.
    ///
    /// Note that unlike deserializing a [`dense::DFA`], deserializing a sparse
    /// DFA has no alignment requirements. That is, an alignment of `1` is
    /// valid.
    ///
    /// # Panics
    ///
    /// This routine will never panic for any input.
    ///
    /// # Example
    ///
    /// This example shows how to serialize a DFA to raw bytes, deserialize it
    /// and then use it for searching.
    ///
    /// ```
    /// use regex_automata::{dfa::{Automaton, sparse::DFA}, HalfMatch, Input};
    ///
    /// let initial = DFA::new("foo[0-9]+")?;
    /// let bytes = initial.to_bytes_native_endian();
    /// let dfa: DFA<&[u8]> = DFA::from_bytes(&bytes)?.0;
    ///
    /// let expected = Some(HalfMatch::must(0, 8));
    /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?);
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```
    ///
    /// # Example: loading a DFA from static memory
    ///
    /// One use case this library supports is the ability to serialize a
    /// DFA to disk and then use `include_bytes!` to store it in a compiled
    /// Rust program. Those bytes can then be cheaply deserialized into a
    /// `DFA` structure at runtime and used for searching without having to
    /// re-compile the DFA (which can be quite costly).
    ///
    /// We can show this in two parts. The first part is serializing the DFA to
    /// a file:
    ///
    /// ```no_run
    /// use regex_automata::dfa::sparse::DFA;
    ///
    /// let dfa = DFA::new("foo[0-9]+")?;
    ///
    /// // Write a big endian serialized version of this DFA to a file.
    /// let bytes = dfa.to_bytes_big_endian();
    /// std::fs::write("foo.bigendian.dfa", &bytes)?;
    ///
    /// // Do it again, but this time for little endian.
    /// let bytes = dfa.to_bytes_little_endian();
    /// std::fs::write("foo.littleendian.dfa", &bytes)?;
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```
    ///
    /// And now the second part is embedding the DFA into the compiled program
    /// and deserializing it at runtime on first use. We use conditional
    /// compilation to choose the correct endianness. We do not need to employ
    /// any special tricks to ensure a proper alignment, since a sparse DFA has
    /// no alignment requirements.
    ///
    /// ```no_run
    /// use regex_automata::{
    ///     dfa::{Automaton, sparse::DFA},
    ///     util::lazy::Lazy,
    ///     HalfMatch, Input,
    /// };
    ///
    /// // This crate provides its own "lazy" type, kind of like
    /// // lazy_static! or once_cell::sync::Lazy. But it works in no-alloc
    /// // no-std environments and lets us write this using completely
    /// // safe code.
    /// static RE: Lazy<DFA<&'static [u8]>> = Lazy::new(|| {
    ///     # const _: &str = stringify! {
    ///     #[cfg(target_endian = "big")]
    ///     static BYTES: &[u8] = include_bytes!("foo.bigendian.dfa");
    ///     #[cfg(target_endian = "little")]
    ///     static BYTES: &[u8] = include_bytes!("foo.littleendian.dfa");
    ///     # };
    ///     # static BYTES: &[u8] = b"";
    ///
    ///     let (dfa, _) = DFA::from_bytes(BYTES)
    ///         .expect("serialized DFA should be valid");
    ///     dfa
    /// });
    ///
    /// let expected = Ok(Some(HalfMatch::must(0, 8)));
    /// assert_eq!(expected, RE.try_search_fwd(&Input::new("foo12345")));
    /// ```
    ///
    /// Alternatively, consider using
    /// [`lazy_static`](https://crates.io/crates/lazy_static)
    /// or
    /// [`once_cell`](https://crates.io/crates/once_cell),
    /// which will guarantee safety for you.
    pub fn from_bytes(
        slice: &'a [u8],
    ) -> Result<(DFA<&'a [u8]>, usize), DeserializeError> {
        // SAFETY: This is safe because we validate both the sparse transitions
        // (by trying to decode every state) and start state ID list below. If
        // either validation fails, then we return an error.
        let (dfa, nread) = unsafe { DFA::from_bytes_unchecked(slice)? };
        // `seen` is the set of state IDs that decoded successfully; the start
        // table validation below checks its IDs against that set.
        let seen = dfa.tt.validate(&dfa.special)?;
        dfa.st.validate(&dfa.special, &seen)?;
        // N.B. dfa.special doesn't have a way to do unchecked deserialization,
        // so it has already been validated.
        Ok((dfa, nread))
    }

    /// Deserialize a DFA with a specific state identifier representation in
    /// constant time by omitting the verification of the validity of the
    /// sparse transitions.
    ///
    /// This is just like [`DFA::from_bytes`], except it can potentially return
    /// a DFA that exhibits undefined behavior if its transitions contain
    /// invalid state identifiers.
    ///
    /// This routine is useful if you need to deserialize a DFA cheaply and
    /// cannot afford the transition validation performed by `from_bytes`.
    ///
    /// # Safety
    ///
    /// This routine is not safe because it permits callers to provide
    /// arbitrary transitions with possibly incorrect state identifiers. While
    /// the various serialization routines will never return an incorrect
    /// DFA, there is no guarantee that the bytes provided here are correct.
    /// While `from_bytes_unchecked` will still do several forms of basic
    /// validation, this routine does not check that the transitions themselves
    /// are correct. Given an incorrect transition table, it is possible for
    /// the search routines to access out-of-bounds memory because of explicit
    /// bounds check elision.
    ///
    /// # Example
    ///
    /// ```
    /// use regex_automata::{dfa::{Automaton, sparse::DFA}, HalfMatch, Input};
    ///
    /// let initial = DFA::new("foo[0-9]+")?;
    /// let bytes = initial.to_bytes_native_endian();
    /// // SAFETY: This is guaranteed to be safe since the bytes given come
    /// // directly from a compatible serialization routine.
    /// let dfa: DFA<&[u8]> = unsafe { DFA::from_bytes_unchecked(&bytes)?.0 };
    ///
    /// let expected = Some(HalfMatch::must(0, 8));
    /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?);
    /// # Ok::<(), Box<dyn std::error::Error>>(())
    /// ```
    pub unsafe fn from_bytes_unchecked(
        slice: &'a [u8],
    ) -> Result<(DFA<&'a [u8]>, usize), DeserializeError> {
        let mut nr = 0;

        // Header: label, endianness check and version number.
        nr += wire::read_label(&slice[nr..], LABEL)?;
        nr += wire::read_endianness_check(&slice[nr..])?;
        nr += wire::read_version(&slice[nr..], VERSION)?;

        // Skip over 4 bytes that are reserved for future use.
        let _unused = wire::try_read_u32(&slice[nr..], "unused space")?;
        nr += size_of::<u32>();

        let (flags, nread) = Flags::from_bytes(&slice[nr..])?;
        nr += nread;

        let (tt, nread) = Transitions::from_bytes_unchecked(&slice[nr..])?;
        nr += nread;

        let (st, nread) = StartTable::from_bytes_unchecked(&slice[nr..])?;
        nr += nread;

        let (special, nread) = Special::from_bytes(&slice[nr..])?;
        nr += nread;
        // Sanity check: the maximum special state ID must point into the
        // sparse transition bytes read above.
        if special.max.as_usize() >= tt.sparse().len() {
            return Err(DeserializeError::generic(
                "max should not be greater than or equal to sparse bytes",
            ));
        }

        let (quitset, nread) = ByteSet::from_bytes(&slice[nr..])?;
        nr += nread;

        // Prefilters don't support serialization, so they're always absent.
        let pre = None;
        Ok((DFA { tt, st, special, pre, quitset, flags }, nr))
    }
}
1074
1075/// Other routines that work for all `T`.
1076impl<T> DFA<T> {
1077    /// Set or unset the prefilter attached to this DFA.
1078    ///
1079    /// This is useful when one has deserialized a DFA from `&[u8]`.
1080    /// Deserialization does not currently include prefilters, so if you
1081    /// want prefilter acceleration, you'll need to rebuild it and attach
1082    /// it here.
1083    pub fn set_prefilter(&mut self, prefilter: Option<Prefilter>) {
1084        self.pre = prefilter
1085    }
1086}
1087
1088impl<T: AsRef<[u8]>> fmt::Debug for DFA<T> {
1089    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1090        writeln!(f, "sparse::DFA(")?;
1091        for state in self.tt.states() {
1092            fmt_state_indicator(f, self, state.id())?;
1093            writeln!(f, "{:06?}: {:?}", state.id().as_usize(), state)?;
1094        }
1095        writeln!(f, "")?;
1096        for (i, (start_id, anchored, sty)) in self.st.iter().enumerate() {
1097            if i % self.st.stride == 0 {
1098                match anchored {
1099                    Anchored::No => writeln!(f, "START-GROUP(unanchored)")?,
1100                    Anchored::Yes => writeln!(f, "START-GROUP(anchored)")?,
1101                    Anchored::Pattern(pid) => writeln!(
1102                        f,
1103                        "START_GROUP(pattern: {:?})",
1104                        pid.as_usize()
1105                    )?,
1106                }
1107            }
1108            writeln!(f, "  {:?} => {:06?}", sty, start_id.as_usize())?;
1109        }
1110        writeln!(f, "state length: {:?}", self.tt.state_len)?;
1111        writeln!(f, "pattern length: {:?}", self.pattern_len())?;
1112        writeln!(f, "flags: {:?}", self.flags)?;
1113        writeln!(f, ")")?;
1114        Ok(())
1115    }
1116}
1117
// SAFETY: We assert that our implementation of each method is correct.
unsafe impl<T: AsRef<[u8]>> Automaton for DFA<T> {
    // The state classification predicates below all delegate to this DFA's
    // `Special` component.
    #[inline]
    fn is_special_state(&self, id: StateID) -> bool {
        self.special.is_special_state(id)
    }

    #[inline]
    fn is_dead_state(&self, id: StateID) -> bool {
        self.special.is_dead_state(id)
    }

    #[inline]
    fn is_quit_state(&self, id: StateID) -> bool {
        self.special.is_quit_state(id)
    }

    #[inline]
    fn is_match_state(&self, id: StateID) -> bool {
        self.special.is_match_state(id)
    }

    #[inline]
    fn is_start_state(&self, id: StateID) -> bool {
        self.special.is_start_state(id)
    }

    #[inline]
    fn is_accel_state(&self, id: StateID) -> bool {
        self.special.is_accel_state(id)
    }

    // This is marked as inline to help dramatically boost sparse searching,
    // which decodes each state it enters to follow the next transition.
    #[cfg_attr(feature = "perf-inline", inline(always))]
    fn next_state(&self, current: StateID, input: u8) -> StateID {
        // Map the input byte to its equivalence class before looking up the
        // transition.
        let input = self.tt.classes.get(input);
        self.tt.state(current).next(input)
    }

    #[inline]
    unsafe fn next_state_unchecked(
        &self,
        current: StateID,
        input: u8,
    ) -> StateID {
        // No separate unchecked path here; this simply delegates to the
        // checked lookup.
        self.next_state(current, input)
    }

    #[inline]
    fn next_eoi_state(&self, current: StateID) -> StateID {
        self.tt.state(current).next_eoi()
    }

    #[inline]
    fn pattern_len(&self) -> usize {
        self.tt.pattern_len
    }

    #[inline]
    fn match_len(&self, id: StateID) -> usize {
        self.tt.state(id).pattern_len()
    }

    #[inline]
    fn match_pattern(&self, id: StateID, match_index: usize) -> PatternID {
        // This is an optimization for the very common case of a DFA with a
        // single pattern. This conditional avoids a somewhat more costly path
        // that finds the pattern ID from the state machine, which requires
        // a bit of slicing/pointer-chasing. This optimization tends to only
        // matter when matches are frequent.
        if self.tt.pattern_len == 1 {
            return PatternID::ZERO;
        }
        self.tt.state(id).pattern_id(match_index)
    }

    #[inline]
    fn has_empty(&self) -> bool {
        self.flags.has_empty
    }

    #[inline]
    fn is_utf8(&self) -> bool {
        self.flags.is_utf8
    }

    #[inline]
    fn is_always_start_anchored(&self) -> bool {
        self.flags.is_always_start_anchored
    }

    #[inline]
    fn start_state(
        &self,
        config: &start::Config,
    ) -> Result<StateID, StartError> {
        let anchored = config.get_anchored();
        let start = match config.get_look_behind() {
            // No look-behind byte available, so use the Text start
            // configuration.
            None => Start::Text,
            Some(byte) => {
                // A look-behind byte in the quit set means there is no
                // usable start state, so report an error instead.
                if !self.quitset.is_empty() && self.quitset.contains(byte) {
                    return Err(StartError::quit(byte));
                }
                self.st.start_map.get(byte)
            }
        };
        self.st.start(anchored, start)
    }

    #[inline]
    fn universal_start_state(&self, mode: Anchored) -> Option<StateID> {
        // Pattern-specific searches never have a universal start state.
        match mode {
            Anchored::No => self.st.universal_start_unanchored,
            Anchored::Yes => self.st.universal_start_anchored,
            Anchored::Pattern(_) => None,
        }
    }

    #[inline]
    fn accelerator(&self, id: StateID) -> &[u8] {
        self.tt.state(id).accelerator()
    }

    #[inline]
    fn get_prefilter(&self) -> Option<&Prefilter> {
        self.pre.as_ref()
    }
}
1247
/// The transition table portion of a sparse DFA.
///
/// The transition table is the core part of the DFA in that it describes how
/// to move from one state to another based on the input sequence observed.
///
/// Unlike a typical dense table based DFA, states in a sparse transition
/// table have variable size. That is, states with more transitions use more
/// space than states with fewer transitions. This means that finding the next
/// transition takes more work than with a dense DFA, but also typically uses
/// much less space.
#[derive(Clone)]
struct Transitions<T> {
    /// The raw encoding of each state in this DFA.
    ///
    /// Each state has the following information:
    ///
    /// * A set of transitions to subsequent states. Transitions to the dead
    ///   state are omitted.
    /// * If the state can be accelerated, then any additional accelerator
    ///   information.
    /// * If the state is a match state, then the state contains all pattern
    ///   IDs that match when in that state.
    ///
    /// To decode a state, use `Transitions::state`.
    ///
    /// In practice, `T` is either `Vec<u8>` or `&[u8]`.
    sparse: T,
    /// A set of equivalence classes, where a single equivalence class
    /// represents a set of bytes that never discriminate between a match
    /// and a non-match in the DFA. Each equivalence class corresponds to a
    /// single character in this DFA's alphabet, where the maximum number of
    /// characters is 257 (each possible value of a byte plus the special
    /// EOI transition). Consequently, the number of equivalence classes
    /// corresponds to the number of transitions for each DFA state. Note
    /// though that the *space* used by each DFA state in the transition table
    /// may be larger. The total space used by each DFA state is known as the
    /// stride and is documented above.
    ///
    /// The only time the number of equivalence classes is fewer than 257 is
    /// if the DFA's kind uses byte classes which is the default. Equivalence
    /// classes should generally only be disabled when debugging, so that
    /// the transitions themselves aren't obscured. Disabling them has no
    /// other benefit, since the equivalence class map is always used while
    /// searching. In the vast majority of cases, the number of equivalence
    /// classes is substantially smaller than 257, particularly when large
    /// Unicode classes aren't used.
    ///
    /// N.B. Equivalence classes aren't particularly useful in a sparse DFA
    /// in the current implementation, since equivalence classes generally tend
    /// to correspond to continuous ranges of bytes that map to the same
    /// transition. So in a sparse DFA, equivalence classes don't really lead
    /// to a space savings. In the future, it would be good to try and remove
    /// them from sparse DFAs entirely, but requires a bit of work since sparse
    /// DFAs are built from dense DFAs, which are in turn built on top of
    /// equivalence classes.
    classes: ByteClasses,
    /// The total number of states in this DFA. Note that a DFA always has at
    /// least one state---the dead state---even the empty DFA. In particular,
    /// the dead state always has ID 0 and is correspondingly always the first
    /// state. The dead state is never a match state.
    state_len: usize,
    /// The total number of unique patterns represented by these match states.
    pattern_len: usize,
}
1312
impl<'a> Transitions<&'a [u8]> {
    /// Deserialize a transition table from the beginning of `slice` without
    /// validating the state transitions it contains. On success, returns the
    /// table along with the number of bytes read from `slice`.
    ///
    /// # Safety
    ///
    /// The transitions read here are not checked, so callers must validate
    /// them separately (see `Transitions::validate`, as used by
    /// `DFA::from_bytes`) before the table is used for searching.
    unsafe fn from_bytes_unchecked(
        mut slice: &'a [u8],
    ) -> Result<(Transitions<&'a [u8]>, usize), DeserializeError> {
        // Record the starting address so the total number of bytes read can
        // be computed from the final slice pointer.
        let slice_start = slice.as_ptr().as_usize();

        let (state_len, nr) =
            wire::try_read_u32_as_usize(&slice, "state length")?;
        slice = &slice[nr..];

        let (pattern_len, nr) =
            wire::try_read_u32_as_usize(&slice, "pattern length")?;
        slice = &slice[nr..];

        let (classes, nr) = ByteClasses::from_bytes(&slice)?;
        slice = &slice[nr..];

        // The raw sparse state bytes are length-prefixed; check the length
        // before borrowing the sub-slice.
        let (len, nr) =
            wire::try_read_u32_as_usize(&slice, "sparse transitions length")?;
        slice = &slice[nr..];

        wire::check_slice_len(slice, len, "sparse states byte length")?;
        let sparse = &slice[..len];
        slice = &slice[len..];

        let trans = Transitions { sparse, classes, state_len, pattern_len };
        Ok((trans, slice.as_ptr().as_usize() - slice_start))
    }
}
1342
1343impl<T: AsRef<[u8]>> Transitions<T> {
    /// Writes a serialized form of this transition table to the buffer given.
    /// If the buffer is too small, then an error is returned. To determine
    /// how big the buffer must be, use `write_to_len`.
    fn write_to<E: Endian>(
        &self,
        mut dst: &mut [u8],
    ) -> Result<usize, SerializeError> {
        // Check the total size up front so the writes below can't run out
        // of room.
        let nwrite = self.write_to_len();
        if dst.len() < nwrite {
            return Err(SerializeError::buffer_too_small(
                "sparse transition table",
            ));
        }
        dst = &mut dst[..nwrite];

        // write state length
        E::write_u32(u32::try_from(self.state_len).unwrap(), dst);
        dst = &mut dst[size_of::<u32>()..];

        // write pattern length
        E::write_u32(u32::try_from(self.pattern_len).unwrap(), dst);
        dst = &mut dst[size_of::<u32>()..];

        // write byte class map
        let n = self.classes.write_to(dst)?;
        dst = &mut dst[n..];

        // write number of bytes in sparse transitions
        E::write_u32(u32::try_from(self.sparse().len()).unwrap(), dst);
        dst = &mut dst[size_of::<u32>()..];

        // write actual transitions
        //
        // State IDs are offsets into the sparse bytes, so stepping `id` by
        // each state's serialized length walks every state in order.
        let mut id = DEAD;
        while id.as_usize() < self.sparse().len() {
            let state = self.state(id);
            let n = state.write_to::<E>(&mut dst)?;
            dst = &mut dst[n..];
            // The next ID is the offset immediately following `state`.
            id = StateID::new(id.as_usize() + state.write_to_len()).unwrap();
        }
        Ok(nwrite)
    }
1386
1387    /// Returns the number of bytes the serialized form of this transition
1388    /// table will use.
1389    fn write_to_len(&self) -> usize {
1390        size_of::<u32>()   // state length
1391        + size_of::<u32>() // pattern length
1392        + self.classes.write_to_len()
1393        + size_of::<u32>() // sparse transitions length
1394        + self.sparse().len()
1395    }
1396
    /// Validates that every state ID in this transition table is valid.
    ///
    /// That is, every state ID can be used to correctly index a state in this
    /// table.
    ///
    /// On success, this returns the set of state IDs that were verified,
    /// which callers use to validate the start state table as well.
    fn validate(&self, sp: &Special) -> Result<Seen, DeserializeError> {
        let mut verified = Seen::new();
        // We need to make sure that we decode the correct number of states.
        // Otherwise, an empty set of transitions would validate even if the
        // recorded state length is non-empty.
        let mut len = 0;
        // We can't use the self.states() iterator because it assumes the state
        // encodings are valid. It could panic if they aren't.
        let mut id = DEAD;
        while id.as_usize() < self.sparse().len() {
            // Before we even decode the state, we check that the ID itself
            // is well formed. That is, if it's a special state then it must
            // actually be a quit, dead, accel, match or start state.
            if sp.is_special_state(id) {
                let is_actually_special = sp.is_dead_state(id)
                    || sp.is_quit_state(id)
                    || sp.is_match_state(id)
                    || sp.is_start_state(id)
                    || sp.is_accel_state(id);
                if !is_actually_special {
                    // This is kind of a cryptic error message...
                    return Err(DeserializeError::generic(
                        "found sparse state tagged as special but \
                         wasn't actually special",
                    ));
                }
            }
            // Decoding the state also checks that its internal encoding is
            // well formed.
            let state = self.try_state(sp, id)?;
            verified.insert(id);
            // The next ID should be the offset immediately following `state`.
            id = StateID::new(wire::add(
                id.as_usize(),
                state.write_to_len(),
                "next state ID offset",
            )?)
            .map_err(|err| {
                DeserializeError::state_id_error(err, "next state ID offset")
            })?;
            len += 1;
        }
        // Now that we've checked that all top-level states are correct and
        // importantly, collected a set of valid state IDs, we have all the
        // information we need to check that all transitions are correct too.
        //
        // Note that we can't use `valid_ids` to iterate because it will
        // be empty in no-std no-alloc contexts. (And yes, that means our
        // verification isn't quite as good.) We can use `self.states()`
        // though at least, since we know that all states can at least be
        // decoded and traversed correctly.
        for state in self.states() {
            // Check that all transitions in this state are correct.
            for i in 0..state.ntrans {
                let to = state.next_at(i);
                // For no-alloc, we just check that the state can decode. It is
                // technically possible that the state ID could still point to
                // a non-existent state even if it decodes (fuzzing proved this
                // to be true), but it shouldn't result in any memory unsafety
                // or panics in non-debug mode.
                #[cfg(not(feature = "alloc"))]
                {
                    let _ = self.try_state(sp, to)?;
                }
                #[cfg(feature = "alloc")]
                {
                    if !verified.contains(&to) {
                        return Err(DeserializeError::generic(
                            "found transition that points to a \
                             non-existent state",
                        ));
                    }
                }
            }
        }
        // Finally, the number of states decoded must match the length that
        // was recorded at serialization time.
        if len != self.state_len {
            return Err(DeserializeError::generic(
                "mismatching sparse state length",
            ));
        }
        Ok(verified)
    }
1481
1482    /// Converts these transitions to a borrowed value.
1483    fn as_ref(&self) -> Transitions<&'_ [u8]> {
1484        Transitions {
1485            sparse: self.sparse(),
1486            classes: self.classes.clone(),
1487            state_len: self.state_len,
1488            pattern_len: self.pattern_len,
1489        }
1490    }
1491
1492    /// Converts these transitions to an owned value.
1493    #[cfg(feature = "alloc")]
1494    fn to_owned(&self) -> Transitions<alloc::vec::Vec<u8>> {
1495        Transitions {
1496            sparse: self.sparse().to_vec(),
1497            classes: self.classes.clone(),
1498            state_len: self.state_len,
1499            pattern_len: self.pattern_len,
1500        }
1501    }
1502
1503    /// Return a convenient representation of the given state.
1504    ///
1505    /// This panics if the state is invalid.
1506    ///
1507    /// This is marked as inline to help dramatically boost sparse searching,
1508    /// which decodes each state it enters to follow the next transition. Other
1509    /// functions involved are also inlined, which should hopefully eliminate
1510    /// a lot of the extraneous decoding that is never needed just to follow
1511    /// the next transition.
1512    #[cfg_attr(feature = "perf-inline", inline(always))]
1513    fn state(&self, id: StateID) -> State<'_> {
1514        let mut state = &self.sparse()[id.as_usize()..];
1515        let mut ntrans = wire::read_u16(&state).as_usize();
1516        let is_match = (1 << 15) & ntrans != 0;
1517        ntrans &= !(1 << 15);
1518        state = &state[2..];
1519
1520        let (input_ranges, state) = state.split_at(ntrans * 2);
1521        let (next, state) = state.split_at(ntrans * StateID::SIZE);
1522        let (pattern_ids, state) = if is_match {
1523            let npats = wire::read_u32(&state).as_usize();
1524            state[4..].split_at(npats * 4)
1525        } else {
1526            (&[][..], state)
1527        };
1528
1529        let accel_len = usize::from(state[0]);
1530        let accel = &state[1..accel_len + 1];
1531        State { id, is_match, ntrans, input_ranges, next, pattern_ids, accel }
1532    }
1533
    /// Like `state`, but will return an error if the state encoding is
    /// invalid. This is useful for verifying states after deserialization,
    /// which is required for a safe deserialization API.
    ///
    /// Note that this only verifies that this state is decodable and that
    /// all of its data is consistent. It does not verify that its state ID
    /// transitions point to valid states themselves, nor does it verify that
    /// every pattern ID is valid.
    fn try_state(
        &self,
        sp: &Special,
        id: StateID,
    ) -> Result<State<'_>, DeserializeError> {
        if id.as_usize() > self.sparse().len() {
            return Err(DeserializeError::generic(
                "invalid caller provided sparse state ID",
            ));
        }
        let mut state = &self.sparse()[id.as_usize()..];
        // Encoding format starts with a u16 that stores the total number of
        // transitions in this state. The top bit of that u16 flags whether
        // this is a match state.
        let (mut ntrans, _) =
            wire::try_read_u16_as_usize(state, "state transition length")?;
        let is_match = ((1 << 15) & ntrans) != 0;
        ntrans &= !(1 << 15);
        state = &state[2..];
        // At most 256 byte-value transitions plus the EOI transition, and
        // never zero (every state at least has an EOI transition).
        if ntrans > 257 || ntrans == 0 {
            return Err(DeserializeError::generic(
                "invalid transition length",
            ));
        }
        // The per-state match flag must agree with the "special" state ID
        // ranges recorded in the DFA header.
        if is_match && !sp.is_match_state(id) {
            return Err(DeserializeError::generic(
                "state marked as match but not in match ID range",
            ));
        } else if !is_match && sp.is_match_state(id) {
            return Err(DeserializeError::generic(
                "state in match ID range but not marked as match state",
            ));
        }

        // Each transition has two pieces: an inclusive range of bytes on which
        // it is defined, and the state ID that those bytes transition to. The
        // pairs come first, followed by a corresponding sequence of state IDs.
        let input_ranges_len = ntrans.checked_mul(2).unwrap();
        wire::check_slice_len(state, input_ranges_len, "sparse byte pairs")?;
        let (input_ranges, state) = state.split_at(input_ranges_len);
        // Every range should be of the form A-B, where A<=B.
        for pair in input_ranges.chunks(2) {
            let (start, end) = (pair[0], pair[1]);
            if start > end {
                return Err(DeserializeError::generic("invalid input range"));
            }
        }

        // And now extract the corresponding sequence of state IDs. We leave
        // this sequence as a &[u8] instead of a &[S] because sparse DFAs do
        // not have any alignment requirements.
        let next_len = ntrans
            .checked_mul(self.id_len())
            .expect("state size * #trans should always fit in a usize");
        wire::check_slice_len(state, next_len, "sparse trans state IDs")?;
        let (next, state) = state.split_at(next_len);
        // We can at least verify that every state ID is in bounds.
        for idbytes in next.chunks(self.id_len()) {
            let (id, _) =
                wire::read_state_id(idbytes, "sparse state ID in try_state")?;
            wire::check_slice_len(
                self.sparse(),
                id.as_usize(),
                "invalid sparse state ID",
            )?;
        }

        // If this is a match state, then read the pattern IDs for this state.
        // Pattern IDs is a u32-length prefixed sequence of native endian
        // encoded 32-bit integers.
        let (pattern_ids, state) = if is_match {
            let (npats, nr) =
                wire::try_read_u32_as_usize(state, "pattern ID length")?;
            let state = &state[nr..];
            if npats == 0 {
                return Err(DeserializeError::generic(
                    "state marked as a match, but pattern length is zero",
                ));
            }

            let pattern_ids_len =
                wire::mul(npats, 4, "sparse pattern ID byte length")?;
            wire::check_slice_len(
                state,
                pattern_ids_len,
                "sparse pattern IDs",
            )?;
            let (pattern_ids, state) = state.split_at(pattern_ids_len);
            for patbytes in pattern_ids.chunks(PatternID::SIZE) {
                wire::read_pattern_id(
                    patbytes,
                    "sparse pattern ID in try_state",
                )?;
            }
            (pattern_ids, state)
        } else {
            (&[][..], state)
        };
        if is_match && pattern_ids.is_empty() {
            return Err(DeserializeError::generic(
                "state marked as a match, but has no pattern IDs",
            ));
        }
        if sp.is_match_state(id) && pattern_ids.is_empty() {
            return Err(DeserializeError::generic(
                "state marked special as a match, but has no pattern IDs",
            ));
        }
        if sp.is_match_state(id) != is_match {
            return Err(DeserializeError::generic(
                "whether state is a match or not is inconsistent",
            ));
        }

        // Now read this state's accelerator info. The first byte is the length
        // of the accelerator, which is typically 0 (for no acceleration) but
        // is no bigger than 3. The length indicates the number of bytes that
        // follow, where each byte corresponds to a transition out of this
        // state.
        if state.is_empty() {
            return Err(DeserializeError::generic("no accelerator length"));
        }
        let (accel_len, state) = (usize::from(state[0]), &state[1..]);

        // As with the match flag, acceleration must agree with the special
        // accelerated state ID range.
        if accel_len > 3 {
            return Err(DeserializeError::generic(
                "sparse invalid accelerator length",
            ));
        } else if accel_len == 0 && sp.is_accel_state(id) {
            return Err(DeserializeError::generic(
                "got no accelerators in state, but in accelerator ID range",
            ));
        } else if accel_len > 0 && !sp.is_accel_state(id) {
            return Err(DeserializeError::generic(
                "state in accelerator ID range, but has no accelerators",
            ));
        }

        wire::check_slice_len(
            state,
            accel_len,
            "sparse corrupt accelerator length",
        )?;
        let (accel, _) = (&state[..accel_len], &state[accel_len..]);

        let state = State {
            id,
            is_match,
            ntrans,
            input_ranges,
            next,
            pattern_ids,
            accel,
        };
        // The last transition of every state is its EOI transition; it may
        // never lead to a quit state.
        if sp.is_quit_state(state.next_at(state.ntrans - 1)) {
            return Err(DeserializeError::generic(
                "state with EOI transition to quit state is illegal",
            ));
        }
        Ok(state)
    }
1702
1703    /// Return an iterator over all of the states in this DFA.
1704    ///
1705    /// The iterator returned yields tuples, where the first element is the
1706    /// state ID and the second element is the state itself.
1707    fn states(&self) -> StateIter<'_, T> {
1708        StateIter { trans: self, id: DEAD.as_usize() }
1709    }
1710
    /// Returns the sparse transitions as raw bytes.
    ///
    /// This is the single point where the generic `T` storage is viewed as
    /// a byte slice.
    fn sparse(&self) -> &[u8] {
        self.sparse.as_ref()
    }
1715
    /// Returns the number of bytes represented by a single state ID.
    ///
    /// This is always `StateID::SIZE`; sparse DFAs have no variable width
    /// state ID representation.
    fn id_len(&self) -> usize {
        StateID::SIZE
    }
1720
    /// Return the memory usage, in bytes, of these transitions.
    ///
    /// This does not include the size of a `Transitions` value itself. It
    /// only counts the raw sparse byte representation.
    fn memory_usage(&self) -> usize {
        self.sparse().len()
    }
1727}
1728
#[cfg(feature = "dfa-build")]
impl<T: AsMut<[u8]>> Transitions<T> {
    /// Return a convenient mutable representation of the given state.
    ///
    /// This panics if the state is invalid. The decoding mirrors
    /// `Transitions::state`, but yields mutable slices.
    fn state_mut(&mut self, id: StateID) -> StateMut<'_> {
        let mut rest = &mut self.sparse_mut()[id.as_usize()..];
        // The leading u16 packs the transition count together with an
        // "is this a match state" flag in the top bit.
        let mut ntrans = wire::read_u16(&rest).as_usize();
        let is_match = (1 << 15) & ntrans != 0;
        ntrans &= !(1 << 15);
        rest = &mut rest[2..];

        // Byte range pairs (2 bytes each), then the next state IDs.
        let (input_ranges, rest) = rest.split_at_mut(ntrans * 2);
        let (next, rest) = rest.split_at_mut(ntrans * StateID::SIZE);
        // Match states carry a u32-length-prefixed pattern ID list.
        let (pattern_ids, rest) = if is_match {
            let npats = wire::read_u32(&rest).as_usize();
            rest[4..].split_at_mut(npats * 4)
        } else {
            (&mut [][..], rest)
        };

        // Finally, a 1-byte length prefix followed by the accelerator bytes.
        let accel_len = usize::from(rest[0]);
        let accel = &mut rest[1..accel_len + 1];
        StateMut {
            id,
            is_match,
            ntrans,
            input_ranges,
            next,
            pattern_ids,
            accel,
        }
    }

    /// Returns the sparse transitions as raw mutable bytes.
    fn sparse_mut(&mut self) -> &mut [u8] {
        self.sparse.as_mut()
    }
}
1767
/// The set of all possible starting states in a DFA.
///
/// See the eponymous type in the `dense` module for more details. This type
/// is very similar to `dense::StartTable`, except that its underlying
/// representation is `&[u8]` instead of `&[S]`. (The latter would require
/// sparse DFAs to be aligned, which is explicitly something we do not require
/// because we don't really need it.)
#[derive(Clone)]
struct StartTable<T> {
    /// The initial start state IDs as a contiguous table of native endian
    /// encoded integers, represented by `StateID`.
    ///
    /// In practice, T is either Vec<u8> or &[u8] and has no alignment
    /// requirements.
    ///
    /// The first `2 * stride` (currently always 8) entries always correspond
    /// to the start states for the entire DFA, with the first 4 entries being
    /// for unanchored searches and the second 4 entries being for anchored
    /// searches. To keep things simple, we always use 8 entries even if the
    /// `StartKind` is not both.
    ///
    /// After that, there are `stride * patterns` state IDs, where `patterns`
    /// may be zero in the case of a DFA with no patterns or in the case where
    /// the DFA was built without enabling starting states for each pattern.
    table: T,
    /// The starting state configuration supported. When 'both', both
    /// unanchored and anchored searches work. When 'unanchored', anchored
    /// searches panic. When 'anchored', unanchored searches panic.
    kind: StartKind,
    /// The start state configuration for every possible byte.
    start_map: StartByteMap,
    /// The number of starting state IDs per pattern.
    stride: usize,
    /// The total number of patterns for which starting states are encoded.
    /// This is `None` for DFAs that were built without start states for each
    /// pattern. Thus, one cannot use this field to say how many patterns
    /// are in the DFA in all cases. It is specific to how many patterns are
    /// represented in this start table.
    pattern_len: Option<usize>,
    /// The universal starting state for unanchored searches. This is only
    /// present when the DFA supports unanchored searches and when all starting
    /// state IDs for an unanchored search are equivalent.
    universal_start_unanchored: Option<StateID>,
    /// The universal starting state for anchored searches. This is only
    /// present when the DFA supports anchored searches and when all starting
    /// state IDs for an anchored search are equivalent.
    universal_start_anchored: Option<StateID>,
}
1816
#[cfg(feature = "dfa-build")]
impl StartTable<Vec<u8>> {
    /// Create a zeroed start table sized for the given dense DFA, with room
    /// for per-pattern start states when `pattern_len` is `Some`.
    fn new<T: AsRef<[u32]>>(
        dfa: &dense::DFA<T>,
        pattern_len: Option<usize>,
    ) -> StartTable<Vec<u8>> {
        let stride = Start::len();
        // One stride of entries per pattern, plus two strides for the
        // whole-DFA starts (unanchored and anchored). Unwrapping the checked
        // arithmetic is OK since the only way we're here is if a dense DFA
        // could be constructed successfully, which uses the same space.
        let per_pattern = stride.checked_mul(pattern_len.unwrap_or(0)).unwrap();
        let whole_dfa = stride.checked_mul(2).unwrap();
        let len = per_pattern
            .checked_add(whole_dfa)
            .unwrap()
            .checked_mul(StateID::SIZE)
            .unwrap();
        StartTable {
            table: vec![0; len],
            kind: dfa.start_kind(),
            start_map: dfa.start_map().clone(),
            stride,
            pattern_len,
            universal_start_unanchored: dfa
                .universal_start_state(Anchored::No),
            universal_start_anchored: dfa.universal_start_state(Anchored::Yes),
        }
    }

    /// Build a sparse start table from a dense DFA, translating every dense
    /// start state ID through `remap`.
    fn from_dense_dfa<T: AsRef<[u32]>>(
        dfa: &dense::DFA<T>,
        remap: &[StateID],
    ) -> Result<StartTable<Vec<u8>>, BuildError> {
        // Unless the DFA has start states compiled for each pattern, the
        // starting state table has zero patterns to account for; it will
        // instead only store starting states for the entire DFA.
        let start_pattern_len = if dfa.starts_for_each_pattern() {
            Some(dfa.pattern_len())
        } else {
            None
        };
        let mut sl = StartTable::new(dfa, start_pattern_len);
        for (old_start_id, anchored, sty) in dfa.starts() {
            sl.set_start(anchored, sty, remap[dfa.to_index(old_start_id)]);
        }
        Ok(sl)
    }
}
1866
impl<'a> StartTable<&'a [u8]> {
    /// Deserialize a start table from the given raw bytes, returning the
    /// table along with the total number of bytes consumed.
    ///
    /// The structural pieces (kind, byte map, stride, pattern count,
    /// universal starts, table length) are all checked below. The start
    /// state IDs inside the table itself, however, are taken as-is.
    ///
    /// # Safety
    ///
    /// Callers must ensure the given slice was produced by a corresponding
    /// serialization routine, since the start state IDs inside the table
    /// are not validated here against any transition table. (Separate
    /// validation is expected to be performed before searching.)
    unsafe fn from_bytes_unchecked(
        mut slice: &'a [u8],
    ) -> Result<(StartTable<&'a [u8]>, usize), DeserializeError> {
        // Remember where we started so we can report bytes consumed.
        let slice_start = slice.as_ptr().as_usize();

        let (kind, nr) = StartKind::from_bytes(slice)?;
        slice = &slice[nr..];

        let (start_map, nr) = StartByteMap::from_bytes(slice)?;
        slice = &slice[nr..];

        let (stride, nr) =
            wire::try_read_u32_as_usize(slice, "sparse start table stride")?;
        slice = &slice[nr..];
        // The serialized stride must match this implementation's notion of
        // how many start configurations exist.
        if stride != Start::len() {
            return Err(DeserializeError::generic(
                "invalid sparse starting table stride",
            ));
        }

        // u32::MAX is the sentinel for "no per-pattern start states".
        let (maybe_pattern_len, nr) =
            wire::try_read_u32_as_usize(slice, "sparse start table patterns")?;
        slice = &slice[nr..];
        let pattern_len = if maybe_pattern_len.as_u32() == u32::MAX {
            None
        } else {
            Some(maybe_pattern_len)
        };
        if pattern_len.map_or(false, |len| len > PatternID::LIMIT) {
            return Err(DeserializeError::generic(
                "sparse invalid number of patterns",
            ));
        }

        // Universal start states are also encoded with a u32::MAX sentinel
        // for "absent".
        let (universal_unanchored, nr) =
            wire::try_read_u32(slice, "universal unanchored start")?;
        slice = &slice[nr..];
        let universal_start_unanchored = if universal_unanchored == u32::MAX {
            None
        } else {
            Some(StateID::try_from(universal_unanchored).map_err(|e| {
                DeserializeError::state_id_error(
                    e,
                    "universal unanchored start",
                )
            })?)
        };

        let (universal_anchored, nr) =
            wire::try_read_u32(slice, "universal anchored start")?;
        slice = &slice[nr..];
        let universal_start_anchored = if universal_anchored == u32::MAX {
            None
        } else {
            Some(StateID::try_from(universal_anchored).map_err(|e| {
                DeserializeError::state_id_error(e, "universal anchored start")
            })?)
        };

        let pattern_table_size = wire::mul(
            stride,
            pattern_len.unwrap_or(0),
            "sparse invalid pattern length",
        )?;
        // Our start states always start with a single stride of start states
        // for the entire automaton which permit it to match any pattern. What
        // follows it are an optional set of start states for each pattern.
        let start_state_len = wire::add(
            wire::mul(2, stride, "start state stride too big")?,
            pattern_table_size,
            "sparse invalid 'any' pattern starts size",
        )?;
        let table_bytes_len = wire::mul(
            start_state_len,
            StateID::SIZE,
            "sparse pattern table bytes length",
        )?;
        wire::check_slice_len(
            slice,
            table_bytes_len,
            "sparse start ID table",
        )?;
        let table = &slice[..table_bytes_len];
        slice = &slice[table_bytes_len..];

        let sl = StartTable {
            table,
            kind,
            start_map,
            stride,
            pattern_len,
            universal_start_unanchored,
            universal_start_anchored,
        };
        Ok((sl, slice.as_ptr().as_usize() - slice_start))
    }
}
1965
impl<T: AsRef<[u8]>> StartTable<T> {
    /// Serialize this start table to `dst` using endianness `E`, returning
    /// the number of bytes written. Returns an error if `dst` is too small.
    ///
    /// The layout written here is the mirror image of what
    /// `from_bytes_unchecked` reads.
    fn write_to<E: Endian>(
        &self,
        mut dst: &mut [u8],
    ) -> Result<usize, SerializeError> {
        let nwrite = self.write_to_len();
        if dst.len() < nwrite {
            return Err(SerializeError::buffer_too_small(
                "sparse starting table ids",
            ));
        }
        dst = &mut dst[..nwrite];

        // write start kind
        let nw = self.kind.write_to::<E>(dst)?;
        dst = &mut dst[nw..];
        // write start byte map
        let nw = self.start_map.write_to(dst)?;
        dst = &mut dst[nw..];
        // write stride
        E::write_u32(u32::try_from(self.stride).unwrap(), dst);
        dst = &mut dst[size_of::<u32>()..];
        // write pattern length, with u32::MAX as the sentinel meaning "no
        // per-pattern start states"
        E::write_u32(
            u32::try_from(self.pattern_len.unwrap_or(0xFFFF_FFFF)).unwrap(),
            dst,
        );
        dst = &mut dst[size_of::<u32>()..];
        // write universal start unanchored state id, u32::MAX if absent
        E::write_u32(
            self.universal_start_unanchored
                .map_or(u32::MAX, |sid| sid.as_u32()),
            dst,
        );
        dst = &mut dst[size_of::<u32>()..];
        // write universal start anchored state id, u32::MAX if absent
        E::write_u32(
            self.universal_start_anchored.map_or(u32::MAX, |sid| sid.as_u32()),
            dst,
        );
        dst = &mut dst[size_of::<u32>()..];
        // write start IDs
        for (sid, _, _) in self.iter() {
            E::write_u32(sid.as_u32(), dst);
            dst = &mut dst[StateID::SIZE..];
        }
        Ok(nwrite)
    }

    /// Returns the number of bytes the serialized form of this start table
    /// will use.
    fn write_to_len(&self) -> usize {
        self.kind.write_to_len()
        + self.start_map.write_to_len()
        + size_of::<u32>() // stride
        + size_of::<u32>() // # patterns
        + size_of::<u32>() // universal unanchored start
        + size_of::<u32>() // universal anchored start
        + self.table().len()
    }

    /// Validates that every starting state ID in this table is valid.
    ///
    /// That is, every starting state ID can be used to correctly decode a
    /// state in the DFA's sparse transitions. `seen` is the set of state IDs
    /// previously verified against the transition table.
    fn validate(
        &self,
        sp: &Special,
        seen: &Seen,
    ) -> Result<(), DeserializeError> {
        for (id, _, _) in self.iter() {
            if !seen.contains(&id) {
                return Err(DeserializeError::generic(
                    "found invalid start state ID",
                ));
            }
            if sp.is_match_state(id) {
                return Err(DeserializeError::generic(
                    "start states cannot be match states",
                ));
            }
        }
        Ok(())
    }

    /// Converts this start list to a borrowed value.
    fn as_ref(&self) -> StartTable<&'_ [u8]> {
        StartTable {
            table: self.table(),
            kind: self.kind,
            start_map: self.start_map.clone(),
            stride: self.stride,
            pattern_len: self.pattern_len,
            universal_start_unanchored: self.universal_start_unanchored,
            universal_start_anchored: self.universal_start_anchored,
        }
    }

    /// Converts this start list to an owned value.
    #[cfg(feature = "alloc")]
    fn to_owned(&self) -> StartTable<alloc::vec::Vec<u8>> {
        StartTable {
            table: self.table().to_vec(),
            kind: self.kind,
            start_map: self.start_map.clone(),
            stride: self.stride,
            pattern_len: self.pattern_len,
            universal_start_unanchored: self.universal_start_unanchored,
            universal_start_anchored: self.universal_start_anchored,
        }
    }

    /// Return the start state for the given starting configuration and
    /// anchor mode. For `Anchored::No` and `Anchored::Yes`, an error is
    /// returned if this table does not support that mode. For
    /// `Anchored::Pattern`, an error is returned when this table was built
    /// without individual starting states for each pattern, and the DEAD
    /// state is returned for a pattern ID beyond this table's pattern count.
    fn start(
        &self,
        anchored: Anchored,
        start: Start,
    ) -> Result<StateID, StartError> {
        let start_index = start.as_usize();
        let index = match anchored {
            Anchored::No => {
                if !self.kind.has_unanchored() {
                    return Err(StartError::unsupported_anchored(anchored));
                }
                start_index
            }
            Anchored::Yes => {
                if !self.kind.has_anchored() {
                    return Err(StartError::unsupported_anchored(anchored));
                }
                self.stride + start_index
            }
            Anchored::Pattern(pid) => {
                let len = match self.pattern_len {
                    None => {
                        return Err(StartError::unsupported_anchored(anchored))
                    }
                    Some(len) => len,
                };
                if pid.as_usize() >= len {
                    return Ok(DEAD);
                }
                // Per-pattern starts follow the two whole-DFA strides.
                (2 * self.stride)
                    + (self.stride * pid.as_usize())
                    + start_index
            }
        };
        let start = index * StateID::SIZE;
        // This is OK since we're allowed to assume that the start table
        // contains valid StateIDs.
        Ok(wire::read_state_id_unchecked(&self.table()[start..]).0)
    }

    /// Return an iterator over all start IDs in this table.
    fn iter(&self) -> StartStateIter<'_, T> {
        StartStateIter { st: self, i: 0 }
    }

    /// Returns the total number of start state IDs in this table.
    fn len(&self) -> usize {
        self.table().len() / StateID::SIZE
    }

    /// Returns the table as a raw slice of bytes.
    fn table(&self) -> &[u8] {
        self.table.as_ref()
    }

    /// Return the memory usage, in bytes, of this start list.
    ///
    /// This does not include the size of a `StartTable` value itself.
    fn memory_usage(&self) -> usize {
        self.table().len()
    }
}
2146
#[cfg(feature = "dfa-build")]
impl<T: AsMut<[u8]>> StartTable<T> {
    /// Set the start state for the given index and pattern.
    ///
    /// If the pattern ID or state ID are not valid, then this will panic.
    fn set_start(&mut self, anchored: Anchored, start: Start, id: StateID) {
        let start_index = start.as_usize();
        // Unanchored starts occupy the first stride, anchored starts the
        // second, and per-pattern starts everything after that.
        let index = match anchored {
            Anchored::No => start_index,
            Anchored::Yes => self.stride + start_index,
            Anchored::Pattern(pid) => {
                let pid = pid.as_usize();
                let len = self
                    .pattern_len
                    .expect("start states for each pattern enabled");
                assert!(pid < len, "invalid pattern ID {pid:?}");
                let whole_dfa = self.stride.checked_mul(2).unwrap();
                self.stride
                    .checked_mul(pid)
                    .unwrap()
                    .checked_add(whole_dfa)
                    .unwrap()
                    .checked_add(start_index)
                    .unwrap()
            }
        };
        // Each table entry is a native endian encoded state ID.
        let lo = index * StateID::SIZE;
        let hi = lo + StateID::SIZE;
        wire::write_state_id::<wire::NE>(id, &mut self.table.as_mut()[lo..hi]);
    }
}
2180
/// An iterator over all start state IDs in a sparse DFA.
struct StartStateIter<'a, T> {
    /// The start table being iterated over.
    st: &'a StartTable<T>,
    /// The index of the next start state ID to yield.
    i: usize,
}
2186
2187impl<'a, T: AsRef<[u8]>> Iterator for StartStateIter<'a, T> {
2188    type Item = (StateID, Anchored, Start);
2189
2190    fn next(&mut self) -> Option<(StateID, Anchored, Start)> {
2191        let i = self.i;
2192        if i >= self.st.len() {
2193            return None;
2194        }
2195        self.i += 1;
2196
2197        // This unwrap is okay since the stride of any DFA must always match
2198        // the number of start state types.
2199        let start_type = Start::from_usize(i % self.st.stride).unwrap();
2200        let anchored = if i < self.st.stride {
2201            Anchored::No
2202        } else if i < (2 * self.st.stride) {
2203            Anchored::Yes
2204        } else {
2205            let pid = (i - (2 * self.st.stride)) / self.st.stride;
2206            Anchored::Pattern(PatternID::new(pid).unwrap())
2207        };
2208        let start = i * StateID::SIZE;
2209        let end = start + StateID::SIZE;
2210        let bytes = self.st.table()[start..end].try_into().unwrap();
2211        // This is OK since we're allowed to assume that any IDs in this start
2212        // table are correct and valid for this DFA.
2213        let id = StateID::from_ne_bytes_unchecked(bytes);
2214        Some((id, anchored, start_type))
2215    }
2216}
2217
impl<'a, T> fmt::Debug for StartStateIter<'a, T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Only the current position is shown; the table itself is elided.
        f.debug_struct("StartStateIter").field("i", &self.i).finish()
    }
}
2223
/// An iterator over all states in a sparse DFA.
///
/// This iterator yields `State` values; each state carries its own
/// identifier.
struct StateIter<'a, T> {
    /// The transitions being iterated over.
    trans: &'a Transitions<T>,
    /// The byte offset of the next state to decode.
    id: usize,
}
2232
2233impl<'a, T: AsRef<[u8]>> Iterator for StateIter<'a, T> {
2234    type Item = State<'a>;
2235
2236    fn next(&mut self) -> Option<State<'a>> {
2237        if self.id >= self.trans.sparse().len() {
2238            return None;
2239        }
2240        let state = self.trans.state(StateID::new_unchecked(self.id));
2241        self.id = self.id + state.write_to_len();
2242        Some(state)
2243    }
2244}
2245
impl<'a, T> fmt::Debug for StateIter<'a, T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Only the current offset is shown; the transitions are elided.
        f.debug_struct("StateIter").field("id", &self.id).finish()
    }
}
2251
/// A representation of a sparse DFA state that can be cheaply materialized
/// from a state identifier.
#[derive(Clone)]
struct State<'a> {
    /// The identifier of this state.
    id: StateID,
    /// Whether this is a match state or not.
    is_match: bool,
    /// The number of transitions in this state.
    ntrans: usize,
    /// Pairs of input ranges, where there is one pair for each transition.
    /// Each pair specifies an inclusive start and end byte range for the
    /// corresponding transition.
    input_ranges: &'a [u8],
    /// Transitions to the next state. This slice contains native endian
    /// encoded state identifiers, with `StateID` as the representation.
    /// Thus, there are `ntrans * StateID::SIZE` bytes in this slice.
    next: &'a [u8],
    /// If this is a match state, then this contains the pattern IDs that match
    /// when the DFA is in this state.
    ///
    /// This is a contiguous sequence of 32-bit native endian encoded integers.
    pattern_ids: &'a [u8],
    /// An accelerator for this state, if present. If this state has no
    /// accelerator, then this is an empty slice. When non-empty, this slice
    /// has length at most 3 and corresponds to the exhaustive set of bytes
    /// that must be seen in order to transition out of this state.
    accel: &'a [u8],
}
2281
impl<'a> State<'a> {
    /// Searches for the next transition given an input byte. If no such
    /// transition could be found, then a dead state is returned.
    ///
    /// This is marked as inline to help dramatically boost sparse searching,
    /// which decodes each state it enters to follow the next transition.
    #[cfg_attr(feature = "perf-inline", inline(always))]
    fn next(&self, input: u8) -> StateID {
        // This straight linear search was observed to be much better than
        // binary search on ASCII haystacks, likely because a binary search
        // visits the ASCII case last but a linear search sees it first. A
        // binary search does do a little better on non-ASCII haystacks, but
        // not by much. There might be a better trade off lurking here.
        //
        // Note that the last transition (at index `ntrans - 1`) is excluded,
        // since it is reserved for the special EOI transition.
        for i in 0..(self.ntrans - 1) {
            let (start, end) = self.range(i);
            if start <= input && input <= end {
                return self.next_at(i);
            }
            // We could bail early with an extra branch: if input < start,
            // then we know we'll never find a matching transition.
            // Interestingly, this extra branch seems to not help performance,
            // or will even hurt it. It's likely very dependent on the DFA
            // itself and what is being searched.
        }
        DEAD
    }

    /// Returns the next state ID for the special EOI transition.
    ///
    /// The EOI transition is always encoded as the last transition in a
    /// state.
    fn next_eoi(&self) -> StateID {
        self.next_at(self.ntrans - 1)
    }

    /// Returns the identifier for this state.
    fn id(&self) -> StateID {
        self.id
    }

    /// Returns the inclusive input byte range for the ith transition in this
    /// state.
    ///
    /// Ranges are stored as adjacent `(start, end)` byte pairs, hence the
    /// `i * 2` indexing.
    fn range(&self, i: usize) -> (u8, u8) {
        (self.input_ranges[i * 2], self.input_ranges[i * 2 + 1])
    }

    /// Returns the next state for the ith transition in this state.
    ///
    /// State IDs are stored as native endian integers, `StateID::SIZE` bytes
    /// each.
    fn next_at(&self, i: usize) -> StateID {
        let start = i * StateID::SIZE;
        let end = start + StateID::SIZE;
        let bytes = self.next[start..end].try_into().unwrap();
        StateID::from_ne_bytes_unchecked(bytes)
    }

    /// Returns the pattern ID for the given match index. If the match index
    /// is invalid, then this panics.
    fn pattern_id(&self, match_index: usize) -> PatternID {
        let start = match_index * PatternID::SIZE;
        wire::read_pattern_id_unchecked(&self.pattern_ids[start..]).0
    }

    /// Returns the total number of pattern IDs for this state. This is always
    /// zero when `is_match` is false.
    fn pattern_len(&self) -> usize {
        // Each pattern ID is encoded as a 32-bit (4 byte) integer.
        assert_eq!(0, self.pattern_ids.len() % 4);
        self.pattern_ids.len() / 4
    }

    /// Return an accelerator for this state.
    fn accelerator(&self) -> &'a [u8] {
        self.accel
    }

    /// Write the raw representation of this state to the given buffer using
    /// the given endianness.
    ///
    /// Returns the number of bytes written on success, or an error if `dst`
    /// is smaller than `write_to_len()`. The layout written here must stay
    /// in sync with `write_to_len` below.
    fn write_to<E: Endian>(
        &self,
        mut dst: &mut [u8],
    ) -> Result<usize, SerializeError> {
        let nwrite = self.write_to_len();
        if dst.len() < nwrite {
            return Err(SerializeError::buffer_too_small(
                "sparse state transitions",
            ));
        }

        // The high bit of the 16-bit transition count doubles as the "is
        // this a match state" flag.
        let ntrans =
            if self.is_match { self.ntrans | (1 << 15) } else { self.ntrans };
        E::write_u16(u16::try_from(ntrans).unwrap(), dst);
        dst = &mut dst[size_of::<u16>()..];

        // The inclusive (start, end) byte pairs, one pair per transition.
        dst[..self.input_ranges.len()].copy_from_slice(self.input_ranges);
        dst = &mut dst[self.input_ranges.len()..];

        // The next-state IDs, re-encoded with the requested endianness.
        for i in 0..self.ntrans {
            E::write_u32(self.next_at(i).as_u32(), dst);
            dst = &mut dst[StateID::SIZE..];
        }

        // For match states only: a pattern count followed by the pattern IDs.
        if self.is_match {
            E::write_u32(u32::try_from(self.pattern_len()).unwrap(), dst);
            dst = &mut dst[size_of::<u32>()..];
            for i in 0..self.pattern_len() {
                let pid = self.pattern_id(i);
                E::write_u32(pid.as_u32(), dst);
                dst = &mut dst[PatternID::SIZE..];
            }
        }

        // Finally, a one byte accelerator length followed by the accelerator
        // bytes themselves (possibly empty).
        dst[0] = u8::try_from(self.accel.len()).unwrap();
        dst[1..][..self.accel.len()].copy_from_slice(self.accel);

        Ok(nwrite)
    }

    /// Return the total number of bytes that this state consumes in its
    /// encoded form.
    ///
    /// This must mirror the layout produced by `write_to` above: a u16
    /// transition count, the input range pairs, the next-state IDs, the
    /// accelerator length byte plus accelerator bytes, and (for match states
    /// only) a u32 pattern count plus the pattern IDs.
    fn write_to_len(&self) -> usize {
        let mut len = 2
            + (self.ntrans * 2)
            + (self.ntrans * StateID::SIZE)
            + (1 + self.accel.len());
        if self.is_match {
            len += size_of::<u32>() + self.pattern_ids.len();
        }
        len
    }
}
2407
2408impl<'a> fmt::Debug for State<'a> {
2409    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2410        let mut printed = false;
2411        for i in 0..(self.ntrans - 1) {
2412            let next = self.next_at(i);
2413            if next == DEAD {
2414                continue;
2415            }
2416
2417            if printed {
2418                write!(f, ", ")?;
2419            }
2420            let (start, end) = self.range(i);
2421            if start == end {
2422                write!(f, "{:?} => {:?}", DebugByte(start), next.as_usize())?;
2423            } else {
2424                write!(
2425                    f,
2426                    "{:?}-{:?} => {:?}",
2427                    DebugByte(start),
2428                    DebugByte(end),
2429                    next.as_usize(),
2430                )?;
2431            }
2432            printed = true;
2433        }
2434        let eoi = self.next_at(self.ntrans - 1);
2435        if eoi != DEAD {
2436            if printed {
2437                write!(f, ", ")?;
2438            }
2439            write!(f, "EOI => {:?}", eoi.as_usize())?;
2440        }
2441        Ok(())
2442    }
2443}
2444
/// A representation of a mutable sparse DFA state that can be cheaply
/// materialized from a state identifier.
#[cfg(feature = "dfa-build")]
struct StateMut<'a> {
    /// The identifier of this state.
    id: StateID,
    /// Whether this is a match state or not.
    is_match: bool,
    /// The number of transitions in this state.
    ntrans: usize,
    /// Pairs of input ranges, where there is one pair for each transition.
    /// Each pair specifies an inclusive start and end byte range for the
    /// corresponding transition.
    input_ranges: &'a mut [u8],
    /// Transitions to the next state. This slice contains native endian
    /// encoded state identifiers. Thus, there are `ntrans * StateID::SIZE`
    /// bytes in this slice.
    next: &'a mut [u8],
    /// If this is a match state, then this contains the pattern IDs that match
    /// when the DFA is in this state.
    ///
    /// This is a contiguous sequence of 32-bit native endian encoded integers.
    pattern_ids: &'a [u8],
    /// An accelerator for this state, if present. If this state has no
    /// accelerator, then this is an empty slice. When non-empty, this slice
    /// has length at most 3 and corresponds to the exhaustive set of bytes
    /// that must be seen in order to transition out of this state.
    accel: &'a mut [u8],
}
2474
#[cfg(feature = "dfa-build")]
impl<'a> StateMut<'a> {
    /// Sets the ith transition to the given state.
    ///
    /// The new state ID is written in native endian format, matching how
    /// transitions are read back out.
    fn set_next_at(&mut self, i: usize, next: StateID) {
        let offset = i * StateID::SIZE;
        let dst = &mut self.next[offset..offset + StateID::SIZE];
        wire::write_state_id::<wire::NE>(next, dst);
    }
}
2484
#[cfg(feature = "dfa-build")]
impl<'a> fmt::Debug for StateMut<'a> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Reborrow our mutable slices as shared ones so we can delegate to
        // the `Debug` impl on the immutable `State` representation.
        let view = State {
            id: self.id,
            is_match: self.is_match,
            ntrans: self.ntrans,
            input_ranges: self.input_ranges,
            next: self.next,
            pattern_ids: self.pattern_ids,
            accel: self.accel,
        };
        fmt::Debug::fmt(&view, f)
    }
}
2500
2501// In order to validate everything, we not only need to make sure we
2502// can decode every state, but that every transition in every state
2503// points to a valid state. There are many duplicative transitions, so
2504// we record state IDs that we've verified so that we don't redo the
2505// decoding work.
2506//
2507// Except, when in no_std mode, we don't have dynamic memory allocation
2508// available to us, so we skip this optimization. It's not clear
2509// whether doing something more clever is worth it just yet. If you're
2510// profiling this code and need it to run faster, please file an issue.
2511//
// OK, so we also use this to record the set of valid state IDs, since
// it is possible for a transition to point to an invalid state ID that
// still (somehow) deserializes to a valid state. Thus, we need to make
// sure our transitions are limited to actually correct state IDs.
2516// The problem is, I'm not sure how to do this verification step in
2517// no-std no-alloc mode. I think we'd *have* to store the set of valid
2518// state IDs in the DFA itself. For now, we don't do this verification
2519// in no-std no-alloc mode. The worst thing that can happen is an
2520// incorrect result. But no panics or memory safety problems should
2521// result. Because we still do validate that the state itself is
2522// "valid" in the sense that everything it points to actually exists.
2523//
2524// ---AG
#[derive(Debug)]
struct Seen {
    /// The set of state IDs that have been verified so far. Only available
    /// when dynamic memory allocation is possible.
    #[cfg(feature = "alloc")]
    set: alloc::collections::BTreeSet<StateID>,
    /// A zero-sized stand-in for no-alloc mode, where nothing is recorded.
    #[cfg(not(feature = "alloc"))]
    set: core::marker::PhantomData<StateID>,
}
2532
#[cfg(feature = "alloc")]
impl Seen {
    /// Create an empty set of seen state IDs.
    fn new() -> Seen {
        let set = alloc::collections::BTreeSet::new();
        Seen { set }
    }

    /// Record the given state ID as seen.
    fn insert(&mut self, id: StateID) {
        let _ = self.set.insert(id);
    }

    /// Returns true if and only if the given state ID has been recorded.
    fn contains(&self, id: &StateID) -> bool {
        self.set.contains(id)
    }
}
2545
2546#[cfg(not(feature = "alloc"))]
2547impl Seen {
2548    fn new() -> Seen {
2549        Seen { set: core::marker::PhantomData }
2550    }
2551    fn insert(&mut self, _id: StateID) {}
2552    fn contains(&self, _id: &StateID) -> bool {
2553        true
2554    }
2555}
2556
2557/*
2558/// A binary search routine specialized specifically to a sparse DFA state's
2559/// transitions. Specifically, the transitions are defined as a set of pairs
2560/// of input bytes that delineate an inclusive range of bytes. If the input
2561/// byte is in the range, then the corresponding transition is a match.
2562///
2563/// This binary search accepts a slice of these pairs and returns the position
2564/// of the matching pair (the ith transition), or None if no matching pair
2565/// could be found.
2566///
2567/// Note that this routine is not currently used since it was observed to
2568/// either decrease performance when searching ASCII, or did not provide enough
2569/// of a boost on non-ASCII haystacks to be worth it. However, we leave it here
2570/// for posterity in case we can find a way to use it.
2571///
2572/// In theory, we could use the standard library's search routine if we could
2573/// cast a `&[u8]` to a `&[(u8, u8)]`, but I don't believe this is currently
2574/// guaranteed to be safe and is thus UB (since I don't think the in-memory
2575/// representation of `(u8, u8)` has been nailed down). One could define a
2576/// repr(C) type, but the casting doesn't seem justified.
2577#[cfg_attr(feature = "perf-inline", inline(always))]
2578fn binary_search_ranges(ranges: &[u8], needle: u8) -> Option<usize> {
2579    debug_assert!(ranges.len() % 2 == 0, "ranges must have even length");
2580    debug_assert!(ranges.len() <= 512, "ranges should be short");
2581
2582    let (mut left, mut right) = (0, ranges.len() / 2);
2583    while left < right {
2584        let mid = (left + right) / 2;
2585        let (b1, b2) = (ranges[mid * 2], ranges[mid * 2 + 1]);
2586        if needle < b1 {
2587            right = mid;
2588        } else if needle > b2 {
2589            left = mid + 1;
2590        } else {
2591            return Some(mid);
2592        }
2593    }
2594    None
2595}
2596*/
2597
#[cfg(all(test, feature = "syntax", feature = "dfa-build"))]
mod tests {
    use crate::{
        dfa::{dense::DFA, Automaton},
        nfa::thompson,
        Input, MatchError,
    };

    // See the analogous test in src/hybrid/dfa.rs and src/dfa/dense.rs.
    #[test]
    fn heuristic_unicode_forward() {
        // Note: unlike the reverse test below, this builds from the normal
        // (non-reversed) NFA, since we're exercising forward searching.
        let dfa = DFA::builder()
            .configure(DFA::config().unicode_word_boundary(true))
            .build(r"\b[0-9]+\b")
            .unwrap()
            .to_sparse()
            .unwrap();

        // Starting the search at offset 2 requires inspecting the look-behind
        // byte at offset 1, which is non-ASCII (the second byte of 'β') and
        // thus triggers the heuristic's quit behavior.
        let input = Input::new("β123").range(2..);
        let expected = MatchError::quit(0xB2, 1);
        let got = dfa.try_search_fwd(&input);
        assert_eq!(Err(expected), got);

        // Similarly, ending the search at offset 3 requires inspecting the
        // look-ahead byte at offset 3 (the first byte of 'β').
        let input = Input::new("123β").range(..3);
        let expected = MatchError::quit(0xCE, 3);
        let got = dfa.try_search_fwd(&input);
        assert_eq!(Err(expected), got);
    }

    // See the analogous test in src/hybrid/dfa.rs and src/dfa/dense.rs.
    #[test]
    fn heuristic_unicode_reverse() {
        let dfa = DFA::builder()
            .configure(DFA::config().unicode_word_boundary(true))
            // A reverse search needs a DFA built from the reversed NFA.
            .thompson(thompson::Config::new().reverse(true))
            .build(r"\b[0-9]+\b")
            .unwrap()
            .to_sparse()
            .unwrap();

        let input = Input::new("β123").range(2..);
        let expected = MatchError::quit(0xB2, 1);
        let got = dfa.try_search_rev(&input);
        assert_eq!(Err(expected), got);

        let input = Input::new("123β").range(..3);
        let expected = MatchError::quit(0xCE, 3);
        let got = dfa.try_search_rev(&input);
        assert_eq!(Err(expected), got);
    }
}