/*!
Types and routines specific to dense DFAs.
This module is the home of [`dense::DFA`](DFA).
This module also contains a [`dense::Builder`](Builder) and a
[`dense::Config`](Config) for configuring and building a dense DFA.
*/
#[cfg(feature = "alloc")]
use core::cmp;
use core::{convert::TryFrom, fmt, iter, mem::size_of, slice};
#[cfg(feature = "alloc")]
use alloc::{
collections::{BTreeMap, BTreeSet},
vec,
vec::Vec,
};
#[cfg(feature = "alloc")]
use crate::{
dfa::{
accel::Accel, determinize, error::Error, minimize::Minimizer, sparse,
},
nfa::thompson,
util::alphabet::ByteSet,
MatchKind,
};
use crate::{
dfa::{
accel::Accels,
automaton::{fmt_state_indicator, Automaton},
special::Special,
DEAD,
},
util::{
alphabet::{self, ByteClasses},
bytes::{self, DeserializeError, Endian, SerializeError},
id::{PatternID, StateID},
start::Start,
},
};
/// The label that is pre-pended to a serialized DFA.
const LABEL: &str = "rust-regex-automata-dfa-dense";
/// The format version of dense DFAs. This version gets incremented when a
/// change to the serialization format occurs. A change may not necessarily be
/// a breaking change, but the version does permit good error messages in the
/// case where a breaking change is made.
const VERSION: u32 = 2;
/// The configuration used for compiling a dense DFA.
///
/// A dense DFA configuration is a simple data object that is typically used
/// with [`dense::Builder::configure`](self::Builder::configure).
///
/// The default configuration guarantees that a search will _never_ return a
/// [`MatchError`](crate::MatchError) for any haystack or pattern. Setting a
/// quit byte with [`Config::quit`] or enabling heuristic support for Unicode
/// word boundaries with [`Config::unicode_word_boundary`] can in turn cause a
/// search to return an error. See the corresponding configuration options for
/// more details on when those error conditions arise.
#[cfg(feature = "alloc")]
#[derive(Clone, Copy, Debug, Default)]
pub struct Config {
// As with other configuration types in this crate, we put all our knobs
// in options so that we can distinguish between "default" and "not set."
// This makes it possible to easily combine multiple configurations
// without default values overwriting explicitly specified values. See the
// 'overwrite' method.
//
// For docs on the fields below, see the corresponding method setters.
anchored: Option<bool>,
accelerate: Option<bool>,
minimize: Option<bool>,
match_kind: Option<MatchKind>,
starts_for_each_pattern: Option<bool>,
byte_classes: Option<bool>,
unicode_word_boundary: Option<bool>,
quit: Option<ByteSet>,
dfa_size_limit: Option<Option<usize>>,
determinize_size_limit: Option<Option<usize>>,
}
#[cfg(feature = "alloc")]
impl Config {
/// Return a new default dense DFA compiler configuration.
pub fn new() -> Config {
Config::default()
}
/// Set whether matching must be anchored at the beginning of the input.
///
/// When enabled, a match must begin at the start of a search. When
/// disabled, the DFA will act as if the pattern started with a `(?s:.)*?`,
/// which enables a match to appear anywhere.
///
/// Note that if you want to run both anchored and unanchored
/// searches without building multiple automatons, you can enable the
/// [`Config::starts_for_each_pattern`] configuration instead. This will
/// permit unanchored any-pattern searches and pattern-specific anchored
/// searches. See the documentation for that configuration for an example.
///
/// By default this is disabled.
///
/// **WARNING:** this is subtly different than using a `^` at the start of
/// your regex. A `^` forces a regex to match exclusively at the start of
/// input, regardless of where you begin your search. In contrast, enabling
/// this option will allow your regex to match anywhere in your input,
/// but the match must start at the beginning of a search. (Most of the
/// higher level convenience search routines make "start of input" and
/// "start of search" equivalent, but some routines allow treating these as
/// orthogonal.)
///
/// For example, consider the haystack `aba` and the following searches:
///
/// 1. The regex `^a` is compiled with `anchored=false` and searches
/// `aba` starting at position `2`. Since `^` requires the match to
/// start at the beginning of the input and `2 > 0`, no match is found.
/// 2. The regex `a` is compiled with `anchored=true` and searches `aba`
/// starting at position `2`. This reports a match at `[2, 3]` since
/// the match starts where the search started. Since there is no `^`,
/// there is no requirement for the match to start at the beginning of
/// the input.
/// 3. The regex `a` is compiled with `anchored=true` and searches `aba`
/// starting at position `1`. Since `b` corresponds to position `1` and
/// since the regex is anchored, it finds no match.
/// 4. The regex `a` is compiled with `anchored=false` and searches `aba`
/// starting at position `1`. Since the regex is neither anchored nor
/// starts with `^`, the regex is compiled with an implicit `(?s:.)*?`
/// prefix that permits it to match anywhere. Thus, it reports a match
/// at `[2, 3]`.
///
/// # Example
///
/// This demonstrates the differences between an anchored search and
/// a pattern that begins with `^` (as described in the above warning
/// message).
///
/// ```
/// use regex_automata::{dfa::{Automaton, dense}, HalfMatch};
///
/// let haystack = "aba".as_bytes();
///
/// let dfa = dense::Builder::new()
/// .configure(dense::Config::new().anchored(false)) // default
/// .build(r"^a")?;
/// let got = dfa.find_leftmost_fwd_at(None, None, haystack, 2, 3)?;
/// // No match is found because 2 is not the beginning of the haystack,
/// // which is what ^ requires.
/// let expected = None;
/// assert_eq!(expected, got);
///
/// let dfa = dense::Builder::new()
/// .configure(dense::Config::new().anchored(true))
/// .build(r"a")?;
/// let got = dfa.find_leftmost_fwd_at(None, None, haystack, 2, 3)?;
/// // An anchored search can still match anywhere in the haystack; it just
/// // must begin at the start of the search, which is '2' in this case.
/// let expected = Some(HalfMatch::must(0, 3));
/// assert_eq!(expected, got);
///
/// let dfa = dense::Builder::new()
/// .configure(dense::Config::new().anchored(true))
/// .build(r"a")?;
/// let got = dfa.find_leftmost_fwd_at(None, None, haystack, 1, 3)?;
/// // No match is found since we start searching at offset 1 which
/// // corresponds to 'b'. Since there is no '(?s:.)*?' prefix, no match
/// // is found.
/// let expected = None;
/// assert_eq!(expected, got);
///
/// let dfa = dense::Builder::new()
/// .configure(dense::Config::new().anchored(false)) // default
/// .build(r"a")?;
/// let got = dfa.find_leftmost_fwd_at(None, None, haystack, 1, 3)?;
/// // Since anchored=false, an implicit '(?s:.)*?' prefix was added to the
/// // pattern. Even though the search starts at 'b', the 'match anything'
/// // prefix allows the search to match 'a'.
/// let expected = Some(HalfMatch::must(0, 3));
/// assert_eq!(expected, got);
///
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
pub fn anchored(mut self, yes: bool) -> Config {
self.anchored = Some(yes);
self
}
/// Enable state acceleration.
///
/// When enabled, DFA construction will analyze each state to determine
/// whether it is eligible for simple acceleration. Acceleration typically
/// occurs when most of a state's transitions loop back to itself, leaving
/// only a select few bytes that will exit the state. When this occurs,
/// other routines like `memchr` can be used to look for those bytes which
/// may be much faster than traversing the DFA.
///
/// Callers may elect to disable this if consistent performance is more
/// desirable than variable performance. Namely, acceleration can sometimes
/// make searching slower than it otherwise would be if the transitions
/// that leave accelerated states are traversed frequently.
///
/// See [`Automaton::accelerator`](crate::dfa::Automaton::accelerator) for
/// an example.
///
/// This is enabled by default.
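///
/// # Example
///
/// A minimal sketch of disabling acceleration (match results are the
/// same either way; only search performance characteristics change):
///
/// ```
/// use regex_automata::dfa::{Automaton, dense};
///
/// let dfa = dense::Builder::new()
///     .configure(dense::Config::new().accelerate(false))
///     .build(r"(foo|bar)+")?;
/// // The DFA matches exactly as it would with acceleration enabled.
/// assert!(dfa.find_leftmost_fwd(b"xxfoobarxx")?.is_some());
///
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```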
pub fn accelerate(mut self, yes: bool) -> Config {
self.accelerate = Some(yes);
self
}
/// Minimize the DFA.
///
/// When enabled, the DFA built will be minimized such that it is as small
/// as possible.
///
/// Whether one enables minimization or not depends on the types of costs
/// you're willing to pay and how much you care about its benefits. In
/// particular, minimization has worst case `O(n*k*log(n))` time and `O(k*n)`
/// space, where `n` is the number of DFA states and `k` is the alphabet
/// size. In practice, minimization can be quite costly in terms of both
/// space and time, so it should only be done if you're willing to wait
/// longer to produce a DFA. In general, you might want a minimal DFA in
/// the following circumstances:
///
/// 1. You would like to optimize for the size of the automaton. This can
/// manifest in one of two ways. Firstly, if you're converting the
/// DFA into Rust code (or a table embedded in the code), then a minimal
/// DFA will translate into a corresponding reduction in code size, and
/// thus, also the final compiled binary size. Secondly, if you are
/// building many DFAs and putting them on the heap, you'll be able to
/// fit more if they are smaller. Note though that building a minimal
/// DFA itself requires additional space; you only realize the space
/// savings once the minimal DFA is constructed (at which point, the
/// space used for minimization is freed).
/// 2. You've observed that a smaller DFA results in faster match
/// performance. Naively, this isn't guaranteed since there is no
/// inherent difference between matching with a bigger-than-minimal
/// DFA and a minimal DFA. However, a smaller DFA may make use of your
/// CPU's cache more efficiently.
/// 3. You are trying to establish an equivalence between regular
/// languages. The standard method for this is to build a minimal DFA
/// for each language and then compare them. If the DFAs are equivalent
/// (up to state renaming), then the languages are equivalent.
///
/// Typically, minimization only makes sense as an offline process. That
/// is, one might minimize a DFA before serializing it to persistent
/// storage. In practical terms, minimization can take around an order of
/// magnitude more time than compiling the initial DFA via determinization.
///
/// This option is disabled by default.
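///
/// # Example
///
/// A minimal sketch of enabling minimization. Note that minimization
/// only shrinks the DFA; it never changes what the DFA matches:
///
/// ```
/// use regex_automata::dfa::{Automaton, dense};
///
/// let dfa = dense::Builder::new()
///     .configure(dense::Config::new().minimize(true))
///     .build(r"[a-z]+[0-9]+")?;
/// assert!(dfa.find_leftmost_fwd(b"abc123")?.is_some());
///
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```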
pub fn minimize(mut self, yes: bool) -> Config {
self.minimize = Some(yes);
self
}
/// Set the desired match semantics.
///
/// The default is [`MatchKind::LeftmostFirst`], which corresponds to the
/// match semantics of Perl-like regex engines. That is, when multiple
/// patterns would match at the same leftmost position, the pattern that
/// appears first in the concrete syntax is chosen.
///
/// Currently, the only other kind of match semantics supported is
/// [`MatchKind::All`]. This corresponds to classical DFA construction
/// where all possible matches are added to the DFA.
///
/// Typically, `All` is used when one wants to execute an overlapping
/// search and `LeftmostFirst` otherwise. In particular, it rarely makes
/// sense to use `All` with the various "leftmost" find routines, since the
/// leftmost routines depend on the `LeftmostFirst` automata construction
/// strategy. Specifically, `LeftmostFirst` adds dead states to the DFA
/// as a way to terminate the search and report a match. `LeftmostFirst`
/// also supports non-greedy matches using this strategy, whereas `All`
/// does not.
///
/// # Example: overlapping search
///
/// This example shows the typical use of `MatchKind::All`, which is to
/// report overlapping matches.
///
/// ```
/// use regex_automata::{
/// dfa::{Automaton, OverlappingState, dense},
/// HalfMatch, MatchKind,
/// };
///
/// let dfa = dense::Builder::new()
/// .configure(dense::Config::new().match_kind(MatchKind::All))
/// .build_many(&[r"\w+$", r"\S+$"])?;
/// let haystack = "@foo".as_bytes();
/// let mut state = OverlappingState::start();
///
/// let expected = Some(HalfMatch::must(1, 4));
/// let got = dfa.find_overlapping_fwd(haystack, &mut state)?;
/// assert_eq!(expected, got);
///
/// // The first pattern also matches at the same position, so re-running
/// // the search will yield another match. Notice also that the first
/// // pattern is returned after the second. This is because the second
/// // pattern begins its match before the first, making it the earlier
/// // match, and earlier matches are reported first.
/// let expected = Some(HalfMatch::must(0, 4));
/// let got = dfa.find_overlapping_fwd(haystack, &mut state)?;
/// assert_eq!(expected, got);
///
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
///
/// # Example: reverse automaton to find start of match
///
/// Another example for using `MatchKind::All` is for constructing a
/// reverse automaton to find the start of a match. `All` semantics are
/// used for this in order to find the longest possible match, which
/// corresponds to the leftmost starting position.
///
/// Note that if you need the starting position then
/// [`dfa::regex::Regex`](crate::dfa::regex::Regex) will handle this for
/// you, so it's usually not necessary to do this yourself.
///
/// ```
/// use regex_automata::{dfa::{Automaton, dense}, HalfMatch, MatchKind};
///
/// let haystack = "123foobar456".as_bytes();
/// let pattern = r"[a-z]+";
///
/// let dfa_fwd = dense::DFA::new(pattern)?;
/// let dfa_rev = dense::Builder::new()
/// .configure(dense::Config::new()
/// .anchored(true)
/// .match_kind(MatchKind::All)
/// )
/// .build(pattern)?;
/// let expected_fwd = HalfMatch::must(0, 9);
/// let expected_rev = HalfMatch::must(0, 3);
/// let got_fwd = dfa_fwd.find_leftmost_fwd(haystack)?.unwrap();
/// // Here we don't specify the pattern to search for since there's only
/// // one pattern and we're doing a leftmost search. But if this were an
/// // overlapping search, you'd need to specify the pattern that matched
/// // in the forward direction. (Otherwise, you might wind up finding the
/// // starting position of a match of some other pattern.) That in turn
/// // requires building the reverse automaton with starts_for_each_pattern
/// // enabled. Indeed, this is what Regex does internally.
/// let got_rev = dfa_rev.find_leftmost_rev_at(
/// None, haystack, 0, got_fwd.offset(),
/// )?.unwrap();
/// assert_eq!(expected_fwd, got_fwd);
/// assert_eq!(expected_rev, got_rev);
///
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
pub fn match_kind(mut self, kind: MatchKind) -> Config {
self.match_kind = Some(kind);
self
}
/// Whether to compile a separate start state for each pattern in the
/// automaton.
///
/// When enabled, a separate **anchored** start state is added for each
/// pattern in the DFA. When this start state is used, then the DFA will
/// only search for matches for the pattern specified, even if there are
/// other patterns in the DFA.
///
/// The main downside of this option is that it can potentially increase
/// the size of the DFA and/or increase the time it takes to build the DFA.
///
/// There are a few reasons one might want to enable this (it's disabled
/// by default):
///
/// 1. When looking for the start of an overlapping match (using a
/// reverse DFA), doing it correctly requires starting the reverse search
/// using the starting state of the pattern that matched in the forward
/// direction. Indeed, when building a [`Regex`](crate::dfa::regex::Regex),
/// it will automatically enable this option when building the reverse DFA
/// internally.
/// 2. When you want to use a DFA with multiple patterns to both search
/// for matches of any pattern or to search for anchored matches of one
/// particular pattern while using the same DFA. (Otherwise, you would need
/// to compile a new DFA for each pattern.)
/// 3. Since the start states added for each pattern are anchored, if you
/// compile an unanchored DFA with one pattern while also enabling this
/// option, then you can use the same DFA to perform anchored or unanchored
/// searches. The latter you get with the standard search APIs. The former
/// you get from the various `_at` search methods that allow you to specify a
/// pattern ID to search for.
///
/// By default this is disabled.
///
/// # Example
///
/// This example shows how to use this option to permit the same DFA to
/// run both anchored and unanchored searches for a single pattern.
///
/// ```
/// use regex_automata::{
/// dfa::{Automaton, dense},
/// HalfMatch, PatternID,
/// };
///
/// let dfa = dense::Builder::new()
/// .configure(dense::Config::new().starts_for_each_pattern(true))
/// .build(r"foo[0-9]+")?;
/// let haystack = b"quux foo123";
///
/// // Here's a normal unanchored search. Notice that we use 'None' for the
/// // pattern ID. Since the DFA was built as an unanchored machine, it
/// // uses its default unanchored starting state.
/// let expected = HalfMatch::must(0, 11);
/// assert_eq!(Some(expected), dfa.find_leftmost_fwd_at(
/// None, None, haystack, 0, haystack.len(),
/// )?);
/// // But now if we explicitly specify the pattern to search ('0' being
/// // the only pattern in the DFA), then it will use the starting state
/// // for that specific pattern which is always anchored. Since the
/// // pattern doesn't have a match at the beginning of the haystack, we
/// // find nothing.
/// assert_eq!(None, dfa.find_leftmost_fwd_at(
/// None, Some(PatternID::must(0)), haystack, 0, haystack.len(),
/// )?);
/// // And finally, an anchored search is not the same as putting a '^' at
/// // the beginning of the pattern. An anchored search can only match at the
/// // beginning of the *search*, which we can change:
/// assert_eq!(Some(expected), dfa.find_leftmost_fwd_at(
/// None, Some(PatternID::must(0)), haystack, 5, haystack.len(),
/// )?);
///
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
pub fn starts_for_each_pattern(mut self, yes: bool) -> Config {
self.starts_for_each_pattern = Some(yes);
self
}
/// Whether to attempt to shrink the size of the DFA's alphabet or not.
///
/// This option is enabled by default and should never be disabled unless
/// one is debugging a generated DFA.
///
/// When enabled, the DFA will use a map from all possible bytes to their
/// corresponding equivalence class. Each equivalence class represents a
/// set of bytes that does not discriminate between a match and a non-match
/// in the DFA. For example, the pattern `[ab]+` has at least two
/// equivalence classes: a set containing `a` and `b` and a set containing
/// every byte except for `a` and `b`. `a` and `b` are in the same
/// equivalence class because they never discriminate between a match
/// and a non-match.
///
/// The advantage of this map is that the size of the transition table
/// can be reduced drastically from `#states * 256 * sizeof(StateID)` to
/// `#states * k * sizeof(StateID)` where `k` is the number of equivalence
/// classes (rounded up to the nearest power of 2). As a result, total
/// space usage can decrease substantially. Moreover, since a smaller
/// alphabet is used, DFA compilation becomes faster as well.
///
/// **WARNING:** This is only useful for debugging DFAs. Disabling this
/// does not yield any speed advantages. Namely, even when this is
/// disabled, a byte class map is still used while searching. The only
/// difference is that every byte will be forced into its own distinct
/// equivalence class. This is useful for debugging the actual generated
/// transitions because it lets one see the transitions defined on actual
/// bytes instead of the equivalence classes.
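///
/// # Example
///
/// A minimal sketch contrasting memory usage with and without byte
/// classes. (The exact sizes are unspecified and may vary between
/// releases; only the relative ordering is assumed here.)
///
/// ```
/// use regex_automata::dfa::dense;
///
/// let small = dense::DFA::new(r"[a-z]+")?;
/// let big = dense::Builder::new()
///     .configure(dense::Config::new().byte_classes(false))
///     .build(r"[a-z]+")?;
/// // With byte classes disabled, every byte is its own equivalence
/// // class, so the transition table grows substantially.
/// assert!(big.memory_usage() > small.memory_usage());
///
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```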
pub fn byte_classes(mut self, yes: bool) -> Config {
self.byte_classes = Some(yes);
self
}
/// Heuristically enable Unicode word boundaries.
///
/// When set, this will attempt to implement Unicode word boundaries as if
/// they were ASCII word boundaries. This only works when the search input
/// is ASCII only. If a non-ASCII byte is observed while searching, then a
/// [`MatchError::Quit`](crate::MatchError::Quit) error is returned.
///
/// A possible alternative to enabling this option is to simply use an
/// ASCII word boundary, e.g., via `(?-u:\b)`. The main reason to use this
/// option is if you absolutely need Unicode support. This option lets one
/// use a fast search implementation (a DFA) for some potentially very
/// common cases, while providing the option to fall back to some other
/// regex engine to handle the general case when an error is returned.
///
/// If the pattern provided has no Unicode word boundary in it, then this
/// option has no effect. (That is, quitting on a non-ASCII byte only
/// occurs when this option is enabled _and_ a Unicode word boundary is
/// present in the pattern.)
///
/// This is almost equivalent to setting all non-ASCII bytes to be quit
/// bytes. The only difference is that this will cause non-ASCII bytes to
/// be quit bytes _only_ when a Unicode word boundary is present in the
/// pattern.
///
/// When enabling this option, callers _must_ be prepared to handle
/// a [`MatchError`](crate::MatchError) error during search.
/// When using a [`Regex`](crate::dfa::regex::Regex), this corresponds
/// to using the `try_` suite of methods. Alternatively, if
/// callers can guarantee that their input is ASCII only, then a
/// [`MatchError::Quit`](crate::MatchError::Quit) error will never be
/// returned while searching.
///
/// This is disabled by default.
///
/// # Example
///
/// This example shows how to heuristically enable Unicode word boundaries
/// in a pattern. It also shows what happens when a search comes across a
/// non-ASCII byte.
///
/// ```
/// use regex_automata::{
/// dfa::{Automaton, dense},
/// HalfMatch, MatchError, MatchKind,
/// };
///
/// let dfa = dense::Builder::new()
/// .configure(dense::Config::new().unicode_word_boundary(true))
/// .build(r"\b[0-9]+\b")?;
///
/// // The match occurs before the search ever observes the snowman
/// // character, so no error occurs.
/// let haystack = "foo 123 ☃".as_bytes();
/// let expected = Some(HalfMatch::must(0, 7));
/// let got = dfa.find_leftmost_fwd(haystack)?;
/// assert_eq!(expected, got);
///
/// // Notice that this search fails, even though the snowman character
/// // occurs after the ending match offset. This is because search
/// // routines read one byte past the end of the search to account for
/// // look-around, and indeed, this is required here to determine whether
/// // the trailing \b matches.
/// let haystack = "foo 123☃".as_bytes();
/// let expected = MatchError::Quit { byte: 0xE2, offset: 7 };
/// let got = dfa.find_leftmost_fwd(haystack);
/// assert_eq!(Err(expected), got);
///
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
pub fn unicode_word_boundary(mut self, yes: bool) -> Config {
// We have a separate option for this instead of just setting the
// appropriate quit bytes here because we don't want to set quit bytes
// for every regex. We only want to set them when the regex contains a
// Unicode word boundary.
self.unicode_word_boundary = Some(yes);
self
}
/// Add a "quit" byte to the DFA.
///
/// When a quit byte is seen during search time, then search will return
/// a [`MatchError::Quit`](crate::MatchError::Quit) error indicating the
/// offset at which the search stopped.
///
/// A quit byte will always overrule any other aspects of a regex. For
/// example, if the `x` byte is added as a quit byte and the regex `\w` is
/// used, then observing `x` will cause the search to quit immediately
/// despite the fact that `x` is in the `\w` class.
///
/// This mechanism is primarily useful for heuristically enabling certain
/// features like Unicode word boundaries in a DFA. Namely, if the input
/// to search is ASCII, then a Unicode word boundary can be implemented
/// via an ASCII word boundary with no change in semantics. Thus, a DFA
/// can attempt to match a Unicode word boundary but give up as soon as it
/// observes a non-ASCII byte. Indeed, if callers set all non-ASCII bytes
/// to be quit bytes, then Unicode word boundaries will be permitted when
/// building DFAs. Of course, callers should enable
/// [`Config::unicode_word_boundary`] if they want this behavior instead.
/// (The advantage being that non-ASCII quit bytes will only be added if a
/// Unicode word boundary is in the pattern.)
///
/// When enabling this option, callers _must_ be prepared to handle a
/// [`MatchError`](crate::MatchError) error during search. When using a
/// [`Regex`](crate::dfa::regex::Regex), this corresponds to using the
/// `try_` suite of methods.
///
/// By default, there are no quit bytes set.
///
/// # Panics
///
/// This panics if heuristic Unicode word boundaries are enabled and any
/// non-ASCII byte is removed from the set of quit bytes. Namely, enabling
/// Unicode word boundaries requires setting every non-ASCII byte to a quit
/// byte. So if the caller attempts to undo any of that, then this will
/// panic.
///
/// # Example
///
/// This example shows how to cause a search to terminate if it sees a
/// `\n` byte. This could be useful if, for example, you wanted to prevent
/// a user supplied pattern from matching across a line boundary.
///
/// ```
/// use regex_automata::{
/// dfa::{Automaton, dense},
/// HalfMatch, MatchError,
/// };
///
/// let dfa = dense::Builder::new()
/// .configure(dense::Config::new().quit(b'\n', true))
/// .build(r"foo\p{any}+bar")?;
///
/// let haystack = "foo\nbar".as_bytes();
/// // Normally this would produce a match, since \p{any} contains '\n'.
/// // But since we instructed the automaton to enter a quit state if a
/// // '\n' is observed, this produces a match error instead.
/// let expected = MatchError::Quit { byte: 0x0A, offset: 3 };
/// let got = dfa.find_leftmost_fwd(haystack).unwrap_err();
/// assert_eq!(expected, got);
///
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
pub fn quit(mut self, byte: u8, yes: bool) -> Config {
if self.get_unicode_word_boundary() && !byte.is_ascii() && !yes {
panic!(
"cannot set non-ASCII byte to be non-quit when \
Unicode word boundaries are enabled"
);
}
if self.quit.is_none() {
self.quit = Some(ByteSet::empty());
}
if yes {
self.quit.as_mut().unwrap().add(byte);
} else {
self.quit.as_mut().unwrap().remove(byte);
}
self
}
/// Set a size limit on the total heap used by a DFA.
///
/// This size limit is expressed in bytes and is applied during
/// determinization of an NFA into a DFA. If the DFA's heap usage, and only
/// the DFA, exceeds this configured limit, then determinization is stopped
/// and an error is returned.
///
/// This limit does not apply to auxiliary storage used during
/// determinization that isn't part of the generated DFA.
///
/// This limit is only applied during determinization. Currently, there is
/// no way to post-pone this check to after minimization if minimization
/// was enabled.
///
/// The total limit on heap used during determinization is the sum of the
/// DFA and determinization size limits.
///
/// The default is no limit.
///
/// # Example
///
/// This example shows a DFA that fails to build because of a configured
/// size limit. This particular example also serves as a cautionary tale
/// demonstrating just how big DFAs with large Unicode character classes
/// can get.
///
/// ```
/// use regex_automata::dfa::{dense, Automaton};
///
/// // 3MB isn't enough!
/// dense::Builder::new()
/// .configure(dense::Config::new().dfa_size_limit(Some(3_000_000)))
/// .build(r"\w{20}")
/// .unwrap_err();
///
/// // ... but 4MB probably is!
/// // (Note that DFA sizes aren't necessarily stable between releases.)
/// let dfa = dense::Builder::new()
/// .configure(dense::Config::new().dfa_size_limit(Some(4_000_000)))
/// .build(r"\w{20}")?;
/// let haystack = "A".repeat(20).into_bytes();
/// assert!(dfa.find_leftmost_fwd(&haystack)?.is_some());
///
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
///
/// While one needs a little more than 3MB to represent `\w{20}`, it
/// turns out that you only need a little more than 4KB to represent
/// `(?-u:\w{20})`. So only use Unicode if you need it!
pub fn dfa_size_limit(mut self, bytes: Option<usize>) -> Config {
self.dfa_size_limit = Some(bytes);
self
}
/// Set a size limit on the total heap used by determinization.
///
/// This size limit is expressed in bytes and is applied during
/// determinization of an NFA into a DFA. If the heap used for auxiliary
/// storage during determinization (memory that is not in the DFA but
/// necessary for building the DFA) exceeds this configured limit, then
/// determinization is stopped and an error is returned.
///
/// This limit does not apply to heap used by the DFA itself.
///
/// The total limit on heap used during determinization is the sum of the
/// DFA and determinization size limits.
///
/// The default is no limit.
///
/// # Example
///
/// This example shows a DFA that fails to build because of a
/// configured size limit on the amount of heap space used by
/// determinization. This particular example complements the example for
/// [`Config::dfa_size_limit`] by demonstrating that not only does Unicode
/// potentially make DFAs themselves big, but it also results in more
/// auxiliary storage during determinization. (Although, auxiliary storage
/// is still not as much as the DFA itself.)
///
/// ```
/// use regex_automata::dfa::{dense, Automaton};
///
/// // 300KB isn't enough!
/// dense::Builder::new()
/// .configure(dense::Config::new()
/// .determinize_size_limit(Some(300_000))
/// )
/// .build(r"\w{20}")
/// .unwrap_err();
///
/// // ... but 400KB probably is!
/// // (Note that auxiliary storage sizes aren't necessarily stable between
/// // releases.)
/// let dfa = dense::Builder::new()
/// .configure(dense::Config::new()
/// .determinize_size_limit(Some(400_000))
/// )
/// .build(r"\w{20}")?;
/// let haystack = "A".repeat(20).into_bytes();
/// assert!(dfa.find_leftmost_fwd(&haystack)?.is_some());
///
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
pub fn determinize_size_limit(mut self, bytes: Option<usize>) -> Config {
self.determinize_size_limit = Some(bytes);
self
}
/// Returns whether this configuration has enabled anchored searches.
pub fn get_anchored(&self) -> bool {
self.anchored.unwrap_or(false)
}
/// Returns whether this configuration has enabled simple state
/// acceleration.
pub fn get_accelerate(&self) -> bool {
self.accelerate.unwrap_or(true)
}
/// Returns whether this configuration has enabled the expensive process
/// of minimizing a DFA.
pub fn get_minimize(&self) -> bool {
self.minimize.unwrap_or(false)
}
/// Returns the match semantics set in this configuration.
pub fn get_match_kind(&self) -> MatchKind {
self.match_kind.unwrap_or(MatchKind::LeftmostFirst)
}
/// Returns whether this configuration has enabled anchored starting states
/// for every pattern in the DFA.
pub fn get_starts_for_each_pattern(&self) -> bool {
self.starts_for_each_pattern.unwrap_or(false)
}
/// Returns whether this configuration has enabled byte classes or not.
/// This is typically a debugging oriented option, as disabling it confers
/// no speed benefit.
pub fn get_byte_classes(&self) -> bool {
self.byte_classes.unwrap_or(true)
}
/// Returns whether this configuration has enabled heuristic Unicode word
/// boundary support. When enabled, it is possible for a search to return
/// an error.
pub fn get_unicode_word_boundary(&self) -> bool {
self.unicode_word_boundary.unwrap_or(false)
}
/// Returns whether this configuration will instruct the DFA to enter a
/// quit state whenever the given byte is seen during a search. When at
/// least one byte has this enabled, it is possible for a search to return
/// an error.
pub fn get_quit(&self, byte: u8) -> bool {
self.quit.map_or(false, |q| q.contains(byte))
}
/// Returns the DFA size limit of this configuration if one was set.
/// The size limit is total number of bytes on the heap that a DFA is
/// permitted to use. If the DFA exceeds this limit during construction,
/// then construction is stopped and an error is returned.
pub fn get_dfa_size_limit(&self) -> Option<usize> {
self.dfa_size_limit.unwrap_or(None)
}
/// Returns the determinization size limit of this configuration if one
/// was set. The size limit is total number of bytes on the heap that
/// determinization is permitted to use. If determinization exceeds this
/// limit during construction, then construction is stopped and an error is
/// returned.
///
/// This is different from the DFA size limit in that this only applies to
/// the auxiliary storage used during determinization. Once determinization
/// is complete, this memory is freed.
///
/// The limit on the total heap memory used is the sum of the DFA and
/// determinization size limits.
pub fn get_determinize_size_limit(&self) -> Option<usize> {
self.determinize_size_limit.unwrap_or(None)
}
/// Overwrite the default configuration such that the options in `o` are
/// always used. If an option in `o` is not set, then the corresponding
/// option in `self` is used. If it's not set in `self` either, then it
/// remains not set.
pub(crate) fn overwrite(self, o: Config) -> Config {
Config {
anchored: o.anchored.or(self.anchored),
accelerate: o.accelerate.or(self.accelerate),
minimize: o.minimize.or(self.minimize),
match_kind: o.match_kind.or(self.match_kind),
starts_for_each_pattern: o
.starts_for_each_pattern
.or(self.starts_for_each_pattern),
byte_classes: o.byte_classes.or(self.byte_classes),
unicode_word_boundary: o
.unicode_word_boundary
.or(self.unicode_word_boundary),
quit: o.quit.or(self.quit),
dfa_size_limit: o.dfa_size_limit.or(self.dfa_size_limit),
determinize_size_limit: o
.determinize_size_limit
.or(self.determinize_size_limit),
}
}
}
/// A builder for constructing a deterministic finite automaton from regular
/// expressions.
///
/// This builder provides two main things:
///
/// 1. It provides a few different `build` routines for actually constructing
/// a DFA from different kinds of inputs. The most convenient is
/// [`Builder::build`], which builds a DFA directly from a pattern string. The
/// most flexible is [`Builder::build_from_nfa`], which builds a DFA straight
/// from an NFA.
/// 2. The builder permits configuring a number of things.
/// [`Builder::configure`] is used with [`Config`] to configure aspects of
/// the DFA and the construction process itself. [`Builder::syntax`] and
/// [`Builder::thompson`] permit configuring the regex parser and Thompson NFA
/// construction, respectively. The syntax and thompson configurations only
/// apply when building from a pattern string.
///
/// This builder always constructs a *single* DFA. As such, this builder
/// can only be used to construct regexes that either detect the presence
/// of a match or find the end location of a match. A single DFA cannot
/// produce both the start and end of a match. For that information, use a
/// [`Regex`](crate::dfa::regex::Regex), which can be similarly configured
/// using [`regex::Builder`](crate::dfa::regex::Builder). The main reason to
/// use a DFA directly is if the end location of a match is enough for your use
/// case. Namely, a `Regex` will construct two DFAs instead of one, since a
/// second reverse DFA is needed to find the start of a match.
///
/// Note that if one wants to build a sparse DFA, you must first build a dense
/// DFA and convert that to a sparse DFA. There is no way to build a sparse
/// DFA without first building a dense DFA.
///
/// # Example
///
/// This example shows how to build a minimized DFA that completely disables
/// Unicode. That is:
///
/// * Things such as `\w`, `.` and `\b` are no longer Unicode-aware. `\w`
/// and `\b` are ASCII-only while `.` matches any byte except for `\n`
/// (instead of any UTF-8 encoding of a Unicode scalar value except for
/// `\n`). Things that are Unicode only, such as `\pL`, are not allowed.
/// * The pattern itself is permitted to match invalid UTF-8. For example,
/// things like `[^a]` that match any byte except for `a` are permitted.
/// * Unanchored patterns can search through invalid UTF-8. That is, for
/// unanchored patterns, the implicit prefix is `(?s-u:.)*?` instead of
/// `(?s:.)*?`.
///
/// ```
/// use regex_automata::{
/// dfa::{Automaton, dense},
/// nfa::thompson,
/// HalfMatch, SyntaxConfig,
/// };
///
/// let dfa = dense::Builder::new()
/// .configure(dense::Config::new().minimize(true))
/// .syntax(SyntaxConfig::new().unicode(false).utf8(false))
/// .thompson(thompson::Config::new().utf8(false))
/// .build(r"foo[^b]ar.*")?;
///
/// let haystack = b"\xFEfoo\xFFar\xE2\x98\xFF\n";
/// let expected = Some(HalfMatch::must(0, 10));
/// let got = dfa.find_leftmost_fwd(haystack)?;
/// assert_eq!(expected, got);
///
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
#[cfg(feature = "alloc")]
#[derive(Clone, Debug)]
pub struct Builder {
config: Config,
thompson: thompson::Builder,
}
#[cfg(feature = "alloc")]
impl Builder {
/// Create a new dense DFA builder with the default configuration.
pub fn new() -> Builder {
Builder {
config: Config::default(),
thompson: thompson::Builder::new(),
}
}
/// Build a DFA from the given pattern.
///
/// If there was a problem parsing or compiling the pattern, then an error
/// is returned.
pub fn build(&self, pattern: &str) -> Result<OwnedDFA, Error> {
self.build_many(&[pattern])
}
/// Build a DFA from the given patterns.
///
/// When matches are returned, the pattern ID corresponds to the index of
/// the pattern in the slice given.
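///
/// # Example
///
/// A minimal sketch showing that pattern IDs follow the order of the
/// slice given to `build_many`:
///
/// ```
/// use regex_automata::{dfa::{Automaton, dense}, HalfMatch};
///
/// let dfa = dense::Builder::new()
///     .build_many(&[r"[0-9]+", r"[a-z]+"])?;
/// // "abc" matches the second pattern, which has pattern ID 1.
/// let expected = Some(HalfMatch::must(1, 3));
/// assert_eq!(expected, dfa.find_leftmost_fwd(b"abc")?);
///
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```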
pub fn build_many<P: AsRef<str>>(
&self,
patterns: &[P],
) -> Result<OwnedDFA, Error> {
let nfa = self.thompson.build_many(patterns).map_err(Error::nfa)?;
self.build_from_nfa(&nfa)
}
/// Build a DFA from the given NFA.
///
/// # Example
///
/// This example shows how to build a DFA if you already have an NFA in
/// hand.
///
/// ```
/// use regex_automata::{
/// dfa::{Automaton, dense},
/// nfa::thompson,
/// HalfMatch,
/// };
///
/// let haystack = "foo123bar".as_bytes();
///
/// // This shows how to set non-default options for building an NFA.
/// let nfa = thompson::Builder::new()
/// .configure(thompson::Config::new().shrink(false))
/// .build(r"[0-9]+")?;
/// let dfa = dense::Builder::new().build_from_nfa(&nfa)?;
/// let expected = Some(HalfMatch::must(0, 6));
/// let got = dfa.find_leftmost_fwd(haystack)?;
/// assert_eq!(expected, got);
///
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
pub fn build_from_nfa(
&self,
nfa: &thompson::NFA,
) -> Result<OwnedDFA, Error> {
let mut quit = self.config.quit.unwrap_or(ByteSet::empty());
if self.config.get_unicode_word_boundary()
&& nfa.has_word_boundary_unicode()
{
for b in 0x80..=0xFF {
quit.add(b);
}
}
let classes = if !self.config.get_byte_classes() {
// DFAs will always use the equivalence class map, but enabling
// this option is useful for debugging. Namely, this will cause all
// transitions to be defined over their actual bytes instead of an
// opaque equivalence class identifier. The former is much easier
// to grok as a human.
ByteClasses::singletons()
} else {
let mut set = nfa.byte_class_set().clone();
// It is important to distinguish any "quit" bytes from all other
// bytes. Otherwise, a non-quit byte may end up in the same class
// as a quit byte, and thus cause the DFA to stop when it shouldn't.
if !quit.is_empty() {
set.add_set(&quit);
}
set.byte_classes()
};
let mut dfa = DFA::initial(
classes,
nfa.pattern_len(),
self.config.get_starts_for_each_pattern(),
)?;
determinize::Config::new()
.anchored(self.config.get_anchored())
.match_kind(self.config.get_match_kind())
.quit(quit)
.dfa_size_limit(self.config.get_dfa_size_limit())
.determinize_size_limit(self.config.get_determinize_size_limit())
.run(nfa, &mut dfa)?;
if self.config.get_minimize() {
dfa.minimize();
}
if self.config.get_accelerate() {
dfa.accelerate();
}
Ok(dfa)
}
/// Apply the given dense DFA configuration options to this builder.
pub fn configure(&mut self, config: Config) -> &mut Builder {
self.config = self.config.overwrite(config);
self
}
/// Set the syntax configuration for this builder using
/// [`SyntaxConfig`](crate::SyntaxConfig).
///
/// This permits setting things like case insensitivity, Unicode and multi
/// line mode.
///
/// These settings only apply when constructing a DFA directly from a
/// pattern.
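///
/// # Example
///
/// A minimal sketch of setting a syntax option, in this case case
/// insensitivity:
///
/// ```
/// use regex_automata::{dfa::{Automaton, dense}, HalfMatch, SyntaxConfig};
///
/// let dfa = dense::Builder::new()
///     .syntax(SyntaxConfig::new().case_insensitive(true))
///     .build(r"foo")?;
/// let expected = Some(HalfMatch::must(0, 3));
/// assert_eq!(expected, dfa.find_leftmost_fwd(b"FOO")?);
///
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```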
pub fn syntax(
&mut self,
config: crate::util::syntax::SyntaxConfig,
) -> &mut Builder {
self.thompson.syntax(config);
self
}
/// Set the Thompson NFA configuration for this builder using
/// [`nfa::thompson::Config`](crate::nfa::thompson::Config).
///
/// This permits setting things like whether the DFA should match the regex
/// in reverse or if additional time should be spent shrinking the size of
/// the NFA.
///
/// These settings only apply when constructing a DFA directly from a
/// pattern.
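///
/// # Example
///
/// A minimal sketch of building a reverse DFA, which scans backwards
/// and reports the start offset of a match:
///
/// ```
/// use regex_automata::{
///     dfa::{Automaton, dense},
///     nfa::thompson,
///     HalfMatch,
/// };
///
/// let dfa = dense::Builder::new()
///     .thompson(thompson::Config::new().reverse(true))
///     .build(r"foo[0-9]+")?;
/// // A reverse search reports the offset at which the match *starts*.
/// let expected = Some(HalfMatch::must(0, 0));
/// assert_eq!(expected, dfa.find_leftmost_rev(b"foo12345")?);
///
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```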
pub fn thompson(&mut self, config: thompson::Config) -> &mut Builder {
self.thompson.configure(config);
self
}
}
#[cfg(feature = "alloc")]
impl Default for Builder {
fn default() -> Builder {
Builder::new()
}
}
/// A convenience alias for an owned DFA. We use this particular instantiation
/// a lot in this crate, so it's worth giving it a name. This instantiation
/// is commonly used for mutable APIs on the DFA while building it. The main
/// reason for making DFAs generic is no_std support, and more generally,
/// making it possible to load a DFA from an arbitrary slice of bytes.
#[cfg(feature = "alloc")]
pub(crate) type OwnedDFA = DFA<Vec<u32>>;
/// A dense table-based deterministic finite automaton (DFA).
///
/// All dense DFAs have one or more start states, zero or more match states
/// and a transition table that maps the current state and the current byte
/// of input to the next state. A DFA can use this information to implement
/// fast searching. In particular, the use of a dense DFA generally makes the
/// trade off that match speed is the most valuable characteristic, even if
/// building the DFA may take significant time *and* space. (More concretely,
/// building a DFA takes time and space that is exponential in the size of the
/// pattern in the worst case.) As such, the processing of every byte of input
/// is done with a small constant number of operations that does not vary with
/// the pattern, its size or the size of the alphabet. If your needs don't line
/// up with this trade off, then a dense DFA may not be an adequate solution to
/// your problem.
///
/// In contrast, a [`sparse::DFA`] makes the opposite
/// trade off: it uses less space but will execute a variable number of
/// instructions per byte at match time, which makes it slower for matching.
/// (Note that space usage is still exponential in the size of the pattern in
/// the worst case.)
///
/// A DFA can be built using the default configuration via the
/// [`DFA::new`] constructor. Otherwise, one can
/// configure various aspects via [`dense::Builder`](Builder).
///
/// A single DFA fundamentally supports the following operations:
///
/// 1. Detection of a match.
/// 2. Location of the end of a match.
/// 3. In the case of a DFA with multiple patterns, which pattern matched is
/// reported as well.
///
/// A notable absence from the above list of capabilities is the location of
/// the *start* of a match. In order to provide both the start and end of
/// a match, *two* DFAs are required. This functionality is provided by a
/// [`Regex`](crate::dfa::regex::Regex).
///
/// # Type parameters
///
/// A `DFA` has one type parameter, `T`, which is used to represent state IDs,
/// pattern IDs and accelerators. `T` is typically a `Vec<u32>` or a `&[u32]`.
///
/// # The `Automaton` trait
///
/// This type implements the [`Automaton`] trait, which means it can be used
/// for searching. For example:
///
/// ```
/// use regex_automata::{dfa::{Automaton, dense::DFA}, HalfMatch};
///
/// let dfa = DFA::new("foo[0-9]+")?;
/// let expected = HalfMatch::must(0, 8);
/// assert_eq!(Some(expected), dfa.find_leftmost_fwd(b"foo12345")?);
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
#[derive(Clone)]
pub struct DFA<T> {
/// The transition table for this DFA. This includes the transitions
/// themselves, along with the stride, number of states and the equivalence
/// class mapping.
tt: TransitionTable<T>,
/// The set of starting state identifiers for this DFA. The starting state
/// IDs act as pointers into the transition table. The specific starting
/// state chosen for each search is dependent on the context at which the
/// search begins.
st: StartTable<T>,
/// The set of match states and the patterns that match for each
/// corresponding match state.
///
/// This structure is technically only needed because of support for
/// multi-regexes. Namely, multi-regexes require answering not just whether
/// a match exists, but _which_ patterns match. So we need to store the
/// matching pattern IDs for each match state. We do this even when there
/// is only one pattern for the sake of simplicity. In practice, this uses
/// up very little space for the case of one pattern.
ms: MatchStates<T>,
/// Information about which states are "special." Special states are states
/// that are dead, quit, matching, starting or accelerated. For more info,
/// see the docs for `Special`.
special: Special,
/// The accelerators for this DFA.
///
/// If a state is accelerated, then there exist only a small number of
/// bytes that can cause the DFA to leave the state. This permits searching
/// to use optimized routines to find those specific bytes instead of using
/// the transition table.
///
/// All accelerated states exist in a contiguous range in the DFA's
/// transition table. See dfa/special.rs for more details on how states are
/// arranged.
accels: Accels<T>,
}
#[cfg(feature = "alloc")]
impl OwnedDFA {
/// Parse the given regular expression using a default configuration and
/// return the corresponding DFA.
///
/// If you want a non-default configuration, then use the
/// [`dense::Builder`](Builder) to set your own configuration.
///
/// # Example
///
/// ```
/// use regex_automata::{dfa::{Automaton, dense}, HalfMatch};
///
/// let dfa = dense::DFA::new("foo[0-9]+bar")?;
/// let expected = HalfMatch::must(0, 11);
/// assert_eq!(Some(expected), dfa.find_leftmost_fwd(b"foo12345bar")?);
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
pub fn new(pattern: &str) -> Result<OwnedDFA, Error> {
Builder::new().build(pattern)
}
/// Parse the given regular expressions using a default configuration and
/// return the corresponding multi-DFA.
///
/// If you want a non-default configuration, then use the
/// [`dense::Builder`](Builder) to set your own configuration.
///
/// # Example
///
/// ```
/// use regex_automata::{dfa::{Automaton, dense}, HalfMatch};
///
/// let dfa = dense::DFA::new_many(&["[0-9]+", "[a-z]+"])?;
/// let expected = HalfMatch::must(1, 3);
/// assert_eq!(Some(expected), dfa.find_leftmost_fwd(b"foo12345bar")?);
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
pub fn new_many<P: AsRef<str>>(patterns: &[P]) -> Result<OwnedDFA, Error> {
Builder::new().build_many(patterns)
}
}
#[cfg(feature = "alloc")]
impl OwnedDFA {
/// Create a new DFA that matches every input.
///
/// # Example
///
/// ```
/// use regex_automata::{dfa::{Automaton, dense}, HalfMatch};
///
/// let dfa = dense::DFA::always_match()?;
///
/// let expected = HalfMatch::must(0, 0);
/// assert_eq!(Some(expected), dfa.find_leftmost_fwd(b"")?);
/// assert_eq!(Some(expected), dfa.find_leftmost_fwd(b"foo")?);
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
pub fn always_match() -> Result<OwnedDFA, Error> {
let nfa = thompson::NFA::always_match();
Builder::new().build_from_nfa(&nfa)
}
/// Create a new DFA that never matches any input.
///
/// # Example
///
/// ```
/// use regex_automata::dfa::{Automaton, dense};
///
/// let dfa = dense::DFA::never_match()?;
/// assert_eq!(None, dfa.find_leftmost_fwd(b"")?);
/// assert_eq!(None, dfa.find_leftmost_fwd(b"foo")?);
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
pub fn never_match() -> Result<OwnedDFA, Error> {
let nfa = thompson::NFA::never_match();
Builder::new().build_from_nfa(&nfa)
}
/// Create an initial DFA with the given equivalence classes, pattern count
/// and whether anchored starting states are enabled for each pattern. An
/// initial DFA can be further mutated via determinization.
fn initial(
classes: ByteClasses,
pattern_count: usize,
starts_for_each_pattern: bool,
) -> Result<OwnedDFA, Error> {
let start_pattern_count =
if starts_for_each_pattern { pattern_count } else { 0 };
Ok(DFA {
tt: TransitionTable::minimal(classes),
st: StartTable::dead(start_pattern_count)?,
ms: MatchStates::empty(pattern_count),
special: Special::new(),
accels: Accels::empty(),
})
}
}
impl<T: AsRef<[u32]>> DFA<T> {
/// Cheaply return a borrowed version of this dense DFA. Specifically,
/// the DFA returned always uses `&[u32]` for its transition table.
pub fn as_ref(&self) -> DFA<&'_ [u32]> {
DFA {
tt: self.tt.as_ref(),
st: self.st.as_ref(),
ms: self.ms.as_ref(),
special: self.special,
accels: self.accels(),
}
}
/// Return an owned version of this dense DFA. Specifically, the DFA
/// returned always uses `Vec<u32>` for its transition table.
///
/// Effectively, this returns a dense DFA whose transition table lives on
/// the heap.
#[cfg(feature = "alloc")]
pub fn to_owned(&self) -> OwnedDFA {
DFA {
tt: self.tt.to_owned(),
st: self.st.to_owned(),
ms: self.ms.to_owned(),
special: self.special,
accels: self.accels().to_owned(),
}
}
/// Returns true only if this DFA has starting states for each pattern.
///
/// When a DFA has starting states for each pattern, then a search with the
/// DFA can be configured to only look for anchored matches of a specific
/// pattern. Specifically, APIs like [`Automaton::find_earliest_fwd_at`]
/// can accept a non-None `pattern_id` if and only if this method returns
/// true. Otherwise, calling `find_earliest_fwd_at` will panic.
///
/// Note that if the DFA has no patterns, this always returns false.
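///
/// # Example
///
/// A minimal sketch of how this predicate reflects the corresponding
/// configuration knob:
///
/// ```
/// use regex_automata::dfa::dense;
///
/// // Disabled by default.
/// let dfa = dense::DFA::new(r"foo")?;
/// assert!(!dfa.has_starts_for_each_pattern());
///
/// let dfa = dense::Builder::new()
///     .configure(dense::Config::new().starts_for_each_pattern(true))
///     .build(r"foo")?;
/// assert!(dfa.has_starts_for_each_pattern());
///
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```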
pub fn has_starts_for_each_pattern(&self) -> bool {
self.st.patterns > 0
}
/// Returns the total number of elements in the alphabet for this DFA.
///
/// That is, this returns the total number of transitions that each state
/// in this DFA must have. Typically, a normal byte oriented DFA would
/// always have an alphabet size of 256, corresponding to the number of
/// unique values in a single byte. However, this implementation has two
/// peculiarities that impact the alphabet length:
///
/// * Every state has a special "EOI" transition that is only followed
/// after the end of some haystack is reached. This EOI transition is
/// necessary to account for one byte of look-ahead when implementing
/// things like `\b` and `$`.
/// * Bytes are grouped into equivalence classes such that no two bytes in
/// the same class can distinguish a match from a non-match. For example,
/// in the regex `^[a-z]+$`, the ASCII bytes `a-z` could all be in the
/// same equivalence class. This leads to a massive space savings.
///
/// Note though that the alphabet length does _not_ necessarily equal the
/// total stride space taken up by a single DFA state in the transition
/// table. Namely, for performance reasons, the stride is always the
/// smallest power of two that is greater than or equal to the alphabet
/// length. For this reason, [`DFA::stride`] or [`DFA::stride2`] are
/// often more useful. The alphabet length is typically useful only for
/// informational purposes.
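///
/// # Example
///
/// A minimal sketch of the bound described above. (The exact alphabet
/// length depends on the pattern and is not stable across releases;
/// only the upper bound of 257 is assumed.)
///
/// ```
/// use regex_automata::dfa::dense;
///
/// let dfa = dense::DFA::new(r"[ab]+")?;
/// // At most 256 byte classes plus the special EOI class.
/// assert!(dfa.alphabet_len() <= 257);
///
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```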
pub fn alphabet_len(&self) -> usize {
self.tt.alphabet_len()
}
/// Returns the total stride for every state in this DFA, expressed as the
/// exponent of a power of 2. The stride is the amount of space each state
/// takes up in the transition table, expressed as a number of transitions.
/// (Unused transitions map to dead states.)
///
/// The stride of a DFA is always equivalent to the smallest power of 2
/// that is greater than or equal to the DFA's alphabet length. This
/// definition uses extra space, but permits faster translation between
/// premultiplied state identifiers and contiguous indices (by using shifts
/// instead of relying on integer division).
///
/// For example, if the DFA's stride is 16 transitions, then its `stride2`
/// is `4` since `2^4 = 16`.
///
/// The minimum `stride2` value is `1` (corresponding to a stride of `2`)
/// while the maximum `stride2` value is `9` (corresponding to a stride of
/// `512`). The maximum is not `8` since the maximum alphabet size is `257`
/// when accounting for the special EOI transition. However, an alphabet
/// length of that size is exceptionally rare since the alphabet is shrunk
/// into equivalence classes.
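///
/// # Example
///
/// A minimal sketch of the invariants described above. (The exact
/// stride depends on the pattern and is not stable across releases.)
///
/// ```
/// use regex_automata::dfa::dense;
///
/// let dfa = dense::DFA::new(r"foo[0-9]+")?;
/// // The stride is always the power of 2 given by stride2...
/// assert_eq!(dfa.stride(), 1 << dfa.stride2());
/// // ... and is always at least as big as the alphabet length.
/// assert!(dfa.stride() >= dfa.alphabet_len());
///
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```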
pub fn stride2(&self) -> usize {
self.tt.stride2
}
/// Returns the total stride for every state in this DFA. This corresponds
/// to the total number of transitions used by each state in this DFA's
/// transition table.
///
/// Please see [`DFA::stride2`] for more information. In particular, this
/// returns the stride as the number of transitions, whereas `stride2`
/// returns it as the exponent of a power of 2.
pub fn stride(&self) -> usize {
self.tt.stride()
}
/// Returns the "universal" start state for this DFA.
///
/// A universal start state occurs only when all of the starting states
/// for this DFA are precisely the same. This occurs when there are no
/// look-around assertions at the beginning (or end for a reverse DFA) of
/// the pattern.
///
/// Using this as a starting state for a DFA without a universal starting
/// state has unspecified behavior. This condition is not checked, so the
/// caller must guarantee it themselves.
pub(crate) fn universal_start_state(&self) -> StateID {
// We choose 'NonWordByte' for no particular reason, other than
// the fact that this is the 'main' starting configuration used in
// determinization. But in essence, it doesn't really matter.
//
// Also, we might consider exposing this routine, but it seems
// a little tricky to use correctly. Maybe if we also expose a
// 'has_universal_start_state' method?
self.st.start(Start::NonWordByte, None)
}
/// Returns the memory usage, in bytes, of this DFA.
///
/// The memory usage is computed based on the number of bytes used to
/// represent this DFA.
///
/// This does **not** include the stack size used up by this DFA. To
/// compute that, use `std::mem::size_of::<dense::DFA>()`.
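///
/// # Example
///
/// A minimal sketch (the reported number of bytes is unspecified and
/// varies with the pattern and release):
///
/// ```
/// use regex_automata::dfa::dense;
///
/// let dfa = dense::DFA::new(r"\w")?;
/// // Every DFA owns at least some heap memory for its tables.
/// assert!(dfa.memory_usage() > 0);
///
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```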
pub fn memory_usage(&self) -> usize {
self.tt.memory_usage()
+ self.st.memory_usage()
+ self.ms.memory_usage()
+ self.accels.memory_usage()
}
}
/// Routines for converting a dense DFA to other representations, such as
/// sparse DFAs or raw bytes suitable for persistent storage.
impl<T: AsRef<[u32]>> DFA<T> {
/// Convert this dense DFA to a sparse DFA.
///
/// If a `StateID` is too small to represent all states in the sparse
/// DFA, then this returns an error. In most cases, if a dense DFA is
/// constructible with `StateID`, then a sparse DFA will be as well.
/// However, it is not guaranteed.
///
/// # Example
///
/// ```
/// use regex_automata::{dfa::{Automaton, dense}, HalfMatch};
///
/// let dense = dense::DFA::new("foo[0-9]+")?;
/// let sparse = dense.to_sparse()?;
///
/// let expected = HalfMatch::must(0, 8);
/// assert_eq!(Some(expected), sparse.find_leftmost_fwd(b"foo12345")?);
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
#[cfg(feature = "alloc")]
pub fn to_sparse(&self) -> Result<sparse::DFA<Vec<u8>>, Error> {
sparse::DFA::from_dense(self)
}
/// Serialize this DFA as raw bytes to a `Vec<u8>` in little endian
/// format. Upon success, the `Vec<u8>` and the initial padding length are
/// returned.
///
/// The written bytes are guaranteed to be deserialized correctly and
/// without errors in a semver compatible release of this crate by a
/// `DFA`'s deserialization APIs (assuming all other criteria for the
/// deserialization APIs has been satisfied):
///
/// * [`DFA::from_bytes`]
/// * [`DFA::from_bytes_unchecked`]
///
/// The padding returned is non-zero if the returned `Vec<u8>` starts at
/// an address that does not have the same alignment as `u32`. The padding
/// corresponds to the number of leading bytes written to the returned
/// `Vec<u8>`.
///
/// # Example
///
/// This example shows how to serialize and deserialize a DFA:
///
/// ```
/// use regex_automata::{dfa::{Automaton, dense::DFA}, HalfMatch};
///
/// // Compile our original DFA.
/// let original_dfa = DFA::new("foo[0-9]+")?;
///
/// // N.B. We use native endianness here to make the example work, but
/// // using to_bytes_little_endian would work on a little endian target.
/// let (buf, _) = original_dfa.to_bytes_native_endian();
/// // Even if buf has initial padding, DFA::from_bytes will automatically
/// // ignore it.
/// let dfa: DFA<&[u32]> = DFA::from_bytes(&buf)?.0;
///
/// let expected = HalfMatch::must(0, 8);
/// assert_eq!(Some(expected), dfa.find_leftmost_fwd(b"foo12345")?);
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
#[cfg(feature = "alloc")]
pub fn to_bytes_little_endian(&self) -> (Vec<u8>, usize) {
self.to_bytes::<bytes::LE>()
}
/// Serialize this DFA as raw bytes to a `Vec<u8>` in big endian
/// format. Upon success, the `Vec<u8>` and the initial padding length are
/// returned.
///
/// The written bytes are guaranteed to be deserialized correctly and
/// without errors in a semver compatible release of this crate by a
/// `DFA`'s deserialization APIs (assuming all other criteria for the
/// deserialization APIs have been satisfied):
///
/// * [`DFA::from_bytes`]
/// * [`DFA::from_bytes_unchecked`]
///
/// The padding returned is non-zero if the returned `Vec<u8>` starts at
/// an address that does not have the same alignment as `u32`. The padding
/// corresponds to the number of leading bytes written to the returned
/// `Vec<u8>`.
///
/// # Example
///
/// This example shows how to serialize and deserialize a DFA:
///
/// ```
/// use regex_automata::{dfa::{Automaton, dense::DFA}, HalfMatch};
///
/// // Compile our original DFA.
/// let original_dfa = DFA::new("foo[0-9]+")?;
///
/// // N.B. We use native endianness here to make the example work, but
/// // using to_bytes_big_endian would work on a big endian target.
/// let (buf, _) = original_dfa.to_bytes_native_endian();
/// // Even if buf has initial padding, DFA::from_bytes will automatically
/// // ignore it.
/// let dfa: DFA<&[u32]> = DFA::from_bytes(&buf)?.0;
///
/// let expected = HalfMatch::must(0, 8);
/// assert_eq!(Some(expected), dfa.find_leftmost_fwd(b"foo12345")?);
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
#[cfg(feature = "alloc")]
pub fn to_bytes_big_endian(&self) -> (Vec<u8>, usize) {
self.to_bytes::<bytes::BE>()
}
/// Serialize this DFA as raw bytes to a `Vec<u8>` in native endian
/// format. Upon success, the `Vec<u8>` and the initial padding length are
/// returned.
///
/// The written bytes are guaranteed to be deserialized correctly and
/// without errors in a semver compatible release of this crate by a
/// `DFA`'s deserialization APIs (assuming all other criteria for the
/// deserialization APIs have been satisfied):
///
/// * [`DFA::from_bytes`]
/// * [`DFA::from_bytes_unchecked`]
///
/// The padding returned is non-zero if the returned `Vec<u8>` starts at
/// an address that does not have the same alignment as `u32`. The padding
/// corresponds to the number of leading bytes written to the returned
/// `Vec<u8>`.
///
/// Generally speaking, native endian format should only be used when
/// you know that the target you're compiling the DFA for matches the
/// endianness of the target on which you're compiling the DFA, e.g., when
/// serialization and deserialization happen in the same process or on
/// the same machine. Otherwise, when serializing a DFA for use in a
/// portable environment, you'll almost certainly want to serialize _both_
/// a little endian and a big endian version and then load the correct one
/// based on the target's configuration.
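///
/// For example, here is a sketch of loading the right serialized form at
/// compile time. The file names here are hypothetical; see the examples
/// for [`DFA::from_bytes`] for a complete, working version of this
/// technique.
///
/// ```no_run
/// # const _: &str = stringify! {
/// #[cfg(target_endian = "big")]
/// static BYTES: &[u8] = include_bytes!("foo.bigendian.dfa");
/// #[cfg(target_endian = "little")]
/// static BYTES: &[u8] = include_bytes!("foo.littleendian.dfa");
/// # };
/// ```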
///
/// # Example
///
/// This example shows how to serialize and deserialize a DFA:
///
/// ```
/// use regex_automata::{dfa::{Automaton, dense::DFA}, HalfMatch};
///
/// // Compile our original DFA.
/// let original_dfa = DFA::new("foo[0-9]+")?;
///
/// let (buf, _) = original_dfa.to_bytes_native_endian();
/// // Even if buf has initial padding, DFA::from_bytes will automatically
/// // ignore it.
/// let dfa: DFA<&[u32]> = DFA::from_bytes(&buf)?.0;
///
/// let expected = HalfMatch::must(0, 8);
/// assert_eq!(Some(expected), dfa.find_leftmost_fwd(b"foo12345")?);
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
#[cfg(feature = "alloc")]
pub fn to_bytes_native_endian(&self) -> (Vec<u8>, usize) {
self.to_bytes::<bytes::NE>()
}
/// The implementation of the public `to_bytes` serialization methods,
/// which is generic over endianness.
#[cfg(feature = "alloc")]
fn to_bytes<E: Endian>(&self) -> (Vec<u8>, usize) {
let len = self.write_to_len();
let (mut buf, padding) = bytes::alloc_aligned_buffer::<u32>(len);
// This should always succeed since the only possible serialization
// error is providing a buffer that's too small, but we've ensured that
// `buf` is big enough here.
self.as_ref().write_to::<E>(&mut buf[padding..]).unwrap();
(buf, padding)
}
/// Serialize this DFA as raw bytes to the given slice, in little endian
/// format. Upon success, the total number of bytes written to `dst` is
/// returned.
///
/// The written bytes are guaranteed to be deserialized correctly and
/// without errors in a semver compatible release of this crate by a
/// `DFA`'s deserialization APIs (assuming all other criteria for the
/// deserialization APIs have been satisfied):
///
/// * [`DFA::from_bytes`]
/// * [`DFA::from_bytes_unchecked`]
///
/// Note that unlike the various `to_bytes_*` routines, this does not write
/// any padding. Callers are responsible for handling alignment correctly.
///
/// # Errors
///
/// This returns an error if the given destination slice is not big enough
/// to contain the full serialized DFA. If an error occurs, then nothing
/// is written to `dst`.
///
/// # Example
///
/// This example shows how to serialize and deserialize a DFA without
/// dynamic memory allocation.
///
/// ```
/// use regex_automata::{dfa::{Automaton, dense::DFA}, HalfMatch};
///
/// // Compile our original DFA.
/// let original_dfa = DFA::new("foo[0-9]+")?;
///
/// // Create a 4KB buffer on the stack to store our serialized DFA.
/// let mut buf = [0u8; 4 * (1<<10)];
/// // N.B. We use native endianness here to make the example work, but
/// // using write_to_little_endian would work on a little endian target.
/// let written = original_dfa.write_to_native_endian(&mut buf)?;
/// let dfa: DFA<&[u32]> = DFA::from_bytes(&buf[..written])?.0;
///
/// let expected = HalfMatch::must(0, 8);
/// assert_eq!(Some(expected), dfa.find_leftmost_fwd(b"foo12345")?);
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
pub fn write_to_little_endian(
&self,
dst: &mut [u8],
) -> Result<usize, SerializeError> {
self.as_ref().write_to::<bytes::LE>(dst)
}
/// Serialize this DFA as raw bytes to the given slice, in big endian
/// format. Upon success, the total number of bytes written to `dst` is
/// returned.
///
/// The written bytes are guaranteed to be deserialized correctly and
/// without errors in a semver compatible release of this crate by a
/// `DFA`'s deserialization APIs (assuming all other criteria for the
/// deserialization APIs have been satisfied):
///
/// * [`DFA::from_bytes`]
/// * [`DFA::from_bytes_unchecked`]
///
/// Note that unlike the various `to_bytes_*` routines, this does not write
/// any padding. Callers are responsible for handling alignment correctly.
///
/// # Errors
///
/// This returns an error if the given destination slice is not big enough
/// to contain the full serialized DFA. If an error occurs, then nothing
/// is written to `dst`.
///
/// # Example
///
/// This example shows how to serialize and deserialize a DFA without
/// dynamic memory allocation.
///
/// ```
/// use regex_automata::{dfa::{Automaton, dense::DFA}, HalfMatch};
///
/// // Compile our original DFA.
/// let original_dfa = DFA::new("foo[0-9]+")?;
///
/// // Create a 4KB buffer on the stack to store our serialized DFA.
/// let mut buf = [0u8; 4 * (1<<10)];
/// // N.B. We use native endianness here to make the example work, but
/// // using write_to_big_endian would work on a big endian target.
/// let written = original_dfa.write_to_native_endian(&mut buf)?;
/// let dfa: DFA<&[u32]> = DFA::from_bytes(&buf[..written])?.0;
///
/// let expected = HalfMatch::must(0, 8);
/// assert_eq!(Some(expected), dfa.find_leftmost_fwd(b"foo12345")?);
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
pub fn write_to_big_endian(
&self,
dst: &mut [u8],
) -> Result<usize, SerializeError> {
self.as_ref().write_to::<bytes::BE>(dst)
}
/// Serialize this DFA as raw bytes to the given slice, in native endian
/// format. Upon success, the total number of bytes written to `dst` is
/// returned.
///
/// The written bytes are guaranteed to be deserialized correctly and
/// without errors in a semver compatible release of this crate by a
/// `DFA`'s deserialization APIs (assuming all other criteria for the
/// deserialization APIs have been satisfied):
///
/// * [`DFA::from_bytes`]
/// * [`DFA::from_bytes_unchecked`]
///
/// Generally speaking, native endian format should only be used when
/// you know that the target you're compiling the DFA for matches the
/// endianness of the target on which you're compiling the DFA, e.g., when
/// serialization and deserialization happen in the same process or on
/// the same machine. Otherwise, when serializing a DFA for use in a
/// portable environment, you'll almost certainly want to serialize _both_
/// a little endian and a big endian version and then load the correct one
/// based on the target's configuration.
///
/// Note that unlike the various `to_bytes_*` routines, this does not write
/// any padding. Callers are responsible for handling alignment correctly.
///
/// # Errors
///
/// This returns an error if the given destination slice is not big enough
/// to contain the full serialized DFA. If an error occurs, then nothing
/// is written to `dst`.
///
/// # Example
///
/// This example shows how to serialize and deserialize a DFA without
/// dynamic memory allocation.
///
/// ```
/// use regex_automata::{dfa::{Automaton, dense::DFA}, HalfMatch};
///
/// // Compile our original DFA.
/// let original_dfa = DFA::new("foo[0-9]+")?;
///
/// // Create a 4KB buffer on the stack to store our serialized DFA.
/// let mut buf = [0u8; 4 * (1<<10)];
/// let written = original_dfa.write_to_native_endian(&mut buf)?;
/// let dfa: DFA<&[u32]> = DFA::from_bytes(&buf[..written])?.0;
///
/// let expected = HalfMatch::must(0, 8);
/// assert_eq!(Some(expected), dfa.find_leftmost_fwd(b"foo12345")?);
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
pub fn write_to_native_endian(
&self,
dst: &mut [u8],
) -> Result<usize, SerializeError> {
self.as_ref().write_to::<bytes::NE>(dst)
}
/// Return the total number of bytes required to serialize this DFA.
///
/// This is useful for determining the size of the buffer required to pass
/// to one of the serialization routines:
///
/// * [`DFA::write_to_little_endian`]
/// * [`DFA::write_to_big_endian`]
/// * [`DFA::write_to_native_endian`]
///
/// Passing a buffer smaller than the size returned by this method will
/// result in a serialization error. Serialization routines are guaranteed
/// to succeed when the buffer is big enough.
///
/// # Example
///
/// This example shows how to dynamically allocate enough room to serialize
/// a DFA.
///
/// ```
/// use regex_automata::{dfa::{Automaton, dense::DFA}, HalfMatch};
///
/// // Compile our original DFA.
/// let original_dfa = DFA::new("foo[0-9]+")?;
///
/// let mut buf = vec![0; original_dfa.write_to_len()];
/// let written = original_dfa.write_to_native_endian(&mut buf)?;
/// let dfa: DFA<&[u32]> = DFA::from_bytes(&buf[..written])?.0;
///
/// let expected = HalfMatch::must(0, 8);
/// assert_eq!(Some(expected), dfa.find_leftmost_fwd(b"foo12345")?);
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
///
/// Note that this example isn't actually guaranteed to work! In
/// particular, if `buf` is not aligned to a 4-byte boundary, then the
/// `DFA::from_bytes` call will fail. If you need this to work, then you
/// either need to deal with adding some initial padding yourself, or use
/// one of the `to_bytes` methods, which will do it for you.
pub fn write_to_len(&self) -> usize {
bytes::write_label_len(LABEL)
+ bytes::write_endianness_check_len()
+ bytes::write_version_len()
+ size_of::<u32>() // unused, intended for future flexibility
+ self.tt.write_to_len()
+ self.st.write_to_len()
+ self.ms.write_to_len()
+ self.special.write_to_len()
+ self.accels.write_to_len()
}
}
impl<'a> DFA<&'a [u32]> {
/// Safely deserialize a DFA with a specific state identifier
/// representation. Upon success, this returns both the deserialized DFA
/// and the number of bytes read from the given slice. Namely, the contents
/// of the slice beyond the DFA are not read.
///
/// Deserializing a DFA using this routine will never allocate heap memory.
/// For safety purposes, the DFA's transition table will be verified such
/// that every transition points to a valid state. If this verification is
/// too costly, then a [`DFA::from_bytes_unchecked`] API is provided, which
/// will always execute in constant time.
///
/// The bytes given must be generated by one of the serialization APIs
/// of a `DFA` using a semver compatible release of this crate. Those
/// include:
///
/// * [`DFA::to_bytes_little_endian`]
/// * [`DFA::to_bytes_big_endian`]
/// * [`DFA::to_bytes_native_endian`]
/// * [`DFA::write_to_little_endian`]
/// * [`DFA::write_to_big_endian`]
/// * [`DFA::write_to_native_endian`]
///
/// The `to_bytes` methods allocate and return a `Vec<u8>` for you, along
/// with handling alignment correctly. The `write_to` methods do not
/// allocate; instead, they write to an existing slice (which may be on
/// the stack).
/// Since deserialization always uses the native endianness of the target
/// platform, the serialization API you use should match the endianness of
/// the target platform. (It's often a good idea to generate serialized
/// DFAs for both forms of endianness and then load the correct one based
/// on endianness.)
///
/// # Errors
///
/// Generally speaking, it's easier to state the conditions in which an
/// error is _not_ returned. All of the following must be true:
///
/// * The bytes given must be produced by one of the serialization APIs
/// on this DFA, as mentioned above.
/// * The endianness of the target platform matches the endianness used to
/// serialize the provided DFA.
/// * The slice given must have the same alignment as `u32`.
///
/// If any of the above are not true, then an error will be returned.
///
/// # Panics
///
/// This routine will never panic for any input.
///
/// # Example
///
/// This example shows how to serialize a DFA to raw bytes, deserialize it
/// and then use it for searching.
///
/// ```
/// use regex_automata::{dfa::{Automaton, dense::DFA}, HalfMatch};
///
/// let initial = DFA::new("foo[0-9]+")?;
/// let (bytes, _) = initial.to_bytes_native_endian();
/// let dfa: DFA<&[u32]> = DFA::from_bytes(&bytes)?.0;
///
/// let expected = HalfMatch::must(0, 8);
/// assert_eq!(Some(expected), dfa.find_leftmost_fwd(b"foo12345")?);
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
///
/// # Example: dealing with alignment and padding
///
/// In the above example, we used the `to_bytes_native_endian` method to
/// serialize a DFA, but we ignored part of its return value corresponding
/// to padding added to the beginning of the serialized DFA. This is OK
/// because deserialization will skip this initial padding. What matters
/// is that the address immediately following the padding has an alignment
/// that matches `u32`. That is, the following is an equivalent but
/// alternative way to write the above example:
///
/// ```
/// use regex_automata::{dfa::{Automaton, dense::DFA}, HalfMatch};
///
/// let initial = DFA::new("foo[0-9]+")?;
/// // Serialization returns the number of leading padding bytes added to
/// // the returned Vec<u8>.
/// let (bytes, pad) = initial.to_bytes_native_endian();
/// let dfa: DFA<&[u32]> = DFA::from_bytes(&bytes[pad..])?.0;
///
/// let expected = HalfMatch::must(0, 8);
/// assert_eq!(Some(expected), dfa.find_leftmost_fwd(b"foo12345")?);
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
///
/// This padding is necessary because Rust's standard library does
/// not expose any safe and robust way of creating a `Vec<u8>` with a
/// guaranteed alignment other than 1. Now, in practice, the underlying
/// allocator is likely to provide a `Vec<u8>` that meets our alignment
/// requirements, which means `pad` is usually zero.
///
/// The purpose of exposing the padding like this is flexibility for the
/// caller. For example, if one wants to embed a serialized DFA into a
/// compiled program, then it's important to guarantee that it starts at a
/// `u32`-aligned address. The simplest way to do this is to discard the
/// padding bytes and set it up so that the serialized DFA itself begins at
/// a properly aligned address. We can show this in two parts. The first
/// part is serializing the DFA to a file:
///
/// ```no_run
/// use regex_automata::dfa::{Automaton, dense::DFA};
///
/// let dfa = DFA::new("foo[0-9]+")?;
///
/// let (bytes, pad) = dfa.to_bytes_big_endian();
/// // Write the contents of the DFA *without* the initial padding.
/// std::fs::write("foo.bigendian.dfa", &bytes[pad..])?;
///
/// // Do it again, but this time for little endian.
/// let (bytes, pad) = dfa.to_bytes_little_endian();
/// std::fs::write("foo.littleendian.dfa", &bytes[pad..])?;
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
///
/// And now the second part is embedding the DFA into the compiled program
/// and deserializing it at runtime on first use. We use conditional
/// compilation to choose the correct endianness.
///
/// ```no_run
/// use regex_automata::{dfa::{Automaton, dense}, HalfMatch};
///
/// type S = u32;
/// type DFA = dense::DFA<&'static [S]>;
///
/// fn get_foo() -> &'static DFA {
/// use std::cell::Cell;
/// use std::mem::MaybeUninit;
/// use std::sync::Once;
///
/// // This struct with a generic B is used to permit unsizing
/// // coercions, specifically, where B winds up being a [u8]. We also
/// // need repr(C) to guarantee that _align comes first, which forces
/// // a correct alignment.
/// #[repr(C)]
/// struct Aligned<B: ?Sized> {
/// _align: [S; 0],
/// bytes: B,
/// }
///
/// # const _: &str = stringify! {
/// // This assignment is made possible (implicitly) via the
/// // CoerceUnsized trait.
/// static ALIGNED: &Aligned<[u8]> = &Aligned {
/// _align: [],
/// #[cfg(target_endian = "big")]
/// bytes: *include_bytes!("foo.bigendian.dfa"),
/// #[cfg(target_endian = "little")]
/// bytes: *include_bytes!("foo.littleendian.dfa"),
/// };
/// # };
/// # static ALIGNED: &Aligned<[u8]> = &Aligned {
/// # _align: [],
/// # bytes: [],
/// # };
///
/// struct Lazy(Cell<MaybeUninit<DFA>>);
/// // SAFETY: This is safe because DFA impls Sync.
/// unsafe impl Sync for Lazy {}
///
/// static INIT: Once = Once::new();
/// static DFA: Lazy = Lazy(Cell::new(MaybeUninit::uninit()));
///
/// INIT.call_once(|| {
/// let (dfa, _) = DFA::from_bytes(&ALIGNED.bytes)
/// .expect("serialized DFA should be valid");
/// // SAFETY: This is guaranteed to only execute once, and all
/// // we do with the pointer is write the DFA to it.
/// unsafe {
/// (*DFA.0.as_ptr()).as_mut_ptr().write(dfa);
/// }
/// });
/// // SAFETY: DFA is guaranteed to be initialized via INIT and is
/// // stored in static memory.
/// unsafe {
/// let dfa = (*DFA.0.as_ptr()).as_ptr();
/// std::mem::transmute::<*const DFA, &'static DFA>(dfa)
/// }
/// }
///
/// let dfa = get_foo();
/// let expected = HalfMatch::must(0, 8);
/// assert_eq!(Ok(Some(expected)), dfa.find_leftmost_fwd(b"foo12345"));
/// ```
///
/// Alternatively, consider using
/// [`lazy_static`](https://crates.io/crates/lazy_static)
/// or
/// [`once_cell`](https://crates.io/crates/once_cell),
/// which will guarantee safety for you. You will still need to use the
/// `Aligned` trick above to force correct alignment, but this is safe to
/// do and `from_bytes` will return an error if you get it wrong.
pub fn from_bytes(
slice: &'a [u8],
) -> Result<(DFA<&'a [u32]>, usize), DeserializeError> {
// SAFETY: This is safe because we validate the transition table, start
// state ID list, match states and accelerators below. If any validation
// fails, then we return an error.
let (dfa, nread) = unsafe { DFA::from_bytes_unchecked(slice)? };
dfa.tt.validate()?;
dfa.st.validate(&dfa.tt)?;
dfa.ms.validate(&dfa)?;
dfa.accels.validate()?;
// N.B. dfa.special doesn't have a way to do unchecked deserialization,
// so it has already been validated.
Ok((dfa, nread))
}
/// Deserialize a DFA with a specific state identifier representation in
/// constant time by omitting the verification of the validity of the
/// transition table and other data inside the DFA.
///
/// This is just like [`DFA::from_bytes`], except it can potentially return
/// a DFA that exhibits undefined behavior if its transition table contains
/// invalid state identifiers.
///
/// This routine is useful if you need to deserialize a DFA cheaply
/// and cannot afford the transition table validation performed by
/// `from_bytes`.
///
/// # Example
///
/// ```
/// use regex_automata::{dfa::{Automaton, dense::DFA}, HalfMatch};
///
/// let initial = DFA::new("foo[0-9]+")?;
/// let (bytes, _) = initial.to_bytes_native_endian();
/// // SAFETY: This is guaranteed to be safe since the bytes given come
/// // directly from a compatible serialization routine.
/// let dfa: DFA<&[u32]> = unsafe { DFA::from_bytes_unchecked(&bytes)?.0 };
///
/// let expected = HalfMatch::must(0, 8);
/// assert_eq!(Some(expected), dfa.find_leftmost_fwd(b"foo12345")?);
/// # Ok::<(), Box<dyn std::error::Error>>(())
/// ```
pub unsafe fn from_bytes_unchecked(
slice: &'a [u8],
) -> Result<(DFA<&'a [u32]>, usize), DeserializeError> {
let mut nr = 0;
nr += bytes::skip_initial_padding(slice);
bytes::check_alignment::<StateID>(&slice[nr..])?;
nr += bytes::read_label(&slice[nr..], LABEL)?;
nr += bytes::read_endianness_check(&slice[nr..])?;
nr += bytes::read_version(&slice[nr..], VERSION)?;
let _unused = bytes::try_read_u32(&slice[nr..], "unused space")?;
nr += size_of::<u32>();
let (tt, nread) = TransitionTable::from_bytes_unchecked(&slice[nr..])?;
nr += nread;
let (st, nread) = StartTable::from_bytes_unchecked(&slice[nr..])?;
nr += nread;
let (ms, nread) = MatchStates::from_bytes_unchecked(&slice[nr..])?;
nr += nread;
let (special, nread) = Special::from_bytes(&slice[nr..])?;
nr += nread;
special.validate_state_count(tt.count(), tt.stride2)?;
let (accels, nread) = Accels::from_bytes_unchecked(&slice[nr..])?;
nr += nread;
Ok((DFA { tt, st, ms, special, accels }, nr))
}
/// The implementation of the public `write_to` serialization methods,
/// which is generic over endianness.
///
/// This is defined only for &[u32] to reduce binary size/compilation time.
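///
/// The layout written here mirrors `write_to_len`: a label, an endianness
/// check, a version, one unused `u32` reserved for future use, then the
/// transition table, start table, match states, special state ranges and
/// accelerators.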
fn write_to<E: Endian>(
&self,
mut dst: &mut [u8],
) -> Result<usize, SerializeError> {
let nwrite = self.write_to_len();
if dst.len() < nwrite {
return Err(SerializeError::buffer_too_small("dense DFA"));
}
dst = &mut dst[..nwrite];
let mut nw = 0;
nw += bytes::write_label(LABEL, &mut dst[nw..])?;
nw += bytes::write_endianness_check::<E>(&mut dst[nw..])?;
nw += bytes::write_version::<E>(VERSION, &mut dst[nw..])?;
nw += {
// Currently unused, intended for future flexibility
E::write_u32(0, &mut dst[nw..]);
size_of::<u32>()
};
nw += self.tt.write_to::<E>(&mut dst[nw..])?;
nw += self.st.write_to::<E>(&mut dst[nw..])?;
nw += self.ms.write_to::<E>(&mut dst[nw..])?;
nw += self.special.write_to::<E>(&mut dst[nw..])?;
nw += self.accels.write_to::<E>(&mut dst[nw..])?;
Ok(nw)
}
}
/// The following methods implement mutable routines on the internal
/// representation of a DFA. As such, we must fix the first type parameter to a
/// `Vec<u32>` since a generic `T: AsRef<[u32]>` does not permit mutation. We
/// can get away with this because these methods are internal to the crate and
/// are exclusively used during construction of the DFA.
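/// (Here, `OwnedDFA` is the crate-internal alias for `DFA<Vec<u32>>`.)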
#[cfg(feature = "alloc")]
impl OwnedDFA {
/// Add a start state of this DFA.
pub(crate) fn set_start_state(
&mut self,
index: Start,
pattern_id: Option<PatternID>,
id: StateID,
) {
assert!(self.tt.is_valid(id), "invalid start state");
self.st.set_start(index, pattern_id, id);
}
/// Set the given transition to this DFA. Both the `from` and `to` states
/// must already exist.
pub(crate) fn set_transition(
&mut self,
from: StateID,
byte: alphabet::Unit,
to: StateID,
) {
self.tt.set(from, byte, to);
}
/// Add an empty state (a state where all transitions lead to a dead state)
/// and return its identifier. The identifier returned is guaranteed to
/// not point to any other existing state.
///
/// If adding a state would exceed `StateID::LIMIT`, then this returns an
/// error.
pub(crate) fn add_empty_state(&mut self) -> Result<StateID, Error> {
self.tt.add_empty_state()
}
/// Swap the two states given in the transition table.
///
/// This routine does not do anything to check the correctness of this
/// swap. Callers must ensure that other states pointing to id1 and id2 are
/// updated appropriately.
pub(crate) fn swap_states(&mut self, id1: StateID, id2: StateID) {
self.tt.swap(id1, id2);
}
/// Truncate the states in this DFA to the given count.
///
/// This routine does not do anything to check the correctness of this
/// truncation. Callers must ensure that other states pointing to truncated
/// states are updated appropriately.
pub(crate) fn truncate_states(&mut self, count: usize) {
self.tt.truncate(count);
}
/// Return a mutable representation of the state corresponding to the given
/// id. This is useful for implementing routines that manipulate DFA states
/// (e.g., swapping states).
pub(crate) fn state_mut(&mut self, id: StateID) -> StateMut<'_> {
self.tt.state_mut(id)
}
/// Minimize this DFA in place using Hopcroft's algorithm.
pub(crate) fn minimize(&mut self) {
Minimizer::new(self).run();
}
/// Updates the match state pattern ID map to use the one provided.
///
/// This is useful when it's convenient to manipulate matching states
/// (and their corresponding pattern IDs) as a map. In particular, the
/// representation used by a DFA for this map is not amenable to mutation,
/// so if things need to be changed (like when shuffling states), it's
/// often easier to work with the map form.
pub(crate) fn set_pattern_map(
&mut self,
map: &BTreeMap<StateID, Vec<PatternID>>,
) -> Result<(), Error> {
self.ms = self.ms.new_with_map(map)?;
Ok(())
}
/// Find states that have a small number of non-loop transitions and mark
/// them as candidates for acceleration during search.
pub(crate) fn accelerate(&mut self) {
// dead and quit states can never be accelerated.
if self.state_count() <= 2 {
return;
}
// Go through every state and record their accelerator, if possible.
let mut accels = BTreeMap::new();
// Count the number of accelerated match, start and non-match/start
// states.
let (mut cmatch, mut cstart, mut cnormal) = (0, 0, 0);
for state in self.states() {
if let Some(accel) = state.accelerate(self.byte_classes()) {
accels.insert(state.id(), accel);
if self.is_match_state(state.id()) {
cmatch += 1;
} else if self.is_start_state(state.id()) {
cstart += 1;
} else {
assert!(!self.is_dead_state(state.id()));
assert!(!self.is_quit_state(state.id()));
cnormal += 1;
}
}
}
// If no states were able to be accelerated, then we're done.
if accels.is_empty() {
return;
}
let original_accels_len = accels.len();
// A remapper keeps track of state ID changes. Once we're done
// shuffling, the remapper is used to rewrite all transitions in the
// DFA based on the new positions of states.
let mut remapper = Remapper::from_dfa(self);
// As we swap states, if they are match states, we need to swap their
// pattern ID lists too (for multi-regexes). We do this by converting
// the lists to an easily swappable map, and then convert back to
// MatchStates once we're done.
let mut new_matches = self.ms.to_map(self);
// There is at least one state that gets accelerated, so these are
// guaranteed to get set to sensible values below.
self.special.min_accel = StateID::MAX;
self.special.max_accel = StateID::ZERO;
let update_special_accel =
|special: &mut Special, accel_id: StateID| {
special.min_accel = cmp::min(special.min_accel, accel_id);
special.max_accel = cmp::max(special.max_accel, accel_id);
};
// Start by shuffling match states. Any match states that are
// accelerated get moved to the end of the match state range.
if cmatch > 0 && self.special.matches() {
// N.B. special.{min,max}_match do not need updating, since the
// range/number of match states does not change. Only the ordering
// of match states may change.
let mut next_id = self.special.max_match;
let mut cur_id = next_id;
while cur_id >= self.special.min_match {
if let Some(accel) = accels.remove(&cur_id) {
accels.insert(next_id, accel);
update_special_accel(&mut self.special, next_id);
// No need to do any actual swapping for equivalent IDs.
if cur_id != next_id {
remapper.swap(self, cur_id, next_id);
// Swap pattern IDs for match states.
let cur_pids = new_matches.remove(&cur_id).unwrap();
let next_pids = new_matches.remove(&next_id).unwrap();
new_matches.insert(cur_id, next_pids);
new_matches.insert(next_id, cur_pids);
}
next_id = self.tt.prev_state_id(next_id);
}
cur_id = self.tt.prev_state_id(cur_id);
}
}
// This is where it gets tricky. Without acceleration, start states
// normally come right after match states. But we want accelerated
// states to be a single contiguous range (to make it very fast
// to determine whether a state *is* accelerated), while also keeping
// match and starting states as contiguous ranges for the same reason.
// So what we do here is shuffle states such that it looks like this:
//
// DQMMMMAAAAASSSSSSNNNNNNN
// | |
// |---------|
// accelerated states
//
// Where:
// D - dead state
// Q - quit state
// M - match state (may be accelerated)
// A - normal state that is accelerated
// S - start state (may be accelerated)
// N - normal state that is NOT accelerated
//
// We implement this by shuffling states, which is done by a sequence
// of pairwise swaps. We start by looking at all normal states to be
// accelerated. When we find one, we swap it with the earliest starting
// state, and then swap that with the earliest normal state. This
// preserves the contiguous property.
//
// Once we're done looking for accelerated normal states, now we look
// for accelerated starting states by moving them to the beginning
// of the starting state range (just like we moved accelerated match
// states to the end of the matching state range).
//
// For a more detailed/different perspective on this, see the docs
// in dfa/special.rs.
if cnormal > 0 {
// our next available starting and normal states for swapping.
let mut next_start_id = self.special.min_start;
let mut cur_id = self.from_index(self.state_count() - 1);
// This is guaranteed to exist since cnormal > 0.
let mut next_norm_id =
self.tt.next_state_id(self.special.max_start);
while cur_id >= next_norm_id {
if let Some(accel) = accels.remove(&cur_id) {
remapper.swap(self, next_start_id, cur_id);
remapper.swap(self, next_norm_id, cur_id);
// Keep our accelerator map updated with new IDs if the
// states we swapped were also accelerated.
if let Some(accel2) = accels.remove(&next_norm_id) {
accels.insert(cur_id, accel2);
}
if let Some(accel2) = accels.remove(&next_start_id) {
accels.insert(next_norm_id, accel2);
}
accels.insert(next_start_id, accel);
update_special_accel(&mut self.special, next_start_id);
// Our start range shifts one to the right now.
self.special.min_start =
self.tt.next_state_id(self.special.min_start);
self.special.max_start =
self.tt.next_state_id(self.special.max_start);
next_start_id = self.tt.next_state_id(next_start_id);
next_norm_id = self.tt.next_state_id(next_norm_id);
}
// This is pretty tricky, but if our 'next_norm_id' state also
// happened to be accelerated, then the result is that it is
// now in the position of cur_id, so we need to consider it
// again. This loop is still guaranteed to terminate though,
// because when accels contains cur_id, we're guaranteed to
// increment next_norm_id even if cur_id remains unchanged.
if !accels.contains_key(&cur_id) {
cur_id = self.tt.prev_state_id(cur_id);
}
}
}
// Just like we did for match states, but we want to move accelerated
// start states to the beginning of the range instead of the end.
if cstart > 0 {
// N.B. special.{min,max}_start do not need updating, since the
// range/number of start states does not change at this point. Only
// the ordering of start states may change.
let mut next_id = self.special.min_start;
let mut cur_id = next_id;
while cur_id <= self.special.max_start {
if let Some(accel) = accels.remove(&cur_id) {
remapper.swap(self, cur_id, next_id);
accels.insert(next_id, accel);
update_special_accel(&mut self.special, next_id);
next_id = self.tt.next_state_id(next_id);
}
cur_id = self.tt.next_state_id(cur_id);
}
}
// Remap all transitions in our DFA and assert some things.
remapper.remap(self);
// This unwrap is OK because acceleration never changes the number of
// match states or patterns in those match states. Since acceleration
// runs after the pattern map has been set at least once, we know that
// our match states cannot error.
self.set_pattern_map(&new_matches).unwrap();
self.special.set_max();
self.special.validate().expect("special state ranges should validate");
self.special
.validate_state_count(self.state_count(), self.stride2())
.expect(
"special state ranges should be consistent with state count",
);
assert_eq!(
self.special.accel_len(self.stride()),
// We record the number of accelerated states initially detected
// since the accels map is itself mutated in the process above.
// If mutated incorrectly, its size may change, and thus can't be
// trusted as a source of truth of how many accelerated states we
// expected there to be.
original_accels_len,
"mismatch with expected number of accelerated states",
);
// And finally record our accelerators. We kept our accels map updated
// as we shuffled states above, so the accelerators should now
// correspond to a contiguous range in the state ID space. (Which we
// assert.)
let mut prev: Option<StateID> = None;
for (id, accel) in accels {
assert!(prev.map_or(true, |p| self.tt.next_state_id(p) == id));
prev = Some(id);
self.accels.add(accel);
}
}
/// Shuffle the states in this DFA so that starting states, match
/// states and accelerated states are all contiguous.
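///
/// After this shuffle, the resulting state order is: dead, quit, match
/// states, start states and then all remaining states.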
///
/// See dfa/special.rs for more details.
pub(crate) fn shuffle(
&mut self,
mut matches: BTreeMap<StateID, Vec<PatternID>>,
) -> Result<(), Error> {
// The determinizer always adds a quit state and it is always second.
self.special.quit_id = self.from_index(1);
// If all we have are the dead and quit states, then we're done and
// the DFA will never produce a match.
if self.state_count() <= 2 {
self.special.set_max();
return Ok(());
}
// Collect all our start states into a convenient set and confirm there
// is no overlap with match states. In the classical DFA construction,
// start states can be match states. But because of look-around, we
// delay all matches by a byte, which prevents start states from being
// match states.
let mut is_start: BTreeSet<StateID> = BTreeSet::new();
for (start_id, _, _) in self.starts() {
// While there's nothing theoretically wrong with setting a start
// state to a dead ID (indeed, it could be an optimization!), the
// shuffling code below assumes that start states aren't dead. If
// this assumption is violated, the dead state could be shuffled
// to a new location, which must never happen. So if we do want
// to allow start states to be dead, then this assert should be
// removed and the code below fixed.
//
// N.B. Minimization can cause start states to be dead, but that
// happens after states are shuffled, so it's OK. Also, start
// states are dead for the DFA that never matches anything, but
// in that case, there are no states to shuffle.
assert_ne!(start_id, DEAD, "start state cannot be dead");
assert!(
!matches.contains_key(&start_id),
"{:?} is both a start and a match state, which is not allowed",
start_id,
);
is_start.insert(start_id);
}
// We implement shuffling by a sequence of pairwise swaps of states.
// Since we have a number of things referencing states via their
// IDs and swapping them changes their IDs, we need to record every
// swap we make so that we can remap IDs. The remapper handles this
// book-keeping for us.
let mut remapper = Remapper::from_dfa(self);
// Shuffle matching states.
if matches.is_empty() {
self.special.min_match = DEAD;
self.special.max_match = DEAD;
} else {
// The determinizer guarantees that the first two states are the
// dead and quit states, respectively. We want our match states to
// come right after quit.
let mut next_id = self.from_index(2);
let mut new_matches = BTreeMap::new();
self.special.min_match = next_id;
for (id, pids) in matches {
remapper.swap(self, next_id, id);
new_matches.insert(next_id, pids);
// If we swapped a start state, then update our set.
if is_start.contains(&next_id) {
is_start.remove(&next_id);
is_start.insert(id);
}
next_id = self.tt.next_state_id(next_id);
}
matches = new_matches;
self.special.max_match = cmp::max(
self.special.min_match,
self.tt.prev_state_id(next_id),
);
}
// Shuffle starting states.
{
let mut next_id = self.from_index(2);
if self.special.matches() {
next_id = self.tt.next_state_id(self.special.max_match);
}
self.special.min_start = next_id;
for id in is_start {
remapper.swap(self, next_id, id);
next_id = self.tt.next_state_id(next_id);
}
self.special.max_start = cmp::max(
self.special.min_start,
self.tt.prev_state_id(next_id),
);
}
// Finally remap all transitions in our DFA.
remapper.remap(self);
self.set_pattern_map(&matches)?;
self.special.set_max();
self.special.validate().expect("special state ranges should validate");
self.special
.validate_state_count(self.state_count(), self.stride2())
.expect(
"special state ranges should be consistent with state count",
);
Ok(())
}
}
/// A variety of generic internal methods for accessing DFA internals.
impl<T: AsRef<[u32]>> DFA<T> {
/// Return the byte classes used by this DFA.
pub(crate) fn byte_classes(&self) -> &ByteClasses {
&self.tt.classes
}
/// Return the info about special states.
pub(crate) fn special(&self) -> &Special {
&self.special
}
/// Return the info about special states as a mutable borrow.
#[cfg(feature = "alloc")]
pub(crate) fn special_mut(&mut self) -> &mut Special {
&mut self.special
}
/// Returns an iterator over all states in this DFA.
///
/// This iterator yields a tuple for each state. The first element of the
/// tuple corresponds to a state's identifier, and the second element
/// corresponds to the state itself (comprised of its transitions).
pub(crate) fn states(&self) -> StateIter<'_, T> {
self.tt.states()
}
/// Return the total number of states in this DFA. Every DFA has at least
/// 1 state, even the empty DFA.
pub(crate) fn state_count(&self) -> usize {
self.tt.count()
}
/// Return an iterator over all pattern IDs for the given match state.
///
/// If the given state is not a match state, then this panics.
#[cfg(feature = "alloc")]
pub(crate) fn pattern_id_slice(&self, id: StateID) -> &[PatternID] {
assert!(self.is_match_state(id));
self.ms.pattern_id_slice(self.match_state_index(id))
}
/// Return the total number of pattern IDs for the given match state.
///
/// If the given state is not a match state, then this panics.
pub(crate) fn match_pattern_len(&self, id: StateID) -> usize {
assert!(self.is_match_state(id));
self.ms.pattern_len(self.match_state_index(id))
}
/// Returns the total number of patterns matched by this DFA.
pub(crate) fn pattern_count(&self) -> usize {
self.ms.patterns
}
/// Returns a map from match state ID to a list of pattern IDs that match
/// in that state.
#[cfg(feature = "alloc")]
pub(crate) fn pattern_map(&self) -> BTreeMap<StateID, Vec<PatternID>> {
self.ms.to_map(self)
}
/// Returns the ID of the quit state for this DFA.
#[cfg(feature = "alloc")]
pub(crate) fn quit_id(&self) -> StateID {
self.from_index(1)
}
/// Convert the given state identifier to the state's index. The state's
/// index corresponds to the position in which it appears in the transition
/// table. When a DFA is NOT premultiplied, then a state's identifier is
/// also its index. When a DFA is premultiplied, then a state's identifier
/// is equal to `index * stride`, where the stride is the smallest power
/// of two greater than or equal to the alphabet length. This routine
/// reverses that.
pub(crate) fn to_index(&self, id: StateID) -> usize {
self.tt.to_index(id)
}
/// Convert the index of a state (in the range 0..self.state_count()) to an
/// actual state identifier.
///
/// This is useful when using a `Vec<T>` as an efficient map keyed by state
/// to some other information (such as a remapped state ID).
#[cfg(feature = "alloc")]
pub(crate) fn from_index(&self, index: usize) -> StateID {
self.tt.from_index(index)
}
/// Return the table of state IDs for this DFA's start states.
pub(crate) fn starts(&self) -> StartStateIter<'_> {
self.st.iter()
}
/// Returns the index of the match state for the given ID. If the
/// given ID does not correspond to a match state, then this may
/// panic or produce an incorrect result.
fn match_state_index(&self, id: StateID) -> usize {
debug_assert!(self.is_match_state(id));
// This is one of the places where we rely on the fact that match
// states are contiguous in the transition table. Namely, that the
// first match state ID always corresponds to dfa.special.min_match.
// From there, since we know the stride, we can compute the overall
// index of any match state given the match state's ID.
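//
// For example, if min_match is 512 and stride2 is 6 (i.e., a stride of
// 64), then the match state with ID 640 corresponds to match index
// (640 - 512) >> 6 = 2.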
let min = self.special().min_match.as_usize();
// CORRECTNESS: We're allowed to produce an incorrect result or panic,
// so both the subtraction and the unchecked StateID construction are
// OK.
self.to_index(StateID::new_unchecked(id.as_usize() - min))
}
/// Returns the index of the accelerator state for the given ID. If the
/// given ID does not correspond to an accelerator state, then this may
/// panic or produce an incorrect result.
fn accelerator_index(&self, id: StateID) -> usize {
let min = self.special().min_accel.as_usize();
// CORRECTNESS: We're allowed to produce an incorrect result or panic,
// so both the subtraction and the unchecked StateID construction are
// OK.
self.to_index(StateID::new_unchecked(id.as_usize() - min))
}
/// Return the accelerators for this DFA.
fn accels(&self) -> Accels<&[u32]> {
self.accels.as_ref()
}
/// Return this DFA's transition table as a slice.
fn trans(&self) -> &[StateID] {
self.tt.table()
}
}
impl<T: AsRef<[u32]>> fmt::Debug for DFA<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
writeln!(f, "dense::DFA(")?;
for state in self.states() {
fmt_state_indicator(f, self, state.id())?;
let id = if f.alternate() {
state.id().as_usize()
} else {
self.to_index(state.id())
};
write!(f, "{:06?}: ", id)?;
state.fmt(f)?;
write!(f, "\n")?;
}
writeln!(f, "")?;
for (i, (start_id, sty, pid)) in self.starts().enumerate() {
let id = if f.alternate() {
start_id.as_usize()
} else {
self.to_index(start_id)
};
if i % self.st.stride == 0 {
match pid {
None => writeln!(f, "START-GROUP(ALL)")?,
Some(pid) => {
writeln!(f, "START_GROUP(pattern: {:?})", pid)?
}
}
}
writeln!(f, " {:?} => {:06?}", sty, id)?;
}
if self.pattern_count() > 1 {
writeln!(f, "")?;
for i in 0..self.ms.count() {
let id = self.ms.match_state_id(self, i);
let id = if f.alternate() {
id.as_usize()
} else {
self.to_index(id)
};
write!(f, "MATCH({:06?}): ", id)?;
for (i, &pid) in self.ms.pattern_id_slice(i).iter().enumerate()
{
if i > 0 {
write!(f, ", ")?;
}
write!(f, "{:?}", pid)?;
}
writeln!(f, "")?;
}
}
writeln!(f, "state count: {:?}", self.state_count())?;
writeln!(f, "pattern count: {:?}", self.pattern_count())?;
writeln!(f, ")")?;
Ok(())
}
}
unsafe impl<T: AsRef<[u32]>> Automaton for DFA<T> {
#[inline]
fn is_special_state(&self, id: StateID) -> bool {
self.special.is_special_state(id)
}
#[inline]
fn is_dead_state(&self, id: StateID) -> bool {
self.special.is_dead_state(id)
}
#[inline]
fn is_quit_state(&self, id: StateID) -> bool {
self.special.is_quit_state(id)
}
#[inline]
fn is_match_state(&self, id: StateID) -> bool {
self.special.is_match_state(id)
}
#[inline]
fn is_start_state(&self, id: StateID) -> bool {
self.special.is_start_state(id)
}
#[inline]
fn is_accel_state(&self, id: StateID) -> bool {
self.special.is_accel_state(id)
}
#[inline]
fn next_state(&self, current: StateID, input: u8) -> StateID {
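// Map the raw input byte to its equivalence class. Since state
// identifiers are premultiplied by the stride, adding the class to
// the current ID indexes directly into this state's row of the
// transition table.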
let input = self.byte_classes().get(input);
let o = current.as_usize() + usize::from(input);
self.trans()[o]
}
#[inline]
unsafe fn next_state_unchecked(
&self,
current: StateID,
input: u8,
) -> StateID {
let input = self.byte_classes().get_unchecked(input);
let o = current.as_usize() + usize::from(input);
*self.trans().get_unchecked(o)
}
#[inline]
fn next_eoi_state(&self, current: StateID) -> StateID {
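// The EOI transition is stored at the offset of the special EOI
// equivalence class within each state's row.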
let eoi = self.byte_classes().eoi().as_usize();
let o = current.as_usize() + eoi;
self.trans()[o]
}
#[inline]
fn pattern_count(&self) -> usize {
self.ms.patterns
}
#[inline]
fn match_count(&self, id: StateID) -> usize {
self.match_pattern_len(id)
}
#[inline]
fn match_pattern(&self, id: StateID, match_index: usize) -> PatternID {
// This is an optimization for the very common case of a DFA with a
// single pattern. This conditional avoids a somewhat more costly path
// that finds the pattern ID from the state machine, which requires
// a bit of slicing/pointer-chasing. This optimization tends to only
// matter when matches are frequent.
if self.ms.patterns == 1 {
return PatternID::ZERO;
}
let state_index = self.match_state_index(id);
self.ms.pattern_id(state_index, match_index)
}
#[inline]
fn start_state_forward(
&self,
pattern_id: Option<PatternID>,
bytes: &[u8],
start: usize,
end: usize,
) -> StateID {
let index = Start::from_position_fwd(bytes, start, end);
self.st.start(index, pattern_id)
}
#[inline]
fn start_state_reverse(
&self,
pattern_id: Option<PatternID>,
bytes: &[u8],
start: usize,
end: usize,
) -> StateID {
let index = Start::from_position_rev(bytes, start, end);
self.st.start(index, pattern_id)
}
#[inline(always)]
fn accelerator(&self, id: StateID) -> &[u8] {
if !self.is_accel_state(id) {
return &[];
}
self.accels.needles(self.accelerator_index(id))
}
}
/// The transition table portion of a dense DFA.
///
/// The transition table is the core part of the DFA in that it describes how
/// to move from one state to another based on the input sequence observed.
#[derive(Clone)]
pub(crate) struct TransitionTable<T> {
/// A contiguous region of memory representing the transition table in
/// row-major order. The representation is dense. That is, every state
/// has precisely the same number of transitions. The maximum number of
/// transitions per state is 257 (256 for each possible byte value, plus 1
/// for the special EOI transition). If a DFA has been instructed to use
/// byte classes (the default), then the number of transitions is usually
/// substantially fewer.
///
/// In practice, T is either `Vec<u32>` or `&[u32]`.
table: T,
/// A set of equivalence classes, where a single equivalence class
/// represents a set of bytes that never discriminate between a match
/// and a non-match in the DFA. Each equivalence class corresponds to a
/// single character in this DFA's alphabet, where the maximum number of
/// characters is 257 (each possible value of a byte plus the special
/// EOI transition). Consequently, the number of equivalence classes
/// corresponds to the number of transitions for each DFA state. Note
/// though that the *space* used by each DFA state in the transition table
/// may be larger. The total space used by each DFA state is known as the
/// stride.
///
/// The only time the number of equivalence classes is fewer than 257 is if
/// the DFA's kind uses byte classes (which is the default). Equivalence
/// classes should generally only be disabled when debugging, so that
/// the transitions themselves aren't obscured. Disabling them has no
/// other benefit, since the equivalence class map is always used while
/// searching. In the vast majority of cases, the number of equivalence
/// classes is substantially smaller than 257, particularly when large
/// Unicode classes aren't used.
classes: ByteClasses,
/// The stride of each DFA state, expressed as a power-of-two exponent.
///
/// The stride of a DFA corresponds to the total amount of space used by
/// each DFA state in the transition table. This may be bigger than the
/// size of a DFA's alphabet, since the stride is always the smallest
/// power of two greater than or equal to the alphabet size.
///
/// While this wastes space, this avoids the need for integer division
/// to convert between premultiplied state IDs and their corresponding
/// indices. Instead, we can use simple bit-shifts.
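///
/// For example, an alphabet of 33 equivalence classes is padded to a
/// stride of 64 (`stride2 = 6`), so the state at index `3` gets the
/// premultiplied identifier `3 << 6 = 192`.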
///
/// See the docs for the `stride2` method for more details.
///
/// The minimum `stride2` value is `1` (corresponding to a stride of `2`)
/// while the maximum `stride2` value is `9` (corresponding to a stride of
/// `512`). The maximum is not `8` since the maximum alphabet size is `257`
/// when accounting for the special EOI transition. However, an alphabet
/// length of that size is exceptionally rare since the alphabet is shrunk
/// into equivalence classes.
stride2: usize,
}
impl<'a> TransitionTable<&'a [u32]> {
/// Deserialize a transition table starting at the beginning of `slice`.
/// Upon success, return the total number of bytes read along with the
/// transition table.
///
/// If there was a problem deserializing any part of the transition table,
/// then this returns an error. Notably, if the given slice does not have
/// the same alignment as `StateID`, then this will return an error (among
/// other possible errors).
///
/// This is guaranteed to execute in constant time.
///
/// # Safety
///
/// This routine is not safe because it does not check the validity of the
/// transition table itself. In particular, the transition table can be
/// quite large, so checking its validity can be somewhat expensive. An
/// invalid transition table is not safe because other code may rely on the
/// transition table being correct (such as explicit bounds check elision).
/// Therefore, an invalid transition table can lead to undefined behavior.
///
/// Callers that use this function must either pass on the safety invariant
/// or guarantee that the bytes given contain a valid transition table.
/// This guarantee is upheld by the bytes written by `write_to`.
unsafe fn from_bytes_unchecked(
mut slice: &'a [u8],
) -> Result<(TransitionTable<&'a [u32]>, usize), DeserializeError> {
let slice_start = slice.as_ptr() as usize;
let (count, nr) = bytes::try_read_u32_as_usize(slice, "state count")?;
slice = &slice[nr..];
let (stride2, nr) = bytes::try_read_u32_as_usize(slice, "stride2")?;
slice = &slice[nr..];
let (classes, nr) = ByteClasses::from_bytes(slice)?;
slice = &slice[nr..];
// The alphabet length (determined by the byte class map) cannot be
// bigger than the stride (total space used by each DFA state).
if stride2 > 9 {
return Err(DeserializeError::generic(
"dense DFA has invalid stride2 (too big)",
));
}
// It also cannot be zero, since even a DFA that never matches anything
// has a non-zero number of states with at least two equivalence
// classes: one for all 256 byte values and another for the EOI
// sentinel.
if stride2 < 1 {
return Err(DeserializeError::generic(
"dense DFA has invalid stride2 (too small)",
));
}
// This is OK since 1 <= stride2 <= 9.
let stride =
1usize.checked_shl(u32::try_from(stride2).unwrap()).unwrap();
if classes.alphabet_len() > stride {
return Err(DeserializeError::generic(
"alphabet size cannot be bigger than transition table stride",
));
}
let trans_count =
bytes::shl(count, stride2, "dense table transition count")?;
let table_bytes_len = bytes::mul(
trans_count,
StateID::SIZE,
"dense table state byte count",
)?;
bytes::check_slice_len(slice, table_bytes_len, "transition table")?;
bytes::check_alignment::<StateID>(slice)?;
let table_bytes = &slice[..table_bytes_len];
slice = &slice[table_bytes_len..];
// SAFETY: Since StateID is always representable as a u32, all we need
// to do is ensure that we have the proper length and alignment. We've
// checked both above, so the cast below is safe.
//
// N.B. This is the only not-safe code in this function, so we mark
// it explicitly to call it out, even though it is technically
// superfluous.
#[allow(unused_unsafe)]
let table = unsafe {
core::slice::from_raw_parts(
table_bytes.as_ptr() as *const u32,
trans_count,
)
};
let tt = TransitionTable { table, classes, stride2 };
Ok((tt, slice.as_ptr() as usize - slice_start))
}
}
#[cfg(feature = "alloc")]
impl TransitionTable<Vec<u32>> {
/// Create a minimal transition table with just two states: a dead state
/// and a quit state. The alphabet length and stride of the transition
/// table is determined by the given set of equivalence classes.
fn minimal(classes: ByteClasses) -> TransitionTable<Vec<u32>> {
let mut tt = TransitionTable {
table: vec![],
classes,
stride2: classes.stride2(),
};
// Two states, regardless of alphabet size, can always fit into u32.
tt.add_empty_state().unwrap(); // dead state
tt.add_empty_state().unwrap(); // quit state
tt
}
/// Set a transition in this table. Both the `from` and `to` states must
/// already exist, otherwise this panics. `unit` should correspond to the
/// transition out of `from` to set to `to`.
fn set(&mut self, from: StateID, unit: alphabet::Unit, to: StateID) {
assert!(self.is_valid(from), "invalid 'from' state");
assert!(self.is_valid(to), "invalid 'to' state");
self.table[from.as_usize() + self.classes.get_by_unit(unit)] =
to.as_u32();
}
/// Add an empty state (a state where all transitions lead to a dead state)
/// and return its identifier. The identifier returned is guaranteed to
/// not point to any other existing state.
///
/// If adding a state would exhaust the state identifier space, then this
/// returns an error.
fn add_empty_state(&mut self) -> Result<StateID, Error> {
// Normally, to get a fresh state identifier, we would just
// take the index of the next state added to the transition
// table. However, we actually perform an optimization here
// that premultiplies state IDs by the stride, such that they
// point immediately at the beginning of their transitions in
// the transition table. This avoids an extra multiplication
// instruction for state lookup at search time.
//
// Premultiplied identifiers means that instead of your matching
// loop looking something like this:
//
// state = dfa.start
// for byte in haystack:
// next = dfa.transitions[state * stride + byte]
// if dfa.is_match(next):
// return true
// return false
//
// it can instead look like this:
//
// state = dfa.start
// for byte in haystack:
// next = dfa.transitions[state + byte]
// if dfa.is_match(next):
// return true
// return false
//
// In other words, we save a multiplication instruction in the
// critical path. This turns out to be a decent performance win.
// The cost of using premultiplied state ids is that they can
// require a bigger state id representation. (And they also make
// the code a bit more complex, especially during minimization and
// when reshuffling states, as one needs to convert back and forth
// between state IDs and state indices.)
//
// To do this, we simply take the index of the state into the
// entire transition table, rather than the index of the state
// itself. e.g., If the stride is 64, then the ID of the 3rd state
// is 192, not 2.
let next = self.table.len();
let id = StateID::new(next).map_err(|_| Error::too_many_states())?;
self.table.extend(iter::repeat(0).take(self.stride()));
Ok(id)
}
/// Swap the two states given in this transition table.
///
/// This routine does not do anything to check the correctness of this
/// swap. Callers must ensure that other states pointing to id1 and id2 are
/// updated appropriately.
///
/// Both id1 and id2 must point to valid states, otherwise this panics.
fn swap(&mut self, id1: StateID, id2: StateID) {
assert!(self.is_valid(id1), "invalid 'id1' state: {:?}", id1);
assert!(self.is_valid(id2), "invalid 'id2' state: {:?}", id2);
// We only need to swap the parts of the state that are used. So if the
// stride is 64, but the alphabet length is only 33, then we save a lot
// of work.
for b in 0..self.classes.alphabet_len() {
self.table.swap(id1.as_usize() + b, id2.as_usize() + b);
}
}
/// Truncate the states in this transition table to the given count.
///
/// This routine does not do anything to check the correctness of this
/// truncation. Callers must ensure that other states pointing to truncated
/// states are updated appropriately.
fn truncate(&mut self, count: usize) {
self.table.truncate(count << self.stride2);
}
/// Return a mutable representation of the state corresponding to the given
/// id. This is useful for implementing routines that manipulate DFA states
/// (e.g., swapping states).
fn state_mut(&mut self, id: StateID) -> StateMut<'_> {
let alphabet_len = self.alphabet_len();
let i = id.as_usize();
StateMut {
id,
stride2: self.stride2,
transitions: &mut self.table_mut()[i..i + alphabet_len],
}
}
}
impl<T: AsRef<[u32]>> TransitionTable<T> {
/// Writes a serialized form of this transition table to the buffer given.
/// If the buffer is too small, then an error is returned. To determine
/// how big the buffer must be, use `write_to_len`.
fn write_to<E: Endian>(
&self,
mut dst: &mut [u8],
) -> Result<usize, SerializeError> {
let nwrite = self.write_to_len();
if dst.len() < nwrite {
return Err(SerializeError::buffer_too_small("transition table"));
}
dst = &mut dst[..nwrite];
// write state count
// Unwrap is OK since number of states is guaranteed to fit in a u32.
E::write_u32(u32::try_from(self.count()).unwrap(), dst);
dst = &mut dst[size_of::<u32>()..];
// write state stride (as power of 2)
// Unwrap is OK since stride2 is guaranteed to be <= 9.
E::write_u32(u32::try_from(self.stride2).unwrap(), dst);
dst = &mut dst[size_of::<u32>()..];
// write byte class map
let n = self.classes.write_to(dst)?;
dst = &mut dst[n..];
// write actual transitions
for &sid in self.table() {
let n = bytes::write_state_id::<E>(sid, &mut dst);
dst = &mut dst[n..];
}
Ok(nwrite)
}
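// For reference, the serialized layout written above (and read back by
// deserialization) is, in the chosen endianness:
//
// [u32: state count][u32: stride2][byte class map][state IDs...]
//
// where exactly 'count << stride2' state IDs follow the byte class map.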
/// Returns the number of bytes the serialized form of this transition
/// table will use.
fn write_to_len(&self) -> usize {
size_of::<u32>() // state count
+ size_of::<u32>() // stride2
+ self.classes.write_to_len()
+ (self.table().len() * StateID::SIZE)
}
/// Validates that every state ID in this transition table is valid.
///
/// That is, every state ID can be used to correctly index a state in this
/// table.
fn validate(&self) -> Result<(), DeserializeError> {
for state in self.states() {
for (_, to) in state.transitions() {
if !self.is_valid(to) {
return Err(DeserializeError::generic(
"found invalid state ID in transition table",
));
}
}
}
Ok(())
}
/// Converts this transition table to a borrowed value.
fn as_ref(&self) -> TransitionTable<&'_ [u32]> {
TransitionTable {
table: self.table.as_ref(),
classes: self.classes.clone(),
stride2: self.stride2,
}
}
/// Converts this transition table to an owned value.
#[cfg(feature = "alloc")]
fn to_owned(&self) -> TransitionTable<Vec<u32>> {
TransitionTable {
table: self.table.as_ref().to_vec(),
classes: self.classes.clone(),
stride2: self.stride2,
}
}
/// Return the state for the given ID. If the given ID is not valid, then
/// this panics.
fn state(&self, id: StateID) -> State<'_> {
assert!(self.is_valid(id));
let i = id.as_usize();
State {
id,
stride2: self.stride2,
transitions: &self.table()[i..i + self.alphabet_len()],
}
}
/// Returns an iterator over all states in this transition table.
///
/// This iterator yields a tuple for each state. The first element of the
/// tuple corresponds to a state's identifier, and the second element
/// corresponds to the state itself (comprised of its transitions).
fn states(&self) -> StateIter<'_, T> {
StateIter {
tt: self,
it: self.table().chunks(self.stride()).enumerate(),
}
}
/// Convert a state identifier to a state index (in the range
/// 0..self.count()).
///
/// This is useful when using a `Vec<T>` as an efficient map keyed by state
/// to some other information (such as a remapped state ID).
///
/// If the given ID is not valid, then this may panic or produce an
/// incorrect index.
fn to_index(&self, id: StateID) -> usize {
id.as_usize() >> self.stride2
}
/// Convert a state index (in the range 0..self.count()) to an actual
/// state identifier.
///
/// This is useful when using a `Vec<T>` as an efficient map keyed by state
/// to some other information (such as a remapped state ID).
///
/// If the given index is not in the specified range, then this may panic
/// or produce an incorrect state ID.
fn from_index(&self, index: usize) -> StateID {
// CORRECTNESS: If the given index is not valid, then it is not
// required for this to panic or return a valid state ID.
StateID::new_unchecked(index << self.stride2)
}
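// A minimal sketch of the mapping implemented by to_index/from_index,
// assuming a stride of 64 (stride2 = 6):
//
// to_index(StateID(192)) == 3 // 192 >> 6
// from_index(3) == StateID(192) // 3 << 6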
/// Returns the state ID for the state immediately following the one given.
///
/// This does not check whether the state ID returned is invalid. In fact,
/// if the state ID given is the last state in this DFA, then the state ID
/// returned is guaranteed to be invalid.
#[cfg(feature = "alloc")]
fn next_state_id(&self, id: StateID) -> StateID {
self.from_index(self.to_index(id).checked_add(1).unwrap())
}
/// Returns the state ID for the state immediately preceding the one given.
///
/// If the dead state ID is given (which is zero), then this panics.
#[cfg(feature = "alloc")]
fn prev_state_id(&self, id: StateID) -> StateID {
self.from_index(self.to_index(id).checked_sub(1).unwrap())
}
/// Returns the table as a slice of state IDs.
fn table(&self) -> &[StateID] {
let integers = self.table.as_ref();
// SAFETY: This is safe because StateID is guaranteed to be
// representable as a u32.
unsafe {
core::slice::from_raw_parts(
integers.as_ptr() as *const StateID,
integers.len(),
)
}
}
/// Returns the total number of states in this transition table.
///
/// Note that a DFA always has at least two states: the dead and quit
/// states. In particular, the dead state always has ID 0 and is
/// correspondingly always the first state. The dead state is never a match
/// state.
fn count(&self) -> usize {
self.table().len() >> self.stride2
}
/// Returns the total stride for every state in this DFA. This corresponds
/// to the total number of transitions used by each state in this DFA's
/// transition table.
fn stride(&self) -> usize {
1 << self.stride2
}
/// Returns the total number of elements in the alphabet for this
/// transition table. This is always less than or equal to `self.stride()`.
/// It is only equal when the alphabet length is a power of 2. Otherwise,
/// it is always strictly less.
fn alphabet_len(&self) -> usize {
self.classes.alphabet_len()
}
/// Returns true if and only if the given state ID is valid for this
/// transition table. Validity in this context means that the given ID can
/// be used as a valid offset with `self.stride()` to index this transition
/// table.
fn is_valid(&self, id: StateID) -> bool {
let id = id.as_usize();
id < self.table().len() && id % self.stride() == 0
}
/// Return the memory usage, in bytes, of this transition table.
///
/// This does not include the size of a `TransitionTable` value itself.
fn memory_usage(&self) -> usize {
self.table().len() * StateID::SIZE
}
}
#[cfg(feature = "alloc")]
impl<T: AsMut<[u32]>> TransitionTable<T> {
/// Returns the table as a slice of state IDs.
fn table_mut(&mut self) -> &mut [StateID] {
let integers = self.table.as_mut();
// SAFETY: This is safe because StateID is guaranteed to be
// representable as a u32.
unsafe {
core::slice::from_raw_parts_mut(
integers.as_mut_ptr() as *mut StateID,
integers.len(),
)
}
}
}
/// The set of all possible starting states in a DFA.
///
/// The set of starting states corresponds to the possible choices one can make
/// in terms of starting a DFA. That is, before following the first transition,
/// you first need to select the state that you start in.
///
/// Normally, a DFA converted from an NFA that has a single starting state
/// would itself just have one starting state. However, our support for look
/// around generally requires more starting states. The correct starting state
/// is chosen based on certain properties of the position at which we begin
/// our search.
///
/// Before listing those properties, we first must define two terms:
///
/// * `haystack` - The bytes to search. The search always starts
/// at the beginning of `haystack` and ends before or at the end of
/// `haystack`.
/// * `context` - The (possibly empty) bytes surrounding `haystack`. `haystack`
/// must be contained within `context` such that `context` is at least as big
/// as `haystack`.
///
/// This split is crucial for dealing with look-around. For example, consider
/// the context `foobarbaz`, the haystack `bar` and the regex `^bar$`. This
/// regex should _not_ match the haystack since `bar` does not appear at the
/// beginning of the input. Similarly, the regex `\Bbar\B` should match the
/// haystack because `bar` is not surrounded by word boundaries. But a search
/// that does not take context into account would not permit `\B` to match
/// since the start of the haystack, when followed by a word character, is
/// treated as a word boundary. Similarly, a
/// search that does not take context into account when searching `^bar$` in
/// the haystack `bar` would produce a match when it shouldn't.
///
/// Thus, it follows that the starting state is chosen based on the following
/// criteria, derived from the position at which the search starts in the
/// `context` (corresponding to the start of `haystack`):
///
/// 1. If the search starts at the beginning of `context`, then the `Text`
/// start state is used. (Since `^` corresponds to
/// `hir::Anchor::StartText`.)
/// 2. If the search starts at a position immediately following a line
/// terminator, then the `Line` start state is used. (Since `(?m:^)`
/// corresponds to `hir::Anchor::StartLine`.)
/// 3. If the search starts at a position immediately following a byte
/// classified as a "word" character (`[_0-9a-zA-Z]`), then the `WordByte`
/// start state is used. (Since `(?-u:\b)` corresponds to a word boundary.)
/// 4. Otherwise, if the search starts at a position immediately following
/// a byte that is not classified as a "word" character (`[^_0-9a-zA-Z]`),
/// then the `NonWordByte` start state is used. (Since `(?-u:\B)`
/// corresponds to a not-word-boundary.)
///
/// (N.B. Unicode word boundaries are not supported by the DFA because they
/// require multi-byte look-around and this is difficult to support in a DFA.)
///
/// To further complicate things, we also support constructing individual
/// anchored start states for each pattern in the DFA. (Which is required to
/// implement overlapping regexes correctly, but is also generally useful.)
/// Thus, when individual start states for each pattern are enabled, then the
/// total number of start states represented is `4 + (4 * #patterns)`, where
/// the 4 comes from each of the 4 possibilities above. The first 4 represents
/// the starting states for the entire DFA, which support searching for
/// multiple patterns simultaneously (possibly unanchored).
///
/// If individual start states are disabled, then this will only store 4
/// start states. Typically, individual start states are only enabled when
/// constructing the reverse DFA for regex matching. But they are also useful
/// for building DFAs that can search for a specific pattern or even to support
/// both anchored and unanchored searches with the same DFA.
///
/// Note though that while the start table always has either `4` or
/// `4 + (4 * #patterns)` starting state *ids*, the total number of states
/// might be considerably smaller. That is, many of the IDs may be duplicative.
/// (For example, if a regex doesn't have a `\b` sub-pattern, then there's no
/// reason to generate a unique starting state for handling word boundaries.
/// Similarly for start/end anchors.)
#[derive(Clone)]
pub(crate) struct StartTable<T> {
/// The initial start state IDs.
///
/// In practice, T is either `Vec<u32>` or `&[u32]`.
///
/// The first `stride` (currently always 4) entries always correspond to
/// the start states for the entire DFA. After that, there are
/// `stride * patterns` state IDs, where `patterns` may be zero in the
/// case of a DFA with no patterns or in the case where the DFA was built
/// without enabling starting states for each pattern.
table: T,
/// The number of starting state IDs per pattern.
stride: usize,
/// The total number of patterns for which starting states are encoded.
/// This may be zero for non-empty DFAs when the DFA was built without
/// start states for each pattern. Thus, one cannot use this field to
/// say how many patterns are in the DFA in all cases. It is specific to
/// how many patterns are represented in this start table.
patterns: usize,
}
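// To make the layout described above concrete, a hypothetical DFA with
// 2 patterns and starting states for each pattern enabled stores its
// start table as (with stride = 4):
//
// [DFA-wide: 4 start state IDs]
// [pattern 0: 4 start state IDs]
// [pattern 1: 4 start state IDs]
//
// for a total of 4 + (4 * 2) = 12 state IDs.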
#[cfg(feature = "alloc")]
impl StartTable<Vec<u32>> {
/// Create a valid set of start states all pointing to the dead state.
///
/// When the corresponding DFA is constructed with start states for each
/// pattern, then `patterns` should be the number of patterns. Otherwise,
/// it should be zero.
///
/// If the total table size could exceed the allocatable limit, then this
/// returns an error. In practice, this is unlikely to occur,
/// since it's likely that allocation would have failed long before it got
/// to this point.
fn dead(patterns: usize) -> Result<StartTable<Vec<u32>>, Error> {
assert!(patterns <= PatternID::LIMIT);
let stride = Start::count();
let pattern_starts_len = match stride.checked_mul(patterns) {
Some(x) => x,
None => return Err(Error::too_many_start_states()),
};
let table_len = match stride.checked_add(pattern_starts_len) {
Some(x) => x,
None => return Err(Error::too_many_start_states()),
};
if table_len > core::isize::MAX as usize {
return Err(Error::too_many_start_states());
}
let table = vec![DEAD.as_u32(); table_len];
Ok(StartTable { table, stride, patterns })
}
}
impl<'a> StartTable<&'a [u32]> {
/// Deserialize a table of start state IDs starting at the beginning of
/// `slice`. Upon success, return the total number of bytes read along with
/// the table of starting state IDs.
///
/// If there was a problem deserializing any part of the starting IDs,
/// then this returns an error. Notably, if the given slice does not have
/// the same alignment as `StateID`, then this will return an error (among
/// other possible errors).
///
/// This is guaranteed to execute in constant time.
///
/// # Safety
///
/// This routine is not safe because it does not check the validity of the
/// starting state IDs themselves. In particular, the number of starting
/// IDs can be of variable length, so it's possible that checking their
/// validity cannot be done in constant time. An invalid starting state
/// ID is not safe because other code may rely on the starting IDs being
/// correct (such as explicit bounds check elision). Therefore, an invalid
/// start ID can lead to undefined behavior.
///
/// Callers that use this function must either pass on the safety invariant
/// or guarantee that the bytes given contain valid starting state IDs.
/// This guarantee is upheld by the bytes written by `write_to`.
unsafe fn from_bytes_unchecked(
mut slice: &'a [u8],
) -> Result<(StartTable<&'a [u32]>, usize), DeserializeError> {
let slice_start = slice.as_ptr() as usize;
let (stride, nr) =
bytes::try_read_u32_as_usize(slice, "start table stride")?;
slice = &slice[nr..];
let (patterns, nr) =
bytes::try_read_u32_as_usize(slice, "start table patterns")?;
slice = &slice[nr..];
if stride != Start::count() {
return Err(DeserializeError::generic(
"invalid starting table stride",
));
}
if patterns > PatternID::LIMIT {
return Err(DeserializeError::generic(
"invalid number of patterns",
));
}
let pattern_table_size =
bytes::mul(stride, patterns, "invalid pattern count")?;
// Our start states always start with a single stride of start states
// for the entire automaton which permit it to match any pattern. What
// follows it are an optional set of start states for each pattern.
let start_state_count = bytes::add(
stride,
pattern_table_size,
"invalid 'any' pattern starts size",
)?;
let table_bytes_len = bytes::mul(
start_state_count,
StateID::SIZE,
"pattern table bytes length",
)?;
bytes::check_slice_len(slice, table_bytes_len, "start ID table")?;
bytes::check_alignment::<StateID>(slice)?;
let table_bytes = &slice[..table_bytes_len];
slice = &slice[table_bytes_len..];
// SAFETY: Since StateID is always representable as a u32, all we need
// to do is ensure that we have the proper length and alignment. We've
// checked both above, so the cast below is safe.
//
// N.B. This is the only not-safe code in this function, so we mark
// it explicitly to call it out, even though it is technically
// superfluous.
#[allow(unused_unsafe)]
let table = unsafe {
core::slice::from_raw_parts(
table_bytes.as_ptr() as *const u32,
start_state_count,
)
};
let st = StartTable { table, stride, patterns };
Ok((st, slice.as_ptr() as usize - slice_start))
}
}
impl<T: AsRef<[u32]>> StartTable<T> {
/// Writes a serialized form of this start table to the buffer given. If
/// the buffer is too small, then an error is returned. To determine how
/// big the buffer must be, use `write_to_len`.
fn write_to<E: Endian>(
&self,
mut dst: &mut [u8],
) -> Result<usize, SerializeError> {
let nwrite = self.write_to_len();
if dst.len() < nwrite {
return Err(SerializeError::buffer_too_small(
"starting table ids",
));
}
dst = &mut dst[..nwrite];
// write stride
// Unwrap is OK since the stride is always 4 (currently).
E::write_u32(u32::try_from(self.stride).unwrap(), dst);
dst = &mut dst[size_of::<u32>()..];
// write pattern count
// Unwrap is OK since number of patterns is guaranteed to fit in a u32.
E::write_u32(u32::try_from(self.patterns).unwrap(), dst);
dst = &mut dst[size_of::<u32>()..];
// write start IDs
for &sid in self.table() {
let n = bytes::write_state_id::<E>(sid, &mut dst);
dst = &mut dst[n..];
}
Ok(nwrite)
}
/// Returns the number of bytes the serialized form of this start ID table
/// will use.
fn write_to_len(&self) -> usize {
size_of::<u32>() // stride
+ size_of::<u32>() // # patterns
+ (self.table().len() * StateID::SIZE)
}
/// Validates that every state ID in this start table is valid by checking
/// it against the given transition table (which must be for the same DFA).
///
/// That is, every state ID can be used to correctly index a state.
fn validate(
&self,
tt: &TransitionTable<T>,
) -> Result<(), DeserializeError> {
for &id in self.table() {
if !tt.is_valid(id) {
return Err(DeserializeError::generic(
"found invalid starting state ID",
));
}
}
Ok(())
}
/// Converts this start table to a borrowed value.
fn as_ref(&self) -> StartTable<&'_ [u32]> {
StartTable {
table: self.table.as_ref(),
stride: self.stride,
patterns: self.patterns,
}
}
/// Converts this start table to an owned value.
#[cfg(feature = "alloc")]
fn to_owned(&self) -> StartTable<Vec<u32>> {
StartTable {
table: self.table.as_ref().to_vec(),
stride: self.stride,
patterns: self.patterns,
}
}
/// Return the start state for the given start index and pattern ID. If the
/// pattern ID is None, then the corresponding start state for the entire
/// DFA is returned. If the pattern ID is not None, then the corresponding
/// starting state for the given pattern is returned. If this start table
/// does not have individual starting states for each pattern, then this
/// panics.
fn start(&self, index: Start, pattern_id: Option<PatternID>) -> StateID {
let start_index = index.as_usize();
let index = match pattern_id {
None => start_index,
Some(pid) => {
let pid = pid.as_usize();
assert!(pid < self.patterns, "invalid pattern ID {:?}", pid);
self.stride + (self.stride * pid) + start_index
}
};
self.table()[index]
}
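// As a sketch of the arithmetic above (with a hypothetical stride of
// 4): the start state with start index 1 for pattern 2 lives at table
// offset 4 + (4 * 2) + 1 = 13, while the corresponding start state for
// the entire DFA lives at offset 1.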
/// Returns an iterator over all start state IDs in this table.
///
/// Each item is a triple of: start state ID, the start state type and the
/// pattern ID (if any).
fn iter(&self) -> StartStateIter<'_> {
StartStateIter { st: self.as_ref(), i: 0 }
}
/// Returns the table as a slice of state IDs.
fn table(&self) -> &[StateID] {
let integers = self.table.as_ref();
// SAFETY: This is safe because StateID is guaranteed to be
// representable as a u32.
unsafe {
core::slice::from_raw_parts(
integers.as_ptr() as *const StateID,
integers.len(),
)
}
}
/// Return the memory usage, in bytes, of this start table.
///
/// This does not include the size of a `StartTable` value itself.
fn memory_usage(&self) -> usize {
self.table().len() * StateID::SIZE
}
}
#[cfg(feature = "alloc")]
impl<T: AsMut<[u32]>> StartTable<T> {
/// Set the start state for the given index and pattern.
///
/// If the pattern ID or state ID are not valid, then this will panic.
fn set_start(
&mut self,
index: Start,
pattern_id: Option<PatternID>,
id: StateID,
) {
let start_index = index.as_usize();
let index = match pattern_id {
None => start_index,
Some(pid) => self
.stride
.checked_mul(pid.as_usize())
.unwrap()
.checked_add(self.stride)
.unwrap()
.checked_add(start_index)
.unwrap(),
};
self.table_mut()[index] = id;
}
/// Returns the table as a mutable slice of state IDs.
fn table_mut(&mut self) -> &mut [StateID] {
let integers = self.table.as_mut();
// SAFETY: This is safe because StateID is guaranteed to be
// representable as a u32.
unsafe {
core::slice::from_raw_parts_mut(
integers.as_mut_ptr() as *mut StateID,
integers.len(),
)
}
}
}
/// An iterator over start state IDs.
///
/// This iterator yields a triple of start state ID, the start state type
/// and the pattern ID (if any). The pattern ID is None for start states
/// corresponding to the entire DFA and non-None for start states corresponding
/// to a specific pattern. The latter only occurs when the DFA is compiled with
/// start states for each pattern.
pub(crate) struct StartStateIter<'a> {
st: StartTable<&'a [u32]>,
i: usize,
}
impl<'a> Iterator for StartStateIter<'a> {
type Item = (StateID, Start, Option<PatternID>);
fn next(&mut self) -> Option<(StateID, Start, Option<PatternID>)> {
let i = self.i;
let table = self.st.table();
if i >= table.len() {
return None;
}
self.i += 1;
// This unwrap is okay since the stride of the starting state table
// must always match the number of start state types.
let start_type = Start::from_usize(i % self.st.stride).unwrap();
let pid = if i < self.st.stride {
None
} else {
Some(
PatternID::new((i - self.st.stride) / self.st.stride).unwrap(),
)
};
Some((table[i], start_type, pid))
}
}
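// For example, assuming a stride of 4: table index 6 decodes to start
// type 6 % 4 == 2 with pattern ID (6 - 4) / 4 == 0, while any index
// below 4 decodes to a start state for the entire DFA (no pattern ID).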
/// This type represents the patterns that should be reported whenever a DFA
/// enters a match state. This structure exists to support DFAs that search for
/// matches for multiple regexes.
///
/// This structure relies on the fact that all match states in a DFA occur
/// contiguously in the DFA's transition table. (See dfa/special.rs for a more
/// detailed breakdown of the representation.) Namely, when a match occurs, we
/// know its state ID. Since we know the start and end of the contiguous region
/// of match states, we can use that to compute the position at which the match
/// state occurs. That in turn is used as an offset into this structure.
#[derive(Clone, Debug)]
struct MatchStates<T> {
/// `slices` is a flattened sequence of pairs, where each pair points to a
/// sub-slice of pattern_ids. The first element of the pair is an offset
/// into pattern_ids and the second element of the pair is the number
/// of 32-bit pattern IDs starting at that position. That is, each pair
/// corresponds to a single DFA match state and its corresponding match
/// IDs. The number of pairs always corresponds to the number of distinct
/// DFA match states.
///
/// In practice, T is either Vec<u32> or &[u32].
slices: T,
/// A flattened sequence of pattern IDs for each DFA match state. The only
/// way to correctly read this sequence is indirectly via `slices`.
///
/// In practice, T is either Vec<u32> or &[u32].
pattern_ids: T,
/// The total number of unique patterns represented by these match states.
patterns: usize,
}
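// A hypothetical example of this representation: a DFA whose first
// match state reports patterns {0, 2} and whose second match state
// reports pattern {1} would be encoded as
//
// slices: [0, 2, 2, 1] // one (start, len) pair per match state
// pattern_ids: [0, 2, 1]
// patterns: 3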
impl<'a> MatchStates<&'a [u32]> {
unsafe fn from_bytes_unchecked(
mut slice: &'a [u8],
) -> Result<(MatchStates<&'a [u32]>, usize), DeserializeError> {
let slice_start = slice.as_ptr() as usize;
// Read the total number of match states.
let (count, nr) =
bytes::try_read_u32_as_usize(slice, "match state count")?;
slice = &slice[nr..];
// Read the slice start/length pairs.
let pair_count = bytes::mul(2, count, "match state offset pairs")?;
let slices_bytes_len = bytes::mul(
pair_count,
PatternID::SIZE,
"match state slice offset byte length",
)?;
bytes::check_slice_len(slice, slices_bytes_len, "match state slices")?;
bytes::check_alignment::<PatternID>(slice)?;
let slices_bytes = &slice[..slices_bytes_len];
slice = &slice[slices_bytes_len..];
// SAFETY: Since PatternID is always representable as a u32, all we
// need to do is ensure that we have the proper length and alignment.
// We've checked both above, so the cast below is safe.
//
// N.B. This is one of the few not-safe snippets in this function, so
// we mark it explicitly to call it out, even though it is technically
// superfluous.
#[allow(unused_unsafe)]
let slices = unsafe {
core::slice::from_raw_parts(
slices_bytes.as_ptr() as *const u32,
pair_count,
)
};
// Read the total number of unique pattern IDs (which is always 1 more
// than the maximum pattern ID in this automaton, since pattern IDs are
// handed out contiguously starting at 0).
let (patterns, nr) =
bytes::try_read_u32_as_usize(slice, "pattern count")?;
slice = &slice[nr..];
// Now read the pattern ID count. We don't need to store this
// explicitly, but we need it to know how many pattern IDs to read.
let (idcount, nr) =
bytes::try_read_u32_as_usize(slice, "pattern ID count")?;
slice = &slice[nr..];
// Read the actual pattern IDs.
let pattern_ids_len =
bytes::mul(idcount, PatternID::SIZE, "pattern ID byte length")?;
bytes::check_slice_len(slice, pattern_ids_len, "match pattern IDs")?;
bytes::check_alignment::<PatternID>(slice)?;
let pattern_ids_bytes = &slice[..pattern_ids_len];
slice = &slice[pattern_ids_len..];
// SAFETY: Since PatternID is always representable as a u32, all we
// need to do is ensure that we have the proper length and alignment.
// We've checked both above, so the cast below is safe.
//
// N.B. This is one of the few not-safe snippets in this function, so
// we mark it explicitly to call it out, even though it is technically
// superfluous.
#[allow(unused_unsafe)]
let pattern_ids = unsafe {
core::slice::from_raw_parts(
pattern_ids_bytes.as_ptr() as *const u32,
idcount,
)
};
let ms = MatchStates { slices, pattern_ids, patterns };
Ok((ms, slice.as_ptr() as usize - slice_start))
}
}
#[cfg(feature = "alloc")]
impl MatchStates<Vec<u32>> {
fn empty(pattern_count: usize) -> MatchStates<Vec<u32>> {
assert!(pattern_count <= PatternID::LIMIT);
MatchStates {
slices: vec![],
pattern_ids: vec![],
patterns: pattern_count,
}
}
fn new(
matches: &BTreeMap<StateID, Vec<PatternID>>,
pattern_count: usize,
) -> Result<MatchStates<Vec<u32>>, Error> {
let mut m = MatchStates::empty(pattern_count);
for (_, pids) in matches.iter() {
let start = PatternID::new(m.pattern_ids.len())
.map_err(|_| Error::too_many_match_pattern_ids())?;
m.slices.push(start.as_u32());
// This is always correct since the number of patterns in a single
// match state can never exceed maximum number of allowable
// patterns. Why? Because a pattern can only appear once in a
// particular match state, by construction. (And since our pattern
// ID limit is one less than u32::MAX, we're guaranteed that the
// length fits in a u32.)
m.slices.push(u32::try_from(pids.len()).unwrap());
for &pid in pids {
m.pattern_ids.push(pid.as_u32());
}
}
m.patterns = pattern_count;
Ok(m)
}
fn new_with_map(
&self,
matches: &BTreeMap<StateID, Vec<PatternID>>,
) -> Result<MatchStates<Vec<u32>>, Error> {
MatchStates::new(matches, self.patterns)
}
}
impl<T: AsRef<[u32]>> MatchStates<T> {
/// Writes a serialized form of these match states to the buffer given. If
/// the buffer is too small, then an error is returned. To determine how
/// big the buffer must be, use `write_to_len`.
fn write_to<E: Endian>(
&self,
mut dst: &mut [u8],
) -> Result<usize, SerializeError> {
let nwrite = self.write_to_len();
if dst.len() < nwrite {
return Err(SerializeError::buffer_too_small("match states"));
}
dst = &mut dst[..nwrite];
// write state ID count
// Unwrap is OK since number of states is guaranteed to fit in a u32.
E::write_u32(u32::try_from(self.count()).unwrap(), dst);
dst = &mut dst[size_of::<u32>()..];
// write slice offset pairs
for &pid in self.slices() {
let n = bytes::write_pattern_id::<E>(pid, &mut dst);
dst = &mut dst[n..];
}
// write unique pattern ID count
// Unwrap is OK since number of patterns is guaranteed to fit in a u32.
E::write_u32(u32::try_from(self.patterns).unwrap(), dst);
dst = &mut dst[size_of::<u32>()..];
// write pattern ID count
// Unwrap is OK since we check at construction (and deserialization)
// that the number of patterns is representable as a u32.
E::write_u32(u32::try_from(self.pattern_ids().len()).unwrap(), dst);
dst = &mut dst[size_of::<u32>()..];
// write pattern IDs
for &pid in self.pattern_ids() {
let n = bytes::write_pattern_id::<E>(pid, &mut dst);
dst = &mut dst[n..];
}
Ok(nwrite)
}
/// Returns the number of bytes the serialized form of these match
/// states will use.
fn write_to_len(&self) -> usize {
size_of::<u32>() // match state count
+ (self.slices().len() * PatternID::SIZE)
+ size_of::<u32>() // unique pattern ID count
+ size_of::<u32>() // pattern ID count
+ (self.pattern_ids().len() * PatternID::SIZE)
}
/// Validates that the match state info is itself internally consistent and
/// consistent with the recorded match state region in the given DFA.
fn validate(&self, dfa: &DFA<T>) -> Result<(), DeserializeError> {
if self.count() != dfa.special.match_len(dfa.stride()) {
return Err(DeserializeError::generic(
"match state count mismatch",
));
}
for si in 0..self.count() {
let start = self.slices()[si * 2].as_usize();
let len = self.slices()[si * 2 + 1].as_usize();
if start >= self.pattern_ids().len() {
return Err(DeserializeError::generic(
"invalid pattern ID start offset",
));
}
if start + len > self.pattern_ids().len() {
return Err(DeserializeError::generic(
"invalid pattern ID length",
));
}
for mi in 0..len {
let pid = self.pattern_id(si, mi);
if pid.as_usize() >= self.patterns {
return Err(DeserializeError::generic(
"invalid pattern ID",
));
}
}
}
Ok(())
}
/// Converts these match states back into their map form. This is useful
/// when shuffling states, as the normal MatchStates representation is not
/// amenable to easy state swapping. But with this map, to swap id1 and
/// id2, all you need to do is:
///
/// if let Some(pids) = map.remove(&id1) {
/// map.insert(id2, pids);
/// }
///
/// Once shuffling is done, use MatchStates::new to convert back.
#[cfg(feature = "alloc")]
fn to_map(&self, dfa: &DFA<T>) -> BTreeMap<StateID, Vec<PatternID>> {
let mut map = BTreeMap::new();
for i in 0..self.count() {
let mut pids = vec![];
for j in 0..self.pattern_len(i) {
pids.push(self.pattern_id(i, j));
}
map.insert(self.match_state_id(dfa, i), pids);
}
map
}
/// Converts these match states to a borrowed value.
fn as_ref(&self) -> MatchStates<&'_ [u32]> {
MatchStates {
slices: self.slices.as_ref(),
pattern_ids: self.pattern_ids.as_ref(),
patterns: self.patterns,
}
}
/// Converts these match states to an owned value.
#[cfg(feature = "alloc")]
fn to_owned(&self) -> MatchStates<Vec<u32>> {
MatchStates {
slices: self.slices.as_ref().to_vec(),
pattern_ids: self.pattern_ids.as_ref().to_vec(),
patterns: self.patterns,
}
}
/// Returns the match state ID given the match state index. (Where the
/// first match state corresponds to index 0.)
///
/// This panics if there is no match state at the given index.
fn match_state_id(&self, dfa: &DFA<T>, index: usize) -> StateID {
assert!(dfa.special.matches(), "no match states to index");
// This is one of the places where we rely on the fact that match
// states are contiguous in the transition table. Namely, that the
// first match state ID always corresponds to dfa.special.min_match.
// From there, since we know the stride, we can compute the ID of any
// match state given its index.
let stride2 = u32::try_from(dfa.stride2()).unwrap();
let offset = index.checked_shl(stride2).unwrap();
let id = dfa.special.min_match.as_usize().checked_add(offset).unwrap();
let sid = StateID::new(id).unwrap();
assert!(dfa.is_match_state(sid));
sid
}
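// Sketching the computation above with hypothetical numbers: if
// min_match is 320 and stride2 is 6, then the match state at index 2
// has ID 320 + (2 << 6) = 448.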
/// Returns the pattern ID at the given match index for the given match
/// state.
///
/// The match state index is the state index minus the state index of the
/// first match state in the DFA.
///
/// The match index is the index of the pattern ID for the given state.
/// The index must be less than `self.pattern_len(state_index)`.
fn pattern_id(&self, state_index: usize, match_index: usize) -> PatternID {
self.pattern_id_slice(state_index)[match_index]
}
/// Returns the number of patterns in the given match state.
///
/// The match state index is the state index minus the state index of the
/// first match state in the DFA.
fn pattern_len(&self, state_index: usize) -> usize {
self.slices()[state_index * 2 + 1].as_usize()
}
/// Returns all of the pattern IDs for the given match state index.
///
/// The match state index is the state index minus the state index of the
/// first match state in the DFA.
fn pattern_id_slice(&self, state_index: usize) -> &[PatternID] {
let start = self.slices()[state_index * 2].as_usize();
let len = self.pattern_len(state_index);
&self.pattern_ids()[start..start + len]
}
/// Returns the pattern ID offset slice of u32 as a slice of PatternID.
fn slices(&self) -> &[PatternID] {
let integers = self.slices.as_ref();
// SAFETY: This is safe because PatternID is guaranteed to be
// representable as a u32.
unsafe {
core::slice::from_raw_parts(
integers.as_ptr() as *const PatternID,
integers.len(),
)
}
}
/// Returns the total number of match states.
fn count(&self) -> usize {
assert_eq!(0, self.slices().len() % 2);
self.slices().len() / 2
}
/// Returns the pattern ID slice of u32 as a slice of PatternID.
fn pattern_ids(&self) -> &[PatternID] {
let integers = self.pattern_ids.as_ref();
// SAFETY: This is safe because PatternID is guaranteed to be
// representable as a u32.
unsafe {
core::slice::from_raw_parts(
integers.as_ptr() as *const PatternID,
integers.len(),
)
}
}
/// Return the memory usage, in bytes, of these match pairs.
fn memory_usage(&self) -> usize {
(self.slices().len() + self.pattern_ids().len()) * PatternID::SIZE
}
}
/// An iterator over all states in a DFA.
///
/// This iterator yields a tuple for each state. The first element of the
/// tuple corresponds to a state's identifier, and the second element
/// corresponds to the state itself (comprised of its transitions).
///
/// `'a` corresponds to the lifetime of the original DFA and `T` corresponds
/// to the type of the transition table itself.
pub(crate) struct StateIter<'a, T> {
tt: &'a TransitionTable<T>,
it: iter::Enumerate<slice::Chunks<'a, StateID>>,
}
impl<'a, T: AsRef<[u32]>> Iterator for StateIter<'a, T> {
type Item = State<'a>;
fn next(&mut self) -> Option<State<'a>> {
self.it.next().map(|(index, _)| {
let id = self.tt.from_index(index);
self.tt.state(id)
})
}
}
/// An immutable representation of a single DFA state.
///
/// `'a` corresponds to the lifetime of a DFA's transition table.
pub(crate) struct State<'a> {
id: StateID,
stride2: usize,
transitions: &'a [StateID],
}
impl<'a> State<'a> {
/// Return an iterator over all transitions in this state. This yields
/// a number of transitions equivalent to the alphabet length of the
/// corresponding DFA.
///
/// Each transition is represented by a tuple. The first element is
/// the input byte for that transition and the second element is the
/// transition itself.
pub(crate) fn transitions(&self) -> StateTransitionIter<'_> {
StateTransitionIter {
len: self.transitions.len(),
it: self.transitions.iter().enumerate(),
}
}
/// Return an iterator over a sparse representation of the transitions in
/// this state. Only non-dead transitions are returned.
///
/// The "sparse" representation in this case corresponds to a sequence of
/// triples. The first two elements of the triple comprise an inclusive
/// byte range while the last element corresponds to the transition taken
/// for all bytes in the range.
///
/// This is somewhat more condensed than the classical sparse
/// representation (where you have an element for every non-dead
/// transition), but in practice, checking if a byte is in a range is very
/// cheap and using ranges tends to conserve quite a bit more space.
pub(crate) fn sparse_transitions(&self) -> StateSparseTransitionIter<'_> {
StateSparseTransitionIter { dense: self.transitions(), cur: None }
}
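// For example, a state whose dense transitions are
//
// 'a' => S1, 'b' => S1, 'c' => S1, 'd' => DEAD, 'e' => S2
//
// yields the sparse triples ('a', 'c', S1) and ('e', 'e', S2). The
// dead transition is skipped entirely.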
/// Returns the identifier for this state.
pub(crate) fn id(&self) -> StateID {
self.id
}
/// Analyzes this state to determine whether it can be accelerated. If so,
/// it returns an accelerator that contains at least one byte.
#[cfg(feature = "alloc")]
fn accelerate(&self, classes: &ByteClasses) -> Option<Accel> {
// We just try to add bytes to our accelerator. Once adding fails
// (because we've added too many bytes), then give up.
let mut accel = Accel::new();
for (class, id) in self.transitions() {
if id == self.id() {
continue;
}
for unit in classes.elements(class) {
if let Some(byte) = unit.as_u8() {
if !accel.add(byte) {
return None;
}
}
}
}
if accel.is_empty() {
None
} else {
Some(accel)
}
}
}
impl<'a> fmt::Debug for State<'a> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
for (i, (start, end, id)) in self.sparse_transitions().enumerate() {
let index = if f.alternate() {
id.as_usize()
} else {
id.as_usize() >> self.stride2
};
if i > 0 {
write!(f, ", ")?;
}
if start == end {
write!(f, "{:?} => {:?}", start, index)?;
} else {
write!(f, "{:?}-{:?} => {:?}", start, end, index)?;
}
}
Ok(())
}
}
/// A mutable representation of a single DFA state.
///
/// `'a` corresponds to the lifetime of a DFA's transition table.
#[cfg(feature = "alloc")]
pub(crate) struct StateMut<'a> {
id: StateID,
stride2: usize,
transitions: &'a mut [StateID],
}
#[cfg(feature = "alloc")]
impl<'a> StateMut<'a> {
/// Return an iterator over all transitions in this state. This yields
/// a number of transitions equivalent to the alphabet length of the
/// corresponding DFA.
///
/// Each transition is represented by a tuple. The first element is the
/// input byte for that transition and the second element is a mutable
/// reference to the transition itself.
pub(crate) fn iter_mut(&mut self) -> StateTransitionIterMut<'_> {
StateTransitionIterMut {
len: self.transitions.len(),
it: self.transitions.iter_mut().enumerate(),
}
}
}
#[cfg(feature = "alloc")]
impl<'a> fmt::Debug for StateMut<'a> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(
&State {
id: self.id,
stride2: self.stride2,
transitions: self.transitions,
},
f,
)
}
}
/// An iterator over all transitions in a single DFA state. This yields
/// a number of transitions equivalent to the alphabet length of the
/// corresponding DFA.
///
/// Each transition is represented by a tuple. The first element is the input
/// byte for that transition and the second element is the transition itself.
#[derive(Debug)]
pub(crate) struct StateTransitionIter<'a> {
len: usize,
it: iter::Enumerate<slice::Iter<'a, StateID>>,
}
impl<'a> Iterator for StateTransitionIter<'a> {
type Item = (alphabet::Unit, StateID);
fn next(&mut self) -> Option<(alphabet::Unit, StateID)> {
self.it.next().map(|(i, &id)| {
let unit = if i + 1 == self.len {
alphabet::Unit::eoi(i)
} else {
let b = u8::try_from(i)
.expect("raw byte alphabet is never exceeded");
alphabet::Unit::u8(b)
};
(unit, id)
})
}
}
/// A mutable iterator over all transitions in a DFA state.
///
/// Each transition is represented by a tuple. The first element is the
/// input byte for that transition and the second element is a mutable
/// reference to the transition itself.
#[cfg(feature = "alloc")]
#[derive(Debug)]
pub(crate) struct StateTransitionIterMut<'a> {
len: usize,
it: iter::Enumerate<slice::IterMut<'a, StateID>>,
}
#[cfg(feature = "alloc")]
impl<'a> Iterator for StateTransitionIterMut<'a> {
type Item = (alphabet::Unit, &'a mut StateID);
fn next(&mut self) -> Option<(alphabet::Unit, &'a mut StateID)> {
self.it.next().map(|(i, id)| {
let unit = if i + 1 == self.len {
alphabet::Unit::eoi(i)
} else {
let b = u8::try_from(i)
.expect("raw byte alphabet is never exceeded");
alphabet::Unit::u8(b)
};
(unit, id)
})
}
}
/// An iterator over all non-DEAD transitions in a single DFA state using a
/// sparse representation.
///
/// Each transition is represented by a triple. The first two elements of the
/// triple comprise an inclusive byte range while the last element corresponds
/// to the transition taken for all bytes in the range.
///
/// As a convenience, this always returns `alphabet::Unit` values of the same
/// type. That is, you'll never get a (byte, EOI) or a (EOI, byte). Only (byte,
/// byte) and (EOI, EOI) values are yielded.
#[derive(Debug)]
pub(crate) struct StateSparseTransitionIter<'a> {
dense: StateTransitionIter<'a>,
cur: Option<(alphabet::Unit, alphabet::Unit, StateID)>,
}
impl<'a> Iterator for StateSparseTransitionIter<'a> {
type Item = (alphabet::Unit, alphabet::Unit, StateID);
fn next(&mut self) -> Option<(alphabet::Unit, alphabet::Unit, StateID)> {
while let Some((unit, next)) = self.dense.next() {
let (prev_start, prev_end, prev_next) = match self.cur {
Some(t) => t,
None => {
self.cur = Some((unit, unit, next));
continue;
}
};
if prev_next == next && !unit.is_eoi() {
self.cur = Some((prev_start, unit, prev_next));
} else {
self.cur = Some((unit, unit, next));
if prev_next != DEAD {
return Some((prev_start, prev_end, prev_next));
}
}
}
if let Some((start, end, next)) = self.cur.take() {
if next != DEAD {
return Some((start, end, next));
}
}
None
}
}
/// An iterator over pattern IDs for a single match state.
#[derive(Debug)]
pub(crate) struct PatternIDIter<'a>(slice::Iter<'a, PatternID>);
impl<'a> Iterator for PatternIDIter<'a> {
type Item = PatternID;
fn next(&mut self) -> Option<PatternID> {
self.0.next().copied()
}
}
/// Remapper is an abstraction that manages the remapping of state IDs in a
/// dense DFA. This is useful when one wants to shuffle states into different
/// positions in the DFA.
///
/// One of the key complexities this manages is the ability to correctly move
/// one state multiple times.
///
/// Once shuffling is complete, `remap` should be called, which will rewrite
/// all pertinent transitions to updated state IDs.
#[cfg(feature = "alloc")]
#[derive(Debug)]
struct Remapper {
/// A map from the index of a state to its pre-multiplied identifier.
///
/// When a state is swapped with another, then their corresponding
/// locations in this map are also swapped. Thus, its new position will
/// still point to its old pre-multiplied StateID.
///
/// While there is a bit more to it, this then allows us to rewrite the
/// state IDs in a DFA's transition table in a single pass. This is done
/// by iterating over every ID in this map, then iterating over each
/// transition for the state at that ID and re-mapping the transition from
/// `old_id` to `map[dfa.to_index(old_id)]`. That is, we find the position
/// in this map where `old_id` *started*, and set it to where it ended up
/// after all swaps have been completed.
map: Vec<StateID>,
}
#[cfg(feature = "alloc")]
impl Remapper {
fn from_dfa(dfa: &OwnedDFA) -> Remapper {
Remapper {
map: (0..dfa.state_count()).map(|i| dfa.from_index(i)).collect(),
}
}
fn swap(&mut self, dfa: &mut OwnedDFA, id1: StateID, id2: StateID) {
dfa.swap_states(id1, id2);
self.map.swap(dfa.to_index(id1), dfa.to_index(id2));
}
fn remap(mut self, dfa: &mut OwnedDFA) {
// Update the map to account for states that have been swapped
// multiple times. For example, if (A, C) and (C, G) are swapped, then
// transitions previously pointing to A should now point to G. But if
// we don't update our map, they will erroneously be set to C. All we
// do is follow the swaps in our map until we see our original state
// ID.
let oldmap = self.map.clone();
for i in 0..dfa.state_count() {
let cur_id = dfa.from_index(i);
let mut new = oldmap[i];
if cur_id == new {
continue;
}
loop {
let id = oldmap[dfa.to_index(new)];
if cur_id == id {
self.map[i] = new;
break;
}
new = id;
}
}
// To work around the borrow checker for converting state IDs to
// indices. We cannot borrow self while mutably iterating over a
// state's transitions. Otherwise, we'd just use dfa.to_index(..).
let stride2 = dfa.stride2();
let to_index = |id: StateID| -> usize { id.as_usize() >> stride2 };
// Now that we've finished shuffling, we need to remap all of our
// transitions. We don't need to handle re-mapping accelerated states
// since `accels` is only populated after shuffling.
for &id in self.map.iter() {
for (_, next_id) in dfa.state_mut(id).iter_mut() {
*next_id = self.map[to_index(*next_id)];
}
}
for start_id in dfa.st.table_mut().iter_mut() {
*start_id = self.map[to_index(*start_id)];
}
}
}
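// A minimal sketch of the intended usage (the state IDs here are
// hypothetical):
//
// let mut remapper = Remapper::from_dfa(&dfa);
// remapper.swap(&mut dfa, id1, id2);
// remapper.swap(&mut dfa, id2, id3);
// // ... more swaps ...
// remapper.remap(&mut dfa); // rewrite all transitions once, at the end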
#[cfg(all(test, feature = "alloc"))]
mod tests {
use super::*;
#[test]
fn errors_with_unicode_word_boundary() {
let pattern = r"\b";
assert!(Builder::new().build(pattern).is_err());
}
#[test]
fn roundtrip_never_match() {
let dfa = DFA::never_match().unwrap();
let (buf, _) = dfa.to_bytes_native_endian();
let dfa: DFA<&[u32]> = DFA::from_bytes(&buf).unwrap().0;
assert_eq!(None, dfa.find_leftmost_fwd(b"foo12345").unwrap());
}
#[test]
fn roundtrip_always_match() {
use crate::HalfMatch;
let dfa = DFA::always_match().unwrap();
let (buf, _) = dfa.to_bytes_native_endian();
let dfa: DFA<&[u32]> = DFA::from_bytes(&buf).unwrap().0;
assert_eq!(
Some(HalfMatch::must(0, 0)),
dfa.find_leftmost_fwd(b"foo12345").unwrap()
);
}
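// An additional sanity check, added here as a sketch: premultiplied
// state IDs should round-trip through their index form. This leans on
// the crate-private to_index/from_index helpers used by Remapper above.
#[test]
fn premultiplied_ids_roundtrip_through_indices() {
let dfa = DFA::always_match().unwrap();
for i in 0..dfa.state_count() {
let id = dfa.from_index(i);
assert_eq!(i, dfa.to_index(id));
}
}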
}