/src/regex/regex-automata/src/dfa/sparse.rs
Line | Count | Source |
1 | | /*! |
2 | | Types and routines specific to sparse DFAs. |
3 | | |
4 | | This module is the home of [`sparse::DFA`](DFA). |
5 | | |
6 | | Unlike the [`dense`] module, this module does not contain a builder or |
7 | | configuration specific for sparse DFAs. Instead, the intended way to build a |
8 | | sparse DFA is either by using a default configuration with its constructor |
9 | | [`sparse::DFA::new`](DFA::new), or by first configuring the construction of a |
10 | | dense DFA with [`dense::Builder`] and then calling [`dense::DFA::to_sparse`]. |
11 | | For example, this configures a sparse DFA to do an overlapping search: |
12 | | |
13 | | ``` |
14 | | use regex_automata::{ |
15 | | dfa::{Automaton, OverlappingState, dense}, |
16 | | HalfMatch, Input, MatchKind, |
17 | | }; |
18 | | |
19 | | let dense_re = dense::Builder::new() |
20 | | .configure(dense::Config::new().match_kind(MatchKind::All)) |
21 | | .build(r"Samwise|Sam")?; |
22 | | let sparse_re = dense_re.to_sparse()?; |
23 | | |
24 | | // Set up our haystack and initial start state.
25 | | let input = Input::new("Samwise"); |
26 | | let mut state = OverlappingState::start(); |
27 | | |
28 | | // First, 'Sam' will match. |
29 | | sparse_re.try_search_overlapping_fwd(&input, &mut state)?; |
30 | | assert_eq!(Some(HalfMatch::must(0, 3)), state.get_match()); |
31 | | |
32 | | // And now 'Samwise' will match. |
33 | | sparse_re.try_search_overlapping_fwd(&input, &mut state)?; |
34 | | assert_eq!(Some(HalfMatch::must(0, 7)), state.get_match()); |
35 | | # Ok::<(), Box<dyn std::error::Error>>(()) |
36 | | ``` |
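 | | 
 | | If the default configuration suffices, construction is a one-liner. A
 | | minimal sketch:
 | | 
 | | ```
 | | use regex_automata::{dfa::{Automaton, sparse}, HalfMatch, Input};
 | | 
 | | // Leftmost-first semantics are the default, so the first alternative
 | | // 'Samwise' takes priority over 'Sam'.
 | | let re = sparse::DFA::new("Samwise|Sam")?;
 | | let expected = Some(HalfMatch::must(0, 7));
 | | assert_eq!(expected, re.try_search_fwd(&Input::new("Samwise"))?);
 | | # Ok::<(), Box<dyn std::error::Error>>(())
 | | ```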
37 | | */ |
38 | | |
39 | | #[cfg(feature = "dfa-build")] |
40 | | use core::iter; |
41 | | use core::{fmt, mem::size_of}; |
42 | | |
43 | | #[cfg(feature = "dfa-build")] |
44 | | use alloc::{vec, vec::Vec}; |
45 | | |
46 | | #[cfg(feature = "dfa-build")] |
47 | | use crate::dfa::dense::{self, BuildError}; |
48 | | use crate::{ |
49 | | dfa::{ |
50 | | automaton::{fmt_state_indicator, Automaton, StartError}, |
51 | | dense::Flags, |
52 | | special::Special, |
53 | | StartKind, DEAD, |
54 | | }, |
55 | | util::{ |
56 | | alphabet::{ByteClasses, ByteSet}, |
57 | | escape::DebugByte, |
58 | | int::{Pointer, Usize, U16, U32}, |
59 | | prefilter::Prefilter, |
60 | | primitives::{PatternID, StateID}, |
61 | | search::Anchored, |
62 | | start::{self, Start, StartByteMap}, |
63 | | wire::{self, DeserializeError, Endian, SerializeError}, |
64 | | }, |
65 | | }; |
66 | | |
67 | | const LABEL: &str = "rust-regex-automata-dfa-sparse"; |
68 | | const VERSION: u32 = 2; |
69 | | |
70 | | /// A sparse deterministic finite automaton (DFA) with variable-sized states.
71 | | /// |
72 | | /// In contrast to a [dense::DFA], a sparse DFA uses a more space efficient |
73 | | /// representation for its transitions. Consequently, sparse DFAs may use much |
74 | | /// less memory than dense DFAs, but this comes at a price. In particular, |
75 | | /// reading the more space efficient transitions takes more work, and |
76 | | /// consequently, searching using a sparse DFA is typically slower than a dense |
77 | | /// DFA. |
78 | | /// |
79 | | /// A sparse DFA can be built using the default configuration via the |
80 | | /// [`DFA::new`] constructor. Otherwise, one can configure various aspects of a |
81 | | /// dense DFA via [`dense::Builder`], and then convert a dense DFA to a sparse |
82 | | /// DFA using [`dense::DFA::to_sparse`]. |
83 | | /// |
84 | | /// In general, a sparse DFA supports all the same search operations as a dense |
85 | | /// DFA. |
86 | | /// |
87 | | /// Making the choice between a dense and sparse DFA depends on your specific |
88 | | /// workload. If you can sacrifice a bit of search time performance, then a
89 | | /// sparse DFA might be the best choice. In particular, while sparse DFAs are |
90 | | /// probably always slower than dense DFAs, you may find that they are easily |
91 | | /// fast enough for your purposes! |
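 | | ///
 | | /// To get a feel for the trade-off on your own patterns, it can help
 | | /// to compare [`DFA::memory_usage`] before and after conversion. A
 | | /// minimal sketch (exact numbers vary by pattern and platform):
 | | ///
 | | /// ```
 | | /// use regex_automata::dfa::dense;
 | | ///
 | | /// let dense_dfa = dense::DFA::new("foo[0-9]+")?;
 | | /// let sparse_dfa = dense_dfa.to_sparse()?;
 | | /// // The sparse representation is usually, though not necessarily
 | | /// // always, the smaller of the two.
 | | /// println!("dense: {} bytes", dense_dfa.memory_usage());
 | | /// println!("sparse: {} bytes", sparse_dfa.memory_usage());
 | | /// # Ok::<(), Box<dyn std::error::Error>>(())
 | | /// ```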
92 | | /// |
93 | | /// # Type parameters |
94 | | /// |
95 | | /// A `DFA` has one type parameter, `T`, which is used to represent the parts |
96 | | /// of a sparse DFA. `T` is typically a `Vec<u8>` or a `&[u8]`. |
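 | | ///
 | | /// A quick sketch of the two common instantiations:
 | | ///
 | | /// ```
 | | /// use regex_automata::dfa::sparse::DFA;
 | | ///
 | | /// // Building always yields an owned DFA...
 | | /// let owned: DFA<Vec<u8>> = DFA::new("[0-9]+")?;
 | | /// // ...which can be cheaply borrowed without copying the transitions.
 | | /// let borrowed: DFA<&[u8]> = owned.as_ref();
 | | /// # Ok::<(), Box<dyn std::error::Error>>(())
 | | /// ```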
97 | | /// |
98 | | /// # The `Automaton` trait |
99 | | /// |
100 | | /// This type implements the [`Automaton`] trait, which means it can be used |
101 | | /// for searching. For example: |
102 | | /// |
103 | | /// ``` |
104 | | /// use regex_automata::{dfa::{Automaton, sparse::DFA}, HalfMatch, Input}; |
105 | | /// |
106 | | /// let dfa = DFA::new("foo[0-9]+")?; |
107 | | /// let expected = Some(HalfMatch::must(0, 8)); |
108 | | /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); |
109 | | /// # Ok::<(), Box<dyn std::error::Error>>(()) |
110 | | /// ``` |
111 | | #[derive(Clone)] |
112 | | pub struct DFA<T> { |
113 | | // When compared to a dense DFA, a sparse DFA *looks* a lot simpler |
114 | | // representation-wise. In reality, it is perhaps more complicated. Namely, |
115 | | // in a dense DFA, all information needs to be very cheaply accessible |
116 | | // using only state IDs. In a sparse DFA however, each state uses a |
117 | | // variable amount of space because each state encodes more information |
118 | | // than just its transitions. Each state also includes an accelerator if |
119 | | // one exists, along with the matching pattern IDs if the state is a match |
120 | | // state. |
121 | | // |
122 | | // That is, a lot of the complexity is pushed down into how each state |
123 | | // itself is represented. |
124 | | tt: Transitions<T>, |
125 | | st: StartTable<T>, |
126 | | special: Special, |
127 | | pre: Option<Prefilter>, |
128 | | quitset: ByteSet, |
129 | | flags: Flags, |
130 | | } |
131 | | |
132 | | #[cfg(feature = "dfa-build")] |
133 | | impl DFA<Vec<u8>> { |
134 | | /// Parse the given regular expression using a default configuration and |
135 | | /// return the corresponding sparse DFA. |
136 | | /// |
137 | | /// If you want a non-default configuration, then use the |
138 | | /// [`dense::Builder`] to set your own configuration, and then call |
139 | | /// [`dense::DFA::to_sparse`] to create a sparse DFA. |
140 | | /// |
141 | | /// # Example |
142 | | /// |
143 | | /// ``` |
144 | | /// use regex_automata::{dfa::{Automaton, sparse}, HalfMatch, Input}; |
145 | | /// |
146 | | /// let dfa = sparse::DFA::new("foo[0-9]+bar")?; |
147 | | /// |
148 | | /// let expected = Some(HalfMatch::must(0, 11)); |
149 | | /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345bar"))?); |
150 | | /// # Ok::<(), Box<dyn std::error::Error>>(()) |
151 | | /// ``` |
152 | | #[cfg(feature = "syntax")] |
153 | 0 | pub fn new(pattern: &str) -> Result<DFA<Vec<u8>>, BuildError> { |
154 | 0 | dense::Builder::new() |
155 | 0 | .build(pattern) |
156 | 0 | .and_then(|dense| dense.to_sparse()) |
157 | 0 | } |
158 | | |
159 | | /// Parse the given regular expressions using a default configuration and |
160 | | /// return the corresponding multi-DFA. |
161 | | /// |
162 | | /// If you want a non-default configuration, then use the |
163 | | /// [`dense::Builder`] to set your own configuration, and then call |
164 | | /// [`dense::DFA::to_sparse`] to create a sparse DFA. |
165 | | /// |
166 | | /// # Example |
167 | | /// |
168 | | /// ``` |
169 | | /// use regex_automata::{dfa::{Automaton, sparse}, HalfMatch, Input}; |
170 | | /// |
171 | | /// let dfa = sparse::DFA::new_many(&["[0-9]+", "[a-z]+"])?; |
172 | | /// let expected = Some(HalfMatch::must(1, 3)); |
173 | | /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345bar"))?); |
174 | | /// # Ok::<(), Box<dyn std::error::Error>>(()) |
175 | | /// ``` |
176 | | #[cfg(feature = "syntax")] |
177 | | pub fn new_many<P: AsRef<str>>( |
178 | | patterns: &[P], |
179 | | ) -> Result<DFA<Vec<u8>>, BuildError> { |
180 | | dense::Builder::new() |
181 | | .build_many(patterns) |
182 | | .and_then(|dense| dense.to_sparse()) |
183 | | } |
184 | | } |
185 | | |
186 | | #[cfg(feature = "dfa-build")] |
187 | | impl DFA<Vec<u8>> { |
188 | | /// Create a new sparse DFA that matches every input.
189 | | /// |
190 | | /// # Example |
191 | | /// |
192 | | /// ``` |
193 | | /// use regex_automata::{ |
194 | | /// dfa::{Automaton, sparse}, |
195 | | /// HalfMatch, Input, |
196 | | /// }; |
197 | | /// |
198 | | /// let dfa = sparse::DFA::always_match()?; |
199 | | /// |
200 | | /// let expected = Some(HalfMatch::must(0, 0)); |
201 | | /// assert_eq!(expected, dfa.try_search_fwd(&Input::new(""))?); |
202 | | /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo"))?); |
203 | | /// # Ok::<(), Box<dyn std::error::Error>>(()) |
204 | | /// ``` |
205 | 0 | pub fn always_match() -> Result<DFA<Vec<u8>>, BuildError> { |
206 | 0 | dense::DFA::always_match()?.to_sparse() |
207 | 0 | } |
208 | | |
209 | | /// Create a new sparse DFA that never matches any input. |
210 | | /// |
211 | | /// # Example |
212 | | /// |
213 | | /// ``` |
214 | | /// use regex_automata::{dfa::{Automaton, sparse}, Input}; |
215 | | /// |
216 | | /// let dfa = sparse::DFA::never_match()?; |
217 | | /// assert_eq!(None, dfa.try_search_fwd(&Input::new(""))?); |
218 | | /// assert_eq!(None, dfa.try_search_fwd(&Input::new("foo"))?); |
219 | | /// # Ok::<(), Box<dyn std::error::Error>>(()) |
220 | | /// ``` |
221 | 0 | pub fn never_match() -> Result<DFA<Vec<u8>>, BuildError> { |
222 | 0 | dense::DFA::never_match()?.to_sparse() |
223 | 0 | } |
224 | | |
225 | | /// The implementation for constructing a sparse DFA from a dense DFA. |
226 | 0 | pub(crate) fn from_dense<T: AsRef<[u32]>>( |
227 | 0 | dfa: &dense::DFA<T>, |
228 | 0 | ) -> Result<DFA<Vec<u8>>, BuildError> { |
229 | | // In order to build the transition table, we need to be able to write |
230 | | // state identifiers for each of the "next" transitions in each state. |
231 | | // Our state identifiers correspond to the byte offset in the |
232 | | // transition table at which the state is encoded. Therefore, we do not |
233 | | // actually know what the state identifiers are until we've allocated |
234 | | // exactly as much space as we need for each state. Thus, construction |
235 | | // of the transition table happens in two passes. |
236 | | // |
237 | | // In the first pass, we fill out the shell of each state, which |
238 | | // includes the transition length, the input byte ranges and |
239 | | // zero-filled space for the transitions and accelerators, if present. |
240 | | // In this first pass, we also build up a map from the state identifier |
241 | | // index of the dense DFA to the state identifier in this sparse DFA. |
242 | | // |
243 | | // In the second pass, we fill in the transitions based on the map |
244 | | // built in the first pass. |
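 | | //
 | | // As a concrete (non-normative) sketch of the encoding written below,
 | | // a match state with two byte-range transitions is laid out as:
 | | //
 | | //   [u16 count = 3, high bit set since this is a match state]
 | | //   [lo1][hi1] [lo2][hi2] [0][0]   <- 3 byte ranges (last is dummy EOI)
 | | //   [3 * StateID]                  <- the 'next' state pointers
 | | //   [u32 plen][plen * PatternID]   <- present for match states only
 | | //   [u8 n][n bytes]                <- accelerator (n may be 0)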
245 | | |
246 | | // The capacity given here reflects a minimum. (Well, the true minimum |
247 | | // is likely even bigger, but hopefully this saves a few reallocs.) |
248 | 0 | let mut sparse = Vec::with_capacity(StateID::SIZE * dfa.state_len()); |
249 | | // This maps state indices from the dense DFA to StateIDs in the sparse |
250 | | // DFA. We build out this map on the first pass, and then use it in the |
251 | | // second pass to back-fill our transitions. |
252 | 0 | let mut remap: Vec<StateID> = vec![DEAD; dfa.state_len()]; |
253 | 0 | for state in dfa.states() { |
254 | 0 | let pos = sparse.len(); |
255 | | |
256 | 0 | remap[dfa.to_index(state.id())] = StateID::new(pos) |
257 | 0 | .map_err(|_| BuildError::too_many_states())?; |
258 | | // zero-filled space for the transition length |
259 | 0 | sparse.push(0); |
260 | 0 | sparse.push(0); |
261 | | |
262 | 0 | let mut transition_len = 0; |
263 | 0 | for (unit1, unit2, _) in state.sparse_transitions() { |
264 | 0 | match (unit1.as_u8(), unit2.as_u8()) { |
265 | 0 | (Some(b1), Some(b2)) => { |
266 | 0 | transition_len += 1; |
267 | 0 | sparse.push(b1); |
268 | 0 | sparse.push(b2); |
269 | 0 | } |
270 | 0 | (None, None) => {} |
271 | | (Some(_), None) | (None, Some(_)) => { |
272 | | // can never occur because sparse_transitions never |
273 | | // groups EOI with any other transition. |
274 | 0 | unreachable!() |
275 | | } |
276 | | } |
277 | | } |
278 | | // Add dummy EOI transition. This is never actually read while |
279 | | // searching, but having space equivalent to the total number |
280 | | // of transitions is convenient. Otherwise, we'd need to track |
281 | | // a different number of transitions for the byte ranges as for |
282 | | // the 'next' states. |
283 | | // |
284 | | // N.B. The loop above is not guaranteed to yield the EOI |
285 | | // transition, since it may point to a DEAD state. By putting |
286 | | // it here, we always write the EOI transition, and thus |
287 | | // guarantee that our transition length is >0. Why do we always |
288 | | // need the EOI transition? Because in order to implement |
289 | | // Automaton::next_eoi_state, this lets us just ask for the last |
290 | | // transition. There are probably other/better ways to do this. |
291 | 0 | transition_len += 1; |
292 | 0 | sparse.push(0); |
293 | 0 | sparse.push(0); |
294 | | |
295 | | // Check some assumptions about transition length. |
296 | 0 | assert_ne!( |
297 | | transition_len, 0, |
298 | 0 | "transition length should be non-zero", |
299 | | ); |
300 | 0 | assert!( |
301 | 0 | transition_len <= 257, |
302 | 0 | "expected transition length {transition_len} to be <= 257", |
303 | | ); |
304 | | |
305 | | // Fill in the transition length. |
306 | | // Since transition length is always <= 257, we use the most |
307 | | // significant bit to indicate whether this is a match state or |
308 | | // not. |
309 | 0 | let ntrans = if dfa.is_match_state(state.id()) { |
310 | 0 | transition_len | (1 << 15) |
311 | | } else { |
312 | 0 | transition_len |
313 | | }; |
314 | 0 | wire::NE::write_u16(ntrans, &mut sparse[pos..]); |
315 | | |
316 | | // zero-fill the actual transitions. |
317 | | // Unwraps are OK since transition_len <= 257 and our minimum
318 | | // supported usize size is 16 bits.
319 | 0 | let zeros = usize::try_from(transition_len) |
320 | 0 | .unwrap() |
321 | 0 | .checked_mul(StateID::SIZE) |
322 | 0 | .unwrap(); |
323 | 0 | sparse.extend(iter::repeat(0).take(zeros)); |
324 | | |
325 | | // If this is a match state, write the pattern IDs matched by this |
326 | | // state. |
327 | 0 | if dfa.is_match_state(state.id()) { |
328 | 0 | let plen = dfa.match_pattern_len(state.id()); |
329 | | // Write the actual pattern IDs with a u32 length prefix. |
330 | | // First, zero-fill space. |
331 | 0 | let mut pos = sparse.len(); |
332 | | // Unwraps are OK since it's guaranteed that plen <= |
333 | | // PatternID::LIMIT, which is in turn guaranteed to fit into a |
334 | | // u32. |
335 | 0 | let zeros = size_of::<u32>() |
336 | 0 | .checked_mul(plen) |
337 | 0 | .unwrap() |
338 | 0 | .checked_add(size_of::<u32>()) |
339 | 0 | .unwrap(); |
340 | 0 | sparse.extend(iter::repeat(0).take(zeros)); |
341 | | |
342 | | // Now write the length prefix. |
343 | 0 | wire::NE::write_u32( |
344 | | // Will never fail since u32::MAX is an invalid pattern ID.
345 | | // Thus, the number of pattern IDs is representable by a |
346 | | // u32. |
347 | 0 | plen.try_into().expect("pattern ID length fits in u32"), |
348 | 0 | &mut sparse[pos..], |
349 | | ); |
350 | 0 | pos += size_of::<u32>(); |
351 | | |
352 | | // Now write the pattern IDs. |
353 | 0 | for &pid in dfa.pattern_id_slice(state.id()) { |
354 | 0 | pos += wire::write_pattern_id::<wire::NE>( |
355 | 0 | pid, |
356 | 0 | &mut sparse[pos..], |
357 | 0 | ); |
358 | 0 | } |
359 | 0 | } |
360 | | |
361 | | // And now add the accelerator, if one exists. An accelerator is |
362 | | // at most 4 bytes and at least 1 byte. The first byte is the |
363 | | // length, N. N bytes follow the length. The set of bytes that |
364 | | // follow correspond (exhaustively) to the bytes that must be seen |
365 | | // to leave this state. |
366 | 0 | let accel = dfa.accelerator(state.id()); |
367 | 0 | sparse.push(accel.len().try_into().unwrap()); |
368 | 0 | sparse.extend_from_slice(accel); |
369 | | } |
370 | | |
371 | 0 | let mut new = DFA { |
372 | 0 | tt: Transitions { |
373 | 0 | sparse, |
374 | 0 | classes: dfa.byte_classes().clone(), |
375 | 0 | state_len: dfa.state_len(), |
376 | 0 | pattern_len: dfa.pattern_len(), |
377 | 0 | }, |
378 | 0 | st: StartTable::from_dense_dfa(dfa, &remap)?, |
379 | 0 | special: dfa.special().remap(|id| remap[dfa.to_index(id)]), |
380 | 0 | pre: dfa.get_prefilter().map(|p| p.clone()), |
381 | 0 | quitset: dfa.quitset().clone(), |
382 | 0 | flags: dfa.flags().clone(), |
383 | | }; |
384 | | // And here's our second pass. Iterate over all of the dense states |
385 | | // again, and update the transitions in each of the states in the |
386 | | // sparse DFA. |
387 | 0 | for old_state in dfa.states() { |
388 | 0 | let new_id = remap[dfa.to_index(old_state.id())]; |
389 | 0 | let mut new_state = new.tt.state_mut(new_id); |
390 | 0 | let sparse = old_state.sparse_transitions(); |
391 | 0 | for (i, (_, _, next)) in sparse.enumerate() { |
392 | 0 | let next = remap[dfa.to_index(next)]; |
393 | 0 | new_state.set_next_at(i, next); |
394 | 0 | } |
395 | | } |
396 | 0 | new.tt.sparse.shrink_to_fit(); |
397 | 0 | new.st.table.shrink_to_fit(); |
398 | 0 | debug!( |
399 | 0 | "created sparse DFA, memory usage: {} (dense memory usage: {})", |
400 | 0 | new.memory_usage(), |
401 | 0 | dfa.memory_usage(), |
402 | | ); |
403 | 0 | Ok(new) |
404 | 0 | } |
405 | | } |
406 | | |
407 | | impl<T: AsRef<[u8]>> DFA<T> { |
408 | | /// Cheaply return a borrowed version of this sparse DFA. Specifically, the |
409 | | /// DFA returned always uses `&[u8]` for its transitions. |
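 | | ///
 | | /// # Example
 | | ///
 | | /// A minimal sketch showing that the borrowed DFA searches exactly like
 | | /// the owned one:
 | | ///
 | | /// ```
 | | /// use regex_automata::{dfa::{Automaton, sparse::DFA}, HalfMatch, Input};
 | | ///
 | | /// let owned = DFA::new("foo[0-9]+")?;
 | | /// let borrowed = owned.as_ref();
 | | ///
 | | /// let expected = Some(HalfMatch::must(0, 8));
 | | /// assert_eq!(expected, owned.try_search_fwd(&Input::new("foo12345"))?);
 | | /// assert_eq!(expected, borrowed.try_search_fwd(&Input::new("foo12345"))?);
 | | /// # Ok::<(), Box<dyn std::error::Error>>(())
 | | /// ```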
410 | | pub fn as_ref<'a>(&'a self) -> DFA<&'a [u8]> { |
411 | | DFA { |
412 | | tt: self.tt.as_ref(), |
413 | | st: self.st.as_ref(), |
414 | | special: self.special, |
415 | | pre: self.pre.clone(), |
416 | | quitset: self.quitset, |
417 | | flags: self.flags, |
418 | | } |
419 | | } |
420 | | |
421 | | /// Return an owned version of this sparse DFA. Specifically, the DFA |
422 | | /// returned always uses `Vec<u8>` for its transitions. |
423 | | /// |
424 | | /// Effectively, this returns a sparse DFA whose transitions live on the |
425 | | /// heap. |
426 | | #[cfg(feature = "alloc")] |
427 | | pub fn to_owned(&self) -> DFA<alloc::vec::Vec<u8>> { |
428 | | DFA { |
429 | | tt: self.tt.to_owned(), |
430 | | st: self.st.to_owned(), |
431 | | special: self.special, |
432 | | pre: self.pre.clone(), |
433 | | quitset: self.quitset, |
434 | | flags: self.flags, |
435 | | } |
436 | | } |
437 | | |
438 | | /// Returns the starting state configuration for this DFA. |
439 | | /// |
440 | | /// The default is [`StartKind::Both`], which means the DFA supports both |
441 | | /// unanchored and anchored searches. However, this can generally lead to |
442 | | /// bigger DFAs. Therefore, a DFA might be compiled with support for just |
443 | | /// unanchored or anchored searches. In that case, running a search with |
444 | | /// an unsupported configuration will return an error.
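 | | ///
 | | /// # Example
 | | ///
 | | /// A sketch of building a sparse DFA that only supports unanchored
 | | /// searches, via [`dense::Config::start_kind`], and then querying it:
 | | ///
 | | /// ```
 | | /// use regex_automata::dfa::{dense, StartKind};
 | | ///
 | | /// let dense_dfa = dense::Builder::new()
 | | ///     .configure(dense::Config::new().start_kind(StartKind::Unanchored))
 | | ///     .build("foo[0-9]+")?;
 | | /// let sparse_dfa = dense_dfa.to_sparse()?;
 | | /// assert_eq!(StartKind::Unanchored, sparse_dfa.start_kind());
 | | /// # Ok::<(), Box<dyn std::error::Error>>(())
 | | /// ```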
445 | | pub fn start_kind(&self) -> StartKind { |
446 | | self.st.kind |
447 | | } |
448 | | |
449 | | /// Returns true only if this DFA has starting states for each pattern. |
450 | | /// |
451 | | /// When a DFA has starting states for each pattern, then a search with the |
452 | | /// DFA can be configured to only look for anchored matches of a specific |
453 | | /// pattern. Specifically, APIs like [`Automaton::try_search_fwd`] can |
454 | | /// accept an [`Anchored::Pattern`] if and only if this method returns true.
455 | | /// Otherwise, an error will be returned. |
456 | | /// |
457 | | /// Note that if the DFA is empty, this always returns false. |
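 | | ///
 | | /// # Example
 | | ///
 | | /// A sketch of enabling anchored starts for each pattern via
 | | /// [`dense::Config::starts_for_each_pattern`]:
 | | ///
 | | /// ```
 | | /// use regex_automata::{
 | | ///     dfa::{dense, Automaton},
 | | ///     Anchored, HalfMatch, Input, PatternID,
 | | /// };
 | | ///
 | | /// let dense_dfa = dense::Builder::new()
 | | ///     .configure(dense::Config::new().starts_for_each_pattern(true))
 | | ///     .build_many(&["[a-z]+", "[0-9]+"])?;
 | | /// let sparse_dfa = dense_dfa.to_sparse()?;
 | | /// assert!(sparse_dfa.starts_for_each_pattern());
 | | ///
 | | /// // Search for an anchored match of pattern 1 only.
 | | /// let input = Input::new("123abc")
 | | ///     .anchored(Anchored::Pattern(PatternID::must(1)));
 | | /// let expected = Some(HalfMatch::must(1, 3));
 | | /// assert_eq!(expected, sparse_dfa.try_search_fwd(&input)?);
 | | /// # Ok::<(), Box<dyn std::error::Error>>(())
 | | /// ```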
458 | | pub fn starts_for_each_pattern(&self) -> bool { |
459 | | self.st.pattern_len.is_some() |
460 | | } |
461 | | |
462 | | /// Returns the equivalence classes that make up the alphabet for this DFA. |
463 | | /// |
464 | | /// Unless [`dense::Config::byte_classes`] was disabled, it is possible |
465 | | /// that multiple distinct bytes are grouped into the same equivalence |
466 | | /// class if it is impossible for them to discriminate between a match and |
467 | | /// a non-match. This has the effect of reducing the overall alphabet size |
468 | | /// and in turn potentially substantially reducing the size of the DFA's |
469 | | /// transition table. |
470 | | /// |
471 | | /// The downside of using equivalence classes like this is that every state |
472 | | /// transition will automatically use this map to convert an arbitrary |
473 | | /// byte to its corresponding equivalence class. In practice this has a |
474 | | /// negligible impact on performance. |
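 | | ///
 | | /// # Example
 | | ///
 | | /// A small sketch; the exact number of classes depends on the pattern,
 | | /// but it never exceeds 257 (every byte value plus the special EOI
 | | /// sentinel):
 | | ///
 | | /// ```
 | | /// use regex_automata::dfa::sparse::DFA;
 | | ///
 | | /// let dfa = DFA::new("[a-z]+")?;
 | | /// assert!(dfa.byte_classes().alphabet_len() <= 257);
 | | /// # Ok::<(), Box<dyn std::error::Error>>(())
 | | /// ```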
475 | | pub fn byte_classes(&self) -> &ByteClasses { |
476 | | &self.tt.classes |
477 | | } |
478 | | |
479 | | /// Returns the memory usage, in bytes, of this DFA. |
480 | | /// |
481 | | /// The memory usage is computed based on the number of bytes used to |
482 | | /// represent this DFA. |
483 | | /// |
484 | | /// This does **not** include the stack size used up by this DFA. To |
485 | | /// compute that, use `std::mem::size_of::<sparse::DFA>()`. |
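 | | ///
 | | /// # Example
 | | ///
 | | /// A minimal sketch:
 | | ///
 | | /// ```
 | | /// use regex_automata::dfa::sparse::DFA;
 | | ///
 | | /// let dfa = DFA::new("foo[0-9]+")?;
 | | /// // Heap memory used to represent the DFA itself.
 | | /// assert!(dfa.memory_usage() > 0);
 | | /// // The stack size of the value is reported separately.
 | | /// println!("{}", std::mem::size_of::<DFA<Vec<u8>>>());
 | | /// # Ok::<(), Box<dyn std::error::Error>>(())
 | | /// ```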
486 | 0 | pub fn memory_usage(&self) -> usize { |
487 | 0 | self.tt.memory_usage() + self.st.memory_usage() |
488 | 0 | } |
489 | | } |
490 | | |
491 | | /// Routines for converting a sparse DFA to other representations, such as raw |
492 | | /// bytes suitable for persistent storage. |
493 | | impl<T: AsRef<[u8]>> DFA<T> { |
494 | | /// Serialize this DFA as raw bytes to a `Vec<u8>` in little endian |
495 | | /// format. |
496 | | /// |
497 | | /// The written bytes are guaranteed to be deserialized correctly and |
498 | | /// without errors in a semver compatible release of this crate by a |
499 | | /// `DFA`'s deserialization APIs (assuming all other criteria for the |
500 | | /// deserialization APIs have been satisfied):
501 | | /// |
502 | | /// * [`DFA::from_bytes`] |
503 | | /// * [`DFA::from_bytes_unchecked`] |
504 | | /// |
505 | | /// Note that unlike a [`dense::DFA`]'s serialization methods, this does |
506 | | /// not add any initial padding to the returned bytes. Padding isn't |
507 | | /// required for sparse DFAs since they have no alignment requirements. |
508 | | /// |
509 | | /// # Example |
510 | | /// |
511 | | /// This example shows how to serialize and deserialize a DFA: |
512 | | /// |
513 | | /// ``` |
514 | | /// use regex_automata::{dfa::{Automaton, sparse::DFA}, HalfMatch, Input}; |
515 | | /// |
516 | | /// // Compile our original DFA. |
517 | | /// let original_dfa = DFA::new("foo[0-9]+")?; |
518 | | /// |
519 | | /// // N.B. We use native endianness here to make the example work, but |
520 | | /// // using to_bytes_little_endian would work on a little endian target. |
521 | | /// let buf = original_dfa.to_bytes_native_endian(); |
522 | | /// // Since sparse DFAs have no alignment requirements, buf contains
523 | | /// // no initial padding and can be deserialized directly.
524 | | /// let dfa: DFA<&[u8]> = DFA::from_bytes(&buf)?.0; |
525 | | /// |
526 | | /// let expected = Some(HalfMatch::must(0, 8)); |
527 | | /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); |
528 | | /// # Ok::<(), Box<dyn std::error::Error>>(()) |
529 | | /// ``` |
530 | | #[cfg(feature = "dfa-build")] |
531 | | pub fn to_bytes_little_endian(&self) -> Vec<u8> { |
532 | | self.to_bytes::<wire::LE>() |
533 | | } |
534 | | |
535 | | /// Serialize this DFA as raw bytes to a `Vec<u8>` in big endian |
536 | | /// format. |
537 | | /// |
538 | | /// The written bytes are guaranteed to be deserialized correctly and |
539 | | /// without errors in a semver compatible release of this crate by a |
540 | | /// `DFA`'s deserialization APIs (assuming all other criteria for the |
541 | | /// deserialization APIs have been satisfied):
542 | | /// |
543 | | /// * [`DFA::from_bytes`] |
544 | | /// * [`DFA::from_bytes_unchecked`] |
545 | | /// |
546 | | /// Note that unlike a [`dense::DFA`]'s serialization methods, this does |
547 | | /// not add any initial padding to the returned bytes. Padding isn't |
548 | | /// required for sparse DFAs since they have no alignment requirements. |
549 | | /// |
550 | | /// # Example |
551 | | /// |
552 | | /// This example shows how to serialize and deserialize a DFA: |
553 | | /// |
554 | | /// ``` |
555 | | /// use regex_automata::{dfa::{Automaton, sparse::DFA}, HalfMatch, Input}; |
556 | | /// |
557 | | /// // Compile our original DFA. |
558 | | /// let original_dfa = DFA::new("foo[0-9]+")?; |
559 | | /// |
560 | | /// // N.B. We use native endianness here to make the example work, but |
561 | | /// // using to_bytes_big_endian would work on a big endian target. |
562 | | /// let buf = original_dfa.to_bytes_native_endian(); |
563 | | /// // Since sparse DFAs have no alignment requirements, buf contains
564 | | /// // no initial padding and can be deserialized directly.
565 | | /// let dfa: DFA<&[u8]> = DFA::from_bytes(&buf)?.0; |
566 | | /// |
567 | | /// let expected = Some(HalfMatch::must(0, 8)); |
568 | | /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); |
569 | | /// # Ok::<(), Box<dyn std::error::Error>>(()) |
570 | | /// ``` |
571 | | #[cfg(feature = "dfa-build")] |
572 | | pub fn to_bytes_big_endian(&self) -> Vec<u8> { |
573 | | self.to_bytes::<wire::BE>() |
574 | | } |
575 | | |
576 | | /// Serialize this DFA as raw bytes to a `Vec<u8>` in native endian |
577 | | /// format. |
578 | | /// |
579 | | /// The written bytes are guaranteed to be deserialized correctly and |
580 | | /// without errors in a semver compatible release of this crate by a |
581 | | /// `DFA`'s deserialization APIs (assuming all other criteria for the |
582 | | /// deserialization APIs have been satisfied):
583 | | /// |
584 | | /// * [`DFA::from_bytes`] |
585 | | /// * [`DFA::from_bytes_unchecked`] |
586 | | /// |
587 | | /// Note that unlike a [`dense::DFA`]'s serialization methods, this does |
588 | | /// not add any initial padding to the returned bytes. Padding isn't |
589 | | /// required for sparse DFAs since they have no alignment requirements. |
590 | | /// |
591 | | /// Generally speaking, native endian format should only be used when |
592 | | /// you know that the target you're compiling the DFA for matches the |
593 | | /// endianness of the target on which you're compiling the DFA. For example,
594 | | /// if serialization and deserialization happen in the same process or on |
595 | | /// the same machine. Otherwise, when serializing a DFA for use in a |
596 | | /// portable environment, you'll almost certainly want to serialize _both_ |
597 | | /// a little endian and a big endian version and then load the correct one |
598 | | /// based on the target's configuration. |
599 | | /// |
600 | | /// # Example |
601 | | /// |
602 | | /// This example shows how to serialize and deserialize a DFA: |
603 | | /// |
604 | | /// ``` |
605 | | /// use regex_automata::{dfa::{Automaton, sparse::DFA}, HalfMatch, Input}; |
606 | | /// |
607 | | /// // Compile our original DFA. |
608 | | /// let original_dfa = DFA::new("foo[0-9]+")?; |
609 | | /// |
610 | | /// let buf = original_dfa.to_bytes_native_endian(); |
611 | | /// // Since sparse DFAs have no alignment requirements, buf contains
612 | | /// // no initial padding and can be deserialized directly.
613 | | /// let dfa: DFA<&[u8]> = DFA::from_bytes(&buf)?.0; |
614 | | /// |
615 | | /// let expected = Some(HalfMatch::must(0, 8)); |
616 | | /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); |
617 | | /// # Ok::<(), Box<dyn std::error::Error>>(()) |
618 | | /// ``` |
619 | | #[cfg(feature = "dfa-build")] |
620 | | pub fn to_bytes_native_endian(&self) -> Vec<u8> { |
621 | | self.to_bytes::<wire::NE>() |
622 | | } |
623 | | |
624 | | /// The implementation of the public `to_bytes` serialization methods, |
625 | | /// which is generic over endianness. |
626 | | #[cfg(feature = "dfa-build")] |
627 | | fn to_bytes<E: Endian>(&self) -> Vec<u8> { |
628 | | let mut buf = vec![0; self.write_to_len()]; |
629 | | // This should always succeed since the only possible serialization |
630 | | // error is providing a buffer that's too small, but we've ensured that |
631 | | // `buf` is big enough here. |
632 | | self.write_to::<E>(&mut buf).unwrap(); |
633 | | buf |
634 | | } |
635 | | |
636 | | /// Serialize this DFA as raw bytes to the given slice, in little endian |
637 | | /// format. Upon success, the total number of bytes written to `dst` is |
638 | | /// returned. |
639 | | /// |
640 | | /// The written bytes are guaranteed to be deserialized correctly and |
641 | | /// without errors in a semver compatible release of this crate by a |
642 | | /// `DFA`'s deserialization APIs (assuming all other criteria for the |
643 | | /// deserialization APIs have been satisfied):
644 | | /// |
645 | | /// * [`DFA::from_bytes`] |
646 | | /// * [`DFA::from_bytes_unchecked`] |
647 | | /// |
648 | | /// # Errors |
649 | | /// |
650 | | /// This returns an error if the given destination slice is not big enough |
651 | | /// to contain the full serialized DFA. If an error occurs, then nothing |
652 | | /// is written to `dst`. |
653 | | /// |
654 | | /// # Example |
655 | | /// |
656 | | /// This example shows how to serialize and deserialize a DFA without |
657 | | /// dynamic memory allocation. |
658 | | /// |
659 | | /// ``` |
660 | | /// use regex_automata::{dfa::{Automaton, sparse::DFA}, HalfMatch, Input}; |
661 | | /// |
662 | | /// // Compile our original DFA. |
663 | | /// let original_dfa = DFA::new("foo[0-9]+")?; |
664 | | /// |
665 | | /// // Create a 4KB buffer on the stack to store our serialized DFA. |
666 | | /// let mut buf = [0u8; 4 * (1<<10)]; |
667 | | /// // N.B. We use native endianness here to make the example work, but |
668 | | /// // using write_to_little_endian would work on a little endian target. |
669 | | /// let written = original_dfa.write_to_native_endian(&mut buf)?; |
670 | | /// let dfa: DFA<&[u8]> = DFA::from_bytes(&buf[..written])?.0; |
671 | | /// |
672 | | /// let expected = Some(HalfMatch::must(0, 8)); |
673 | | /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); |
674 | | /// # Ok::<(), Box<dyn std::error::Error>>(()) |
675 | | /// ``` |
676 | | pub fn write_to_little_endian( |
677 | | &self, |
678 | | dst: &mut [u8], |
679 | | ) -> Result<usize, SerializeError> { |
680 | | self.write_to::<wire::LE>(dst) |
681 | | } |
682 | | |
683 | | /// Serialize this DFA as raw bytes to the given slice, in big endian |
684 | | /// format. Upon success, the total number of bytes written to `dst` is |
685 | | /// returned. |
686 | | /// |
687 | | /// The written bytes are guaranteed to be deserialized correctly and |
688 | | /// without errors in a semver compatible release of this crate by a |
689 | | /// `DFA`'s deserialization APIs (assuming all other criteria for the |
690 | | /// deserialization APIs have been satisfied):
691 | | /// |
692 | | /// * [`DFA::from_bytes`] |
693 | | /// * [`DFA::from_bytes_unchecked`] |
694 | | /// |
695 | | /// # Errors |
696 | | /// |
697 | | /// This returns an error if the given destination slice is not big enough |
698 | | /// to contain the full serialized DFA. If an error occurs, then nothing |
699 | | /// is written to `dst`. |
700 | | /// |
701 | | /// # Example |
702 | | /// |
703 | | /// This example shows how to serialize and deserialize a DFA without |
704 | | /// dynamic memory allocation. |
705 | | /// |
706 | | /// ``` |
707 | | /// use regex_automata::{dfa::{Automaton, sparse::DFA}, HalfMatch, Input}; |
708 | | /// |
709 | | /// // Compile our original DFA. |
710 | | /// let original_dfa = DFA::new("foo[0-9]+")?; |
711 | | /// |
712 | | /// // Create a 4KB buffer on the stack to store our serialized DFA. |
713 | | /// let mut buf = [0u8; 4 * (1<<10)]; |
714 | | /// // N.B. We use native endianness here to make the example work, but |
715 | | /// // using write_to_big_endian would work on a big endian target. |
716 | | /// let written = original_dfa.write_to_native_endian(&mut buf)?; |
717 | | /// let dfa: DFA<&[u8]> = DFA::from_bytes(&buf[..written])?.0; |
718 | | /// |
719 | | /// let expected = Some(HalfMatch::must(0, 8)); |
720 | | /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); |
721 | | /// # Ok::<(), Box<dyn std::error::Error>>(()) |
722 | | /// ``` |
723 | | pub fn write_to_big_endian( |
724 | | &self, |
725 | | dst: &mut [u8], |
726 | | ) -> Result<usize, SerializeError> { |
727 | | self.write_to::<wire::BE>(dst) |
728 | | } |
729 | | |
730 | | /// Serialize this DFA as raw bytes to the given slice, in native endian |
731 | | /// format. Upon success, the total number of bytes written to `dst` is |
732 | | /// returned. |
733 | | /// |
734 | | /// The written bytes are guaranteed to be deserialized correctly and |
735 | | /// without errors in a semver compatible release of this crate by a |
736 | | /// `DFA`'s deserialization APIs (assuming all other criteria for the |
737 | | /// deserialization APIs have been satisfied):
738 | | /// |
739 | | /// * [`DFA::from_bytes`] |
740 | | /// * [`DFA::from_bytes_unchecked`] |
741 | | /// |
742 | | /// Generally speaking, native endian format should only be used when |
743 | | /// you know that the target you're compiling the DFA for matches the |
744 | | /// endianness of the target on which you're compiling the DFA. For example,
745 | | /// if serialization and deserialization happen in the same process or on |
746 | | /// the same machine. Otherwise, when serializing a DFA for use in a |
747 | | /// portable environment, you'll almost certainly want to serialize _both_ |
748 | | /// a little endian and a big endian version and then load the correct one |
749 | | /// based on the target's configuration. |
750 | | /// |
751 | | /// # Errors |
752 | | /// |
753 | | /// This returns an error if the given destination slice is not big enough |
754 | | /// to contain the full serialized DFA. If an error occurs, then nothing |
755 | | /// is written to `dst`. |
756 | | /// |
757 | | /// # Example |
758 | | /// |
759 | | /// This example shows how to serialize and deserialize a DFA without |
760 | | /// dynamic memory allocation. |
761 | | /// |
762 | | /// ``` |
763 | | /// use regex_automata::{dfa::{Automaton, sparse::DFA}, HalfMatch, Input}; |
764 | | /// |
765 | | /// // Compile our original DFA. |
766 | | /// let original_dfa = DFA::new("foo[0-9]+")?; |
767 | | /// |
768 | | /// // Create a 4KB buffer on the stack to store our serialized DFA. |
769 | | /// let mut buf = [0u8; 4 * (1<<10)]; |
770 | | /// let written = original_dfa.write_to_native_endian(&mut buf)?; |
771 | | /// let dfa: DFA<&[u8]> = DFA::from_bytes(&buf[..written])?.0; |
772 | | /// |
773 | | /// let expected = Some(HalfMatch::must(0, 8)); |
774 | | /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); |
775 | | /// # Ok::<(), Box<dyn std::error::Error>>(()) |
776 | | /// ``` |
777 | | pub fn write_to_native_endian( |
778 | | &self, |
779 | | dst: &mut [u8], |
780 | | ) -> Result<usize, SerializeError> { |
781 | | self.write_to::<wire::NE>(dst) |
782 | | } |
783 | | |
784 | | /// The implementation of the public `write_to` serialization methods, |
785 | | /// which is generic over endianness. |
786 | | fn write_to<E: Endian>( |
787 | | &self, |
788 | | dst: &mut [u8], |
789 | | ) -> Result<usize, SerializeError> { |
790 | | let mut nw = 0; |
791 | | nw += wire::write_label(LABEL, &mut dst[nw..])?; |
792 | | nw += wire::write_endianness_check::<E>(&mut dst[nw..])?; |
793 | | nw += wire::write_version::<E>(VERSION, &mut dst[nw..])?; |
794 | | nw += { |
795 | | // Currently unused, intended for future flexibility |
796 | | E::write_u32(0, &mut dst[nw..]); |
797 | | size_of::<u32>() |
798 | | }; |
799 | | nw += self.flags.write_to::<E>(&mut dst[nw..])?; |
800 | | nw += self.tt.write_to::<E>(&mut dst[nw..])?; |
801 | | nw += self.st.write_to::<E>(&mut dst[nw..])?; |
802 | | nw += self.special.write_to::<E>(&mut dst[nw..])?; |
803 | | nw += self.quitset.write_to::<E>(&mut dst[nw..])?; |
804 | | Ok(nw) |
805 | | } |
806 | | |
807 | | /// Return the total number of bytes required to serialize this DFA. |
808 | | /// |
809 | | /// This is useful for determining the size of the buffer required to pass |
810 | | /// to one of the serialization routines: |
811 | | /// |
812 | | /// * [`DFA::write_to_little_endian`] |
813 | | /// * [`DFA::write_to_big_endian`] |
814 | | /// * [`DFA::write_to_native_endian`] |
815 | | /// |
816 | | /// Passing a buffer smaller than the size returned by this method will |
817 | | /// result in a serialization error. |
818 | | /// |
819 | | /// # Example |
820 | | /// |
821 | | /// This example shows how to dynamically allocate enough room to serialize |
822 | | /// a sparse DFA. |
823 | | /// |
824 | | /// ``` |
825 | | /// use regex_automata::{dfa::{Automaton, sparse::DFA}, HalfMatch, Input}; |
826 | | /// |
827 | | /// // Compile our original DFA. |
828 | | /// let original_dfa = DFA::new("foo[0-9]+")?; |
829 | | /// |
830 | | /// let mut buf = vec![0; original_dfa.write_to_len()]; |
831 | | /// let written = original_dfa.write_to_native_endian(&mut buf)?; |
832 | | /// let dfa: DFA<&[u8]> = DFA::from_bytes(&buf[..written])?.0; |
833 | | /// |
834 | | /// let expected = Some(HalfMatch::must(0, 8)); |
835 | | /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); |
836 | | /// # Ok::<(), Box<dyn std::error::Error>>(()) |
837 | | /// ``` |
838 | | pub fn write_to_len(&self) -> usize { |
839 | | wire::write_label_len(LABEL) |
840 | | + wire::write_endianness_check_len() |
841 | | + wire::write_version_len() |
842 | | + size_of::<u32>() // unused, intended for future flexibility |
843 | | + self.flags.write_to_len() |
844 | | + self.tt.write_to_len() |
845 | | + self.st.write_to_len() |
846 | | + self.special.write_to_len() |
847 | | + self.quitset.write_to_len() |
848 | | } |
849 | | } |
850 | | |
851 | | impl<'a> DFA<&'a [u8]> { |
852 | | /// Safely deserialize a sparse DFA. Upon success, this returns both
853 | | /// the deserialized DFA and the number of bytes read from the given
854 | | /// slice. Namely, the contents of the slice beyond the DFA are not
855 | | /// read.
856 | | /// |
857 | | /// Deserializing a DFA using this routine will never allocate heap memory. |
858 | | /// For safety purposes, the DFA's transitions will be verified such that |
859 | | /// every transition points to a valid state. If this verification is too |
860 | | /// costly, then a [`DFA::from_bytes_unchecked`] API is provided, which |
861 | | /// will always execute in constant time. |
862 | | /// |
863 | | /// The bytes given must be generated by one of the serialization APIs |
864 | | /// of a `DFA` using a semver compatible release of this crate. Those |
865 | | /// include: |
866 | | /// |
867 | | /// * [`DFA::to_bytes_little_endian`] |
868 | | /// * [`DFA::to_bytes_big_endian`] |
869 | | /// * [`DFA::to_bytes_native_endian`] |
870 | | /// * [`DFA::write_to_little_endian`] |
871 | | /// * [`DFA::write_to_big_endian`] |
872 | | /// * [`DFA::write_to_native_endian`] |
873 | | /// |
874 | | /// The `to_bytes` methods allocate and return a `Vec<u8>` for you. The |
875 | | /// `write_to` methods do not allocate and write to an existing slice |
876 | | /// (which may be on the stack). Since deserialization always uses the |
877 | | /// native endianness of the target platform, the serialization API you use |
878 | | /// should match the endianness of the target platform. (It's often a good |
879 | | /// idea to generate serialized DFAs for both forms of endianness and then |
880 | | /// load the correct one based on endianness.) |
881 | | /// |
882 | | /// # Errors |
883 | | /// |
884 | | /// Generally speaking, it's easier to state the conditions in which an |
885 | | /// error is _not_ returned. All of the following must be true: |
886 | | /// |
887 | | /// * The bytes given must be produced by one of the serialization APIs |
888 | | /// on this DFA, as mentioned above. |
889 | | /// * The endianness of the target platform matches the endianness used to |
890 | | /// serialize the provided DFA.
891 | | /// |
892 | | /// If any of the above are not true, then an error will be returned. |
893 | | /// |
894 | | /// Note that unlike deserializing a [`dense::DFA`], deserializing a sparse |
895 | | /// DFA has no alignment requirements. That is, an alignment of `1` is |
896 | | /// valid. |
897 | | /// |
898 | | /// # Panics |
899 | | /// |
900 | | /// This routine will never panic for any input. |
901 | | /// |
902 | | /// # Example |
903 | | /// |
904 | | /// This example shows how to serialize a DFA to raw bytes, deserialize it |
905 | | /// and then use it for searching. |
906 | | /// |
907 | | /// ``` |
908 | | /// use regex_automata::{dfa::{Automaton, sparse::DFA}, HalfMatch, Input}; |
909 | | /// |
910 | | /// let initial = DFA::new("foo[0-9]+")?; |
911 | | /// let bytes = initial.to_bytes_native_endian(); |
912 | | /// let dfa: DFA<&[u8]> = DFA::from_bytes(&bytes)?.0; |
913 | | /// |
914 | | /// let expected = Some(HalfMatch::must(0, 8)); |
915 | | /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); |
916 | | /// # Ok::<(), Box<dyn std::error::Error>>(()) |
917 | | /// ``` |
918 | | /// |
919 | | /// # Example: loading a DFA from static memory |
920 | | /// |
921 | | /// One use case this library supports is the ability to serialize a |
922 | | /// DFA to disk and then use `include_bytes!` to store it in a compiled |
923 | | /// Rust program. Those bytes can then be cheaply deserialized into a |
924 | | /// `DFA` structure at runtime and used for searching without having to |
925 | | /// re-compile the DFA (which can be quite costly). |
926 | | /// |
927 | | /// We can show this in two parts. The first part is serializing the DFA to |
928 | | /// a file: |
929 | | /// |
930 | | /// ```no_run |
931 | | /// use regex_automata::dfa::sparse::DFA; |
932 | | /// |
933 | | /// let dfa = DFA::new("foo[0-9]+")?; |
934 | | /// |
935 | | /// // Write a big endian serialized version of this DFA to a file. |
936 | | /// let bytes = dfa.to_bytes_big_endian(); |
937 | | /// std::fs::write("foo.bigendian.dfa", &bytes)?; |
938 | | /// |
939 | | /// // Do it again, but this time for little endian. |
940 | | /// let bytes = dfa.to_bytes_little_endian(); |
941 | | /// std::fs::write("foo.littleendian.dfa", &bytes)?; |
942 | | /// # Ok::<(), Box<dyn std::error::Error>>(()) |
943 | | /// ``` |
944 | | /// |
945 | | /// And now the second part is embedding the DFA into the compiled program |
946 | | /// and deserializing it at runtime on first use. We use conditional |
947 | | /// compilation to choose the correct endianness. We do not need to employ |
948 | | /// any special tricks to ensure a proper alignment, since a sparse DFA has |
949 | | /// no alignment requirements. |
950 | | /// |
951 | | /// ```no_run |
952 | | /// use regex_automata::{ |
953 | | /// dfa::{Automaton, sparse::DFA}, |
954 | | /// util::lazy::Lazy, |
955 | | /// HalfMatch, Input, |
956 | | /// }; |
957 | | /// |
958 | | /// // This crate provides its own "lazy" type, kind of like |
959 | | /// // lazy_static! or once_cell::sync::Lazy. But it works in no-alloc |
960 | | /// // no-std environments and lets us write this using completely
961 | | /// // safe code. |
962 | | /// static RE: Lazy<DFA<&'static [u8]>> = Lazy::new(|| { |
963 | | /// # const _: &str = stringify! { |
964 | | /// #[cfg(target_endian = "big")] |
965 | | /// static BYTES: &[u8] = include_bytes!("foo.bigendian.dfa"); |
966 | | /// #[cfg(target_endian = "little")] |
967 | | /// static BYTES: &[u8] = include_bytes!("foo.littleendian.dfa"); |
968 | | /// # }; |
969 | | /// # static BYTES: &[u8] = b""; |
970 | | /// |
971 | | /// let (dfa, _) = DFA::from_bytes(BYTES) |
972 | | /// .expect("serialized DFA should be valid"); |
973 | | /// dfa |
974 | | /// }); |
975 | | /// |
976 | | /// let expected = Ok(Some(HalfMatch::must(0, 8))); |
977 | | /// assert_eq!(expected, RE.try_search_fwd(&Input::new("foo12345"))); |
978 | | /// ``` |
979 | | /// |
980 | | /// Alternatively, consider using |
981 | | /// [`lazy_static`](https://crates.io/crates/lazy_static) |
982 | | /// or |
983 | | /// [`once_cell`](https://crates.io/crates/once_cell), |
984 | | /// which will guarantee safety for you. |
985 | 3.08k | pub fn from_bytes( |
986 | 3.08k | slice: &'a [u8], |
987 | 3.08k | ) -> Result<(DFA<&'a [u8]>, usize), DeserializeError> { |
988 | | // SAFETY: This is safe because we validate both the sparse transitions |
989 | | // (by trying to decode every state) and start state ID list below. If |
990 | | // either validation fails, then we return an error. |
991 | 3.08k | let (dfa, nread) = unsafe { DFA::from_bytes_unchecked(slice)? }; |
992 | 1.99k | let seen = dfa.tt.validate(&dfa.special)?; |
993 | 1.67k | dfa.st.validate(&dfa.special, &seen)?; |
994 | | // N.B. dfa.special doesn't have a way to do unchecked deserialization, |
995 | | // so it has already been validated. |
996 | 1.63k | Ok((dfa, nread)) |
997 | 3.08k | } |
998 | | |
999 | | /// Deserialize a DFA in constant time by omitting the verification of
1000 | | /// the validity of the sparse transitions, i.e., without checking that
1001 | | /// every transition points to a valid state.
1002 | | /// |
1003 | | /// This is just like [`DFA::from_bytes`], except it can potentially return |
1004 | | /// a DFA that exhibits undefined behavior if its transitions contain
1005 | | /// invalid state identifiers. |
1006 | | /// |
1007 | | /// This routine is useful if you need to deserialize a DFA cheaply and |
1008 | | /// cannot afford the transition validation performed by `from_bytes`. |
1009 | | /// |
1010 | | /// # Safety |
1011 | | /// |
1012 | | /// This routine is not safe because it permits callers to provide |
1013 | | /// arbitrary transitions with possibly incorrect state identifiers. While |
1014 | | /// the various serialization routines will never return an incorrect |
1015 | | /// DFA, there is no guarantee that the bytes provided here are correct. |
1016 | | /// While `from_bytes_unchecked` will still do several forms of basic |
1017 | | /// validation, this routine does not check that the transitions themselves |
1018 | | /// are correct. Given an incorrect transition table, it is possible for |
1019 | | /// the search routines to access out-of-bounds memory because of explicit |
1020 | | /// bounds check elision. |
1021 | | /// |
1022 | | /// # Example |
1023 | | /// |
1024 | | /// ``` |
1025 | | /// use regex_automata::{dfa::{Automaton, sparse::DFA}, HalfMatch, Input}; |
1026 | | /// |
1027 | | /// let initial = DFA::new("foo[0-9]+")?; |
1028 | | /// let bytes = initial.to_bytes_native_endian(); |
1029 | | /// // SAFETY: This is guaranteed to be safe since the bytes given come |
1030 | | /// // directly from a compatible serialization routine. |
1031 | | /// let dfa: DFA<&[u8]> = unsafe { DFA::from_bytes_unchecked(&bytes)?.0 }; |
1032 | | /// |
1033 | | /// let expected = Some(HalfMatch::must(0, 8)); |
1034 | | /// assert_eq!(expected, dfa.try_search_fwd(&Input::new("foo12345"))?); |
1035 | | /// # Ok::<(), Box<dyn std::error::Error>>(()) |
1036 | | /// ``` |
1037 | 3.08k | pub unsafe fn from_bytes_unchecked( |
1038 | 3.08k | slice: &'a [u8], |
1039 | 3.08k | ) -> Result<(DFA<&'a [u8]>, usize), DeserializeError> { |
1040 | 3.08k | let mut nr = 0; |
1041 | | |
1042 | 3.08k | nr += wire::read_label(&slice[nr..], LABEL)?; |
1043 | 3.08k | nr += wire::read_endianness_check(&slice[nr..])?; |
1044 | 3.08k | nr += wire::read_version(&slice[nr..], VERSION)?; |
1045 | | |
1046 | 3.08k | let _unused = wire::try_read_u32(&slice[nr..], "unused space")?; |
1047 | 3.05k | nr += size_of::<u32>(); |
1048 | | |
1049 | 3.05k | let (flags, nread) = Flags::from_bytes(&slice[nr..])?; |
1050 | 3.05k | nr += nread; |
1051 | | |
1052 | 3.05k | let (tt, nread) = Transitions::from_bytes_unchecked(&slice[nr..])?; |
1053 | 2.87k | nr += nread; |
1054 | | |
1055 | 2.87k | let (st, nread) = StartTable::from_bytes_unchecked(&slice[nr..])?; |
1056 | 2.49k | nr += nread; |
1057 | | |
1058 | 2.49k | let (special, nread) = Special::from_bytes(&slice[nr..])?; |
1059 | 2.11k | nr += nread; |
1060 | 2.11k | if special.max.as_usize() >= tt.sparse().len() { |
1061 | 80 | return Err(DeserializeError::generic( |
1062 | 80 | "max should not be greater than or equal to sparse bytes", |
1063 | 80 | )); |
1064 | 2.03k | } |
1065 | | |
1066 | 2.03k | let (quitset, nread) = ByteSet::from_bytes(&slice[nr..])?; |
1067 | 1.99k | nr += nread; |
1068 | | |
1069 | | // Prefilters don't support serialization, so they're always absent. |
1070 | 1.99k | let pre = None; |
1071 | 1.99k | Ok((DFA { tt, st, special, pre, quitset, flags }, nr)) |
1072 | 3.08k | } |
1073 | | } |
1074 | | |
1075 | | /// Other routines that work for all `T`. |
1076 | | impl<T> DFA<T> { |
1077 | | /// Set or unset the prefilter attached to this DFA. |
1078 | | /// |
1079 | | /// This is useful when one has deserialized a DFA from `&[u8]`. |
1080 | | /// Deserialization does not currently include prefilters, so if you |
1081 | | /// want prefilter acceleration, you'll need to build a prefilter and
1082 | | /// attach it here.
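 | | ///
 | | /// # Example
 | | ///
 | | /// A sketch (assuming the `syntax` and `dfa-build` features are enabled
 | | /// for building, and that default crate features provide a literal
 | | /// prefilter) of attaching a prefilter built with [`Prefilter::new`]:
 | | ///
 | | /// ```
 | | /// use regex_automata::{
 | | ///     dfa::{sparse::DFA, Automaton},
 | | ///     util::prefilter::Prefilter,
 | | ///     MatchKind,
 | | /// };
 | | ///
 | | /// let mut dfa = DFA::new("foo[0-9]+")?;
 | | /// let pre = Prefilter::new(MatchKind::LeftmostFirst, &["foo"]);
 | | /// dfa.set_prefilter(pre);
 | | /// assert!(dfa.get_prefilter().is_some());
 | | /// # Ok::<(), Box<dyn std::error::Error>>(())
 | | /// ```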
1083 | | pub fn set_prefilter(&mut self, prefilter: Option<Prefilter>) { |
1084 | | self.pre = prefilter |
1085 | | } |
1086 | | } |
1087 | | |
1088 | | impl<T: AsRef<[u8]>> fmt::Debug for DFA<T> { |
1089 | | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { |
1090 | | writeln!(f, "sparse::DFA(")?; |
1091 | | for state in self.tt.states() { |
1092 | | fmt_state_indicator(f, self, state.id())?; |
1093 | | writeln!(f, "{:06?}: {:?}", state.id().as_usize(), state)?; |
1094 | | } |
1095 | | writeln!(f)?;
1096 | | for (i, (start_id, anchored, sty)) in self.st.iter().enumerate() { |
1097 | | if i % self.st.stride == 0 { |
1098 | | match anchored { |
1099 | | Anchored::No => writeln!(f, "START-GROUP(unanchored)")?, |
1100 | | Anchored::Yes => writeln!(f, "START-GROUP(anchored)")?, |
1101 | | Anchored::Pattern(pid) => writeln!( |
1102 | | f, |
1103 | | "START-GROUP(pattern: {:?})",
1104 | | pid.as_usize() |
1105 | | )?, |
1106 | | } |
1107 | | } |
1108 | | writeln!(f, " {:?} => {:06?}", sty, start_id.as_usize())?; |
1109 | | } |
1110 | | writeln!(f, "state length: {:?}", self.tt.state_len)?; |
1111 | | writeln!(f, "pattern length: {:?}", self.pattern_len())?; |
1112 | | writeln!(f, "flags: {:?}", self.flags)?; |
1113 | | writeln!(f, ")")?; |
1114 | | Ok(()) |
1115 | | } |
1116 | | } |
1117 | | |
1118 | | // SAFETY: We assert that our implementation of each method is correct. |
1119 | | unsafe impl<T: AsRef<[u8]>> Automaton for DFA<T> { |
1120 | | #[inline] |
1121 | 165k | fn is_special_state(&self, id: StateID) -> bool { |
1122 | 165k | self.special.is_special_state(id) |
1123 | 165k | } |
1124 | | |
1125 | | #[inline] |
1126 | 14.1k | fn is_dead_state(&self, id: StateID) -> bool { |
1127 | 14.1k | self.special.is_dead_state(id) |
1128 | 14.1k | } |
1129 | | |
1130 | | #[inline] |
1131 | 0 | fn is_quit_state(&self, id: StateID) -> bool { |
1132 | 0 | self.special.is_quit_state(id) |
1133 | 0 | } |
1134 | | |
1135 | | #[inline] |
1136 | 88.5k | fn is_match_state(&self, id: StateID) -> bool { |
1137 | 88.5k | self.special.is_match_state(id) |
1138 | 88.5k | } |
1139 | | |
1140 | | #[inline] |
1141 | 82.1k | fn is_start_state(&self, id: StateID) -> bool { |
1142 | 82.1k | self.special.is_start_state(id) |
1143 | 82.1k | } |
1144 | | |
1145 | | #[inline] |
1146 | 82.1k | fn is_accel_state(&self, id: StateID) -> bool { |
1147 | 82.1k | self.special.is_accel_state(id) |
1148 | 82.1k | } |
1149 | | |
1150 | | // This is marked as inline to help dramatically boost sparse searching, |
1151 | | // which decodes each state it enters to follow the next transition. |
1152 | | #[cfg_attr(feature = "perf-inline", inline(always))] |
1153 | 83.5k | fn next_state(&self, current: StateID, input: u8) -> StateID { |
1154 | 83.5k | let input = self.tt.classes.get(input); |
1155 | 83.5k | self.tt.state(current).next(input) |
1156 | 83.5k | } |
1157 | | |
1158 | | #[inline] |
1159 | 83.5k | unsafe fn next_state_unchecked( |
1160 | 83.5k | &self, |
1161 | 83.5k | current: StateID, |
1162 | 83.5k | input: u8, |
1163 | 83.5k | ) -> StateID { |
1164 | 83.5k | self.next_state(current, input) |
1165 | 83.5k | } |
1166 | | |
1167 | | #[inline] |
1168 | 12.4k | fn next_eoi_state(&self, current: StateID) -> StateID { |
1169 | 12.4k | self.tt.state(current).next_eoi() |
1170 | 12.4k | } |
1171 | | |
1172 | | #[inline] |
1173 | | fn pattern_len(&self) -> usize { |
1174 | | self.tt.pattern_len |
1175 | | } |
1176 | | |
1177 | | #[inline] |
1178 | | fn match_len(&self, id: StateID) -> usize { |
1179 | | self.tt.state(id).pattern_len() |
1180 | | } |
1181 | | |
1182 | | #[inline] |
1183 | 25.3k | fn match_pattern(&self, id: StateID, match_index: usize) -> PatternID { |
1184 | | // This is an optimization for the very common case of a DFA with a |
1185 | | // single pattern. This conditional avoids a somewhat more costly path |
1186 | | // that finds the pattern ID from the state machine, which requires |
1187 | | // a bit of slicing/pointer-chasing. This optimization tends to only |
1188 | | // matter when matches are frequent. |
1189 | 25.3k | if self.tt.pattern_len == 1 { |
1190 | 2.07k | return PatternID::ZERO; |
1191 | 23.2k | } |
1192 | 23.2k | self.tt.state(id).pattern_id(match_index) |
1193 | 25.3k | } |
1194 | | |
1195 | | #[inline] |
1196 | 1.63k | fn has_empty(&self) -> bool { |
1197 | 1.63k | self.flags.has_empty |
1198 | 1.63k | } |
1199 | | |
1200 | | #[inline] |
1201 | 1.19k | fn is_utf8(&self) -> bool { |
1202 | 1.19k | self.flags.is_utf8 |
1203 | 1.19k | } |
1204 | | |
1205 | | #[inline] |
1206 | | fn is_always_start_anchored(&self) -> bool { |
1207 | | self.flags.is_always_start_anchored |
1208 | | } |
1209 | | |
1210 | | #[inline] |
1211 | 26.8k | fn start_state( |
1212 | 26.8k | &self, |
1213 | 26.8k | config: &start::Config, |
1214 | 26.8k | ) -> Result<StateID, StartError> { |
1215 | 26.8k | let anchored = config.get_anchored(); |
1216 | 26.8k | let start = match config.get_look_behind() { |
1217 | 1.63k | None => Start::Text, |
1218 | 25.2k | Some(byte) => { |
1219 | 25.2k | if !self.quitset.is_empty() && self.quitset.contains(byte) { |
1220 | 165 | return Err(StartError::quit(byte)); |
1221 | 25.0k | } |
1222 | 25.0k | self.st.start_map.get(byte) |
1223 | | } |
1224 | | }; |
1225 | 26.6k | self.st.start(anchored, start) |
1226 | 26.8k | } |
1227 | | |
1228 | | #[inline] |
1229 | 26.8k | fn universal_start_state(&self, mode: Anchored) -> Option<StateID> { |
1230 | 26.8k | match mode { |
1231 | 26.8k | Anchored::No => self.st.universal_start_unanchored, |
1232 | 0 | Anchored::Yes => self.st.universal_start_anchored, |
1233 | 0 | Anchored::Pattern(_) => None, |
1234 | | } |
1235 | 26.8k | } |
1236 | | |
1237 | | #[inline] |
1238 | 42.2k | fn accelerator(&self, id: StateID) -> &[u8] { |
1239 | 42.2k | self.tt.state(id).accelerator() |
1240 | 42.2k | } |
1241 | | |
1242 | | #[inline] |
1243 | 26.8k | fn get_prefilter(&self) -> Option<&Prefilter> { |
1244 | 26.8k | self.pre.as_ref() |
1245 | 26.8k | } |
1246 | | } |
1247 | | |
1248 | | /// The transition table portion of a sparse DFA. |
1249 | | /// |
1250 | | /// The transition table is the core part of the DFA in that it describes how |
1251 | | /// to move from one state to another based on the input sequence observed. |
1252 | | /// |
1253 | | /// Unlike a typical dense table based DFA, states in a sparse transition |
1254 | | /// table have variable size. That is, states with more transitions use more |
1255 | | /// space than states with fewer transitions. This means that finding the next |
1256 | | /// transition takes more work than with a dense DFA, but also typically uses |
1257 | | /// much less space. |
1258 | | #[derive(Clone)] |
1259 | | struct Transitions<T> { |
1260 | | /// The raw encoding of each state in this DFA. |
1261 | | /// |
1262 | | /// Each state has the following information: |
1263 | | /// |
1264 | | /// * A set of transitions to subsequent states. Transitions to the dead |
1265 | | /// state are omitted. |
1266 | | /// * If the state can be accelerated, then any additional accelerator |
1267 | | /// information. |
1268 | | /// * If the state is a match state, then the state contains all pattern |
1269 | | /// IDs that match when in that state. |
1270 | | /// |
1271 | | /// To decode a state, use Transitions::state. |
1272 | | /// |
1273 | | /// In practice, T is either Vec<u8> or &[u8]. |
1274 | | sparse: T, |
1275 | | /// A set of equivalence classes, where a single equivalence class |
1276 | | /// represents a set of bytes that never discriminate between a match |
1277 | | /// and a non-match in the DFA. Each equivalence class corresponds to a |
1278 | | /// single character in this DFA's alphabet, where the maximum number of |
1279 | | /// characters is 257 (each possible value of a byte plus the special |
1280 | | /// EOI transition). Consequently, the number of equivalence classes |
1281 | | /// corresponds to the number of transitions for each DFA state. Note |
1282 | | /// though that the *space* used by each DFA state in the transition table |
1283 | | /// may be larger, since each sparse state is encoded with a variable
1284 | | /// length (see the documentation for the `sparse` field above).
1285 | | /// |
1286 | | /// The only time the number of equivalence classes is fewer than 257 is |
1287 | | /// if the DFA's kind uses byte classes, which is the default. Equivalence
1288 | | /// classes should generally only be disabled when debugging, so that |
1289 | | /// the transitions themselves aren't obscured. Disabling them has no |
1290 | | /// other benefit, since the equivalence class map is always used while |
1291 | | /// searching. In the vast majority of cases, the number of equivalence |
1292 | | /// classes is substantially smaller than 257, particularly when large |
1293 | | /// Unicode classes aren't used. |
1294 | | /// |
1295 | | /// N.B. Equivalence classes aren't particularly useful in a sparse DFA |
1296 | | /// in the current implementation, since equivalence classes generally tend |
1297 | | /// to correspond to contiguous ranges of bytes that map to the same
1298 | | /// transition. So in a sparse DFA, equivalence classes don't really lead |
1299 | | /// to a space savings. In the future, it would be good to try and remove |
1300 | | /// them from sparse DFAs entirely, but it requires a bit of work since sparse
1301 | | /// DFAs are built from dense DFAs, which are in turn built on top of |
1302 | | /// equivalence classes. |
1303 | | classes: ByteClasses, |
1304 | | /// The total number of states in this DFA. Note that a DFA always has at |
1305 | | /// least one state---the dead state---even for the empty DFA. In particular,
1306 | | /// the dead state always has ID 0 and is correspondingly always the first |
1307 | | /// state. The dead state is never a match state. |
1308 | | state_len: usize, |
1309 | | /// The total number of unique patterns represented by these match states. |
1310 | | pattern_len: usize, |
1311 | | } |
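
To make the encoding concrete, here is a minimal sketch (using only the standard library, not this crate's internal `wire` helpers) of how the fixed header of each sparse state can be decoded. It mirrors the logic in `Transitions::state` below; the function name is hypothetical.

```
/// Decodes the leading u16 header of a sparse state, assuming native
/// endian encoding and at least two bytes of input. A sketch of the
/// layout described above, not part of this crate's API.
fn decode_state_header(state: &[u8]) -> (bool, usize) {
    let raw = usize::from(u16::from_ne_bytes([state[0], state[1]]));
    // The high bit of the transition count marks a match state.
    let is_match = raw & (1 << 15) != 0;
    // The low 15 bits are the actual number of transitions.
    let ntrans = raw & !(1 << 15);
    (is_match, ntrans)
}
```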
1312 | | |
1313 | | impl<'a> Transitions<&'a [u8]> { |
1314 | 3.05k | unsafe fn from_bytes_unchecked( |
1315 | 3.05k | mut slice: &'a [u8], |
1316 | 3.05k | ) -> Result<(Transitions<&'a [u8]>, usize), DeserializeError> { |
1317 | 3.05k | let slice_start = slice.as_ptr().as_usize(); |
1318 | | |
1319 | 3.04k | let (state_len, nr) = |
1320 | 3.05k | wire::try_read_u32_as_usize(&slice, "state length")?; |
1321 | 3.04k | slice = &slice[nr..]; |
1322 | | |
1323 | 3.04k | let (pattern_len, nr) = |
1324 | 3.04k | wire::try_read_u32_as_usize(&slice, "pattern length")?; |
1325 | 3.04k | slice = &slice[nr..]; |
1326 | | |
1327 | 3.04k | let (classes, nr) = ByteClasses::from_bytes(&slice)?; |
1328 | 2.92k | slice = &slice[nr..]; |
1329 | | |
1330 | 2.92k | let (len, nr) = |
1331 | 2.92k | wire::try_read_u32_as_usize(&slice, "sparse transitions length")?; |
1332 | 2.92k | slice = &slice[nr..]; |
1333 | | |
1334 | 2.92k | wire::check_slice_len(slice, len, "sparse states byte length")?; |
1335 | 2.87k | let sparse = &slice[..len]; |
1336 | 2.87k | slice = &slice[len..]; |
1337 | | |
1338 | 2.87k | let trans = Transitions { sparse, classes, state_len, pattern_len }; |
1339 | 2.87k | Ok((trans, slice.as_ptr().as_usize() - slice_start)) |
1340 | 3.05k | } |
1341 | | } |
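
The deserialization above repeatedly applies one small pattern: read a native endian `u32`, convert it to `usize`, and advance the slice by the number of bytes read. A hedged stand-in for the internal `wire::try_read_u32_as_usize` helper might look like this; the error type is a simplification.

```
/// Reads a native endian u32 from the front of `slice`, returning the
/// value as a usize along with the number of bytes consumed. A sketch
/// of the read-and-advance pattern used above.
fn try_read_u32_as_usize(
    slice: &[u8],
) -> Result<(usize, usize), &'static str> {
    if slice.len() < 4 {
        return Err("slice too short for u32");
    }
    let bytes: [u8; 4] = slice[..4].try_into().unwrap();
    Ok((u32::from_ne_bytes(bytes) as usize, 4))
}
```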
1342 | | |
1343 | | impl<T: AsRef<[u8]>> Transitions<T> { |
1344 | | /// Writes a serialized form of this transition table to the buffer given. |
1345 | | /// If the buffer is too small, then an error is returned. To determine |
1346 | | /// how big the buffer must be, use `write_to_len`. |
1347 | | fn write_to<E: Endian>( |
1348 | | &self, |
1349 | | mut dst: &mut [u8], |
1350 | | ) -> Result<usize, SerializeError> { |
1351 | | let nwrite = self.write_to_len(); |
1352 | | if dst.len() < nwrite { |
1353 | | return Err(SerializeError::buffer_too_small( |
1354 | | "sparse transition table", |
1355 | | )); |
1356 | | } |
1357 | | dst = &mut dst[..nwrite]; |
1358 | | |
1359 | | // write state length |
1360 | | E::write_u32(u32::try_from(self.state_len).unwrap(), dst); |
1361 | | dst = &mut dst[size_of::<u32>()..]; |
1362 | | |
1363 | | // write pattern length |
1364 | | E::write_u32(u32::try_from(self.pattern_len).unwrap(), dst); |
1365 | | dst = &mut dst[size_of::<u32>()..]; |
1366 | | |
1367 | | // write byte class map |
1368 | | let n = self.classes.write_to(dst)?; |
1369 | | dst = &mut dst[n..]; |
1370 | | |
1371 | | // write number of bytes in sparse transitions |
1372 | | E::write_u32(u32::try_from(self.sparse().len()).unwrap(), dst); |
1373 | | dst = &mut dst[size_of::<u32>()..]; |
1374 | | |
1375 | | // write actual transitions |
1376 | | let mut id = DEAD; |
1377 | | while id.as_usize() < self.sparse().len() { |
1378 | | let state = self.state(id); |
1379 | | let n = state.write_to::<E>(&mut dst)?; |
1380 | | dst = &mut dst[n..]; |
1381 | | // The next ID is the offset immediately following `state`. |
1382 | | id = StateID::new(id.as_usize() + state.write_to_len()).unwrap(); |
1383 | | } |
1384 | | Ok(nwrite) |
1385 | | } |
1386 | | |
1387 | | /// Returns the number of bytes the serialized form of this transition |
1388 | | /// table will use. |
1389 | | fn write_to_len(&self) -> usize { |
1390 | | size_of::<u32>() // state length |
1391 | | + size_of::<u32>() // pattern length |
1392 | | + self.classes.write_to_len() |
1393 | | + size_of::<u32>() // sparse transitions length |
1394 | | + self.sparse().len() |
1395 | | } |
1396 | | |
1397 | | /// Validates that every state ID in this transition table is valid. |
1398 | | /// |
1399 | | /// That is, every state ID can be used to correctly index a state in this |
1400 | | /// table. |
1401 | 1.99k | fn validate(&self, sp: &Special) -> Result<Seen, DeserializeError> { |
1402 | 1.99k | let mut verified = Seen::new(); |
1403 | | // We need to make sure that we decode the correct number of states. |
1404 | | // Otherwise, an empty set of transitions would validate even if the |
1405 | | // recorded state length is non-zero.
1406 | 1.99k | let mut len = 0; |
1407 | | // We can't use the self.states() iterator because it assumes the state |
1408 | | // encodings are valid. It could panic if they aren't. |
1409 | 1.99k | let mut id = DEAD; |
1410 | 29.6k | while id.as_usize() < self.sparse().len() { |
1411 | | // Before we even decode the state, we check that the ID itself |
1412 | | // is well formed. That is, if it's a special state then it must |
1413 | | // actually be a quit, dead, accel, match or start state. |
1414 | 27.9k | if sp.is_special_state(id) { |
1415 | 7.65k | let is_actually_special = sp.is_dead_state(id) |
1416 | 5.65k | || sp.is_quit_state(id) |
1417 | 5.57k | || sp.is_match_state(id) |
1418 | 4.20k | || sp.is_start_state(id) |
1419 | 3.27k | || sp.is_accel_state(id); |
1420 | 7.65k | if !is_actually_special { |
1421 | | // This is kind of a cryptic error message... |
1422 | 49 | return Err(DeserializeError::generic( |
1423 | 49 | "found sparse state tagged as special but \ |
1424 | 49 | wasn't actually special", |
1425 | 49 | )); |
1426 | 7.60k | } |
1427 | 20.3k | } |
1428 | 27.9k | let state = self.try_state(sp, id)?; |
1429 | 27.6k | verified.insert(id); |
1430 | | // The next ID should be the offset immediately following `state`. |
1431 | 27.6k | id = StateID::new(wire::add( |
1432 | 27.6k | id.as_usize(), |
1433 | 27.6k | state.write_to_len(), |
1434 | | "next state ID offset", |
1435 | 0 | )?) |
1436 | 27.6k | .map_err(|err| { |
1437 | 0 | DeserializeError::state_id_error(err, "next state ID offset") |
1438 | 0 | })?; |
1439 | 27.6k | len += 1; |
1440 | | } |
1441 | | // Now that we've checked that all top-level states are correct and |
1442 | | // importantly, collected a set of valid state IDs, we have all the |
1443 | | // information we need to check that all transitions are correct too. |
1444 | | // |
1445 | | // Note that we can't use `verified` to iterate because it will
1446 | | // be empty in no-std no-alloc contexts. (And yes, that means our |
1447 | | // verification isn't quite as good.) We can use `self.states()` |
1448 | | // though at least, since we know that all states can at least be |
1449 | | // decoded and traversed correctly. |
1450 | 25.7k | for state in self.states() { |
1451 | | // Check that all transitions in this state are correct. |
1452 | 60.5k | for i in 0..state.ntrans { |
1453 | 60.5k | let to = state.next_at(i); |
1454 | | // For no-alloc, we just check that the state can decode. It is |
1455 | | // technically possible that the state ID could still point to |
1456 | | // a non-existent state even if it decodes (fuzzing proved this |
1457 | | // to be true), but it shouldn't result in any memory unsafety |
1458 | | // or panics in non-debug mode. |
1459 | | #[cfg(not(feature = "alloc"))] |
1460 | | { |
1461 | | let _ = self.try_state(sp, to)?; |
1462 | | } |
1463 | | #[cfg(feature = "alloc")] |
1464 | | { |
1465 | 60.5k | if !verified.contains(&to) { |
1466 | 22 | return Err(DeserializeError::generic( |
1467 | 22 | "found transition that points to a \ |
1468 | 22 | non-existent state", |
1469 | 22 | )); |
1470 | 60.5k | } |
1471 | | } |
1472 | | } |
1473 | | } |
1474 | 1.68k | if len != self.state_len { |
1475 | 9 | return Err(DeserializeError::generic( |
1476 | 9 | "mismatching sparse state length", |
1477 | 9 | )); |
1478 | 1.67k | } |
1479 | 1.67k | Ok(verified) |
1480 | 1.99k | } |
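
The heart of the walk above is that sparse state IDs are byte offsets into the transition table, and each state's encoded length determines where the next state begins. A simplified sketch of that traversal, with a hypothetical `encoded_len` closure standing in for decoding a state and asking for its `write_to_len`:

```
/// Counts variable length records laid end to end in a buffer, the way
/// the validation loop above walks sparse states. `encoded_len` is a
/// hypothetical stand-in that reports a record's size at an offset; it
/// is assumed to always return a nonzero length.
fn count_records(
    buf_len: usize,
    encoded_len: impl Fn(usize) -> usize,
) -> usize {
    let (mut offset, mut count) = (0, 0);
    while offset < buf_len {
        offset += encoded_len(offset);
        count += 1;
    }
    count
}
```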
1481 | | |
1482 | | /// Converts these transitions to a borrowed value. |
1483 | | fn as_ref(&self) -> Transitions<&'_ [u8]> { |
1484 | | Transitions { |
1485 | | sparse: self.sparse(), |
1486 | | classes: self.classes.clone(), |
1487 | | state_len: self.state_len, |
1488 | | pattern_len: self.pattern_len, |
1489 | | } |
1490 | | } |
1491 | | |
1492 | | /// Converts these transitions to an owned value. |
1493 | | #[cfg(feature = "alloc")] |
1494 | | fn to_owned(&self) -> Transitions<alloc::vec::Vec<u8>> { |
1495 | | Transitions { |
1496 | | sparse: self.sparse().to_vec(), |
1497 | | classes: self.classes.clone(), |
1498 | | state_len: self.state_len, |
1499 | | pattern_len: self.pattern_len, |
1500 | | } |
1501 | | } |
1502 | | |
1503 | | /// Return a convenient representation of the given state. |
1504 | | /// |
1505 | | /// This panics if the state is invalid. |
1506 | | /// |
1507 | | /// This is marked as inline to help dramatically boost sparse searching, |
1508 | | /// which decodes each state it enters to follow the next transition. Other |
1509 | | /// functions involved are also inlined, which should hopefully eliminate |
1510 | | /// a lot of the extraneous decoding that is never needed just to follow |
1511 | | /// the next transition. |
1512 | | #[cfg_attr(feature = "perf-inline", inline(always))] |
1513 | 187k | fn state(&self, id: StateID) -> State<'_> { |
1514 | 187k | let mut state = &self.sparse()[id.as_usize()..]; |
1515 | 187k | let mut ntrans = wire::read_u16(&state).as_usize(); |
1516 | 187k | let is_match = (1 << 15) & ntrans != 0; |
1517 | 187k | ntrans &= !(1 << 15); |
1518 | 187k | state = &state[2..]; |
1519 | | |
1520 | 187k | let (input_ranges, state) = state.split_at(ntrans * 2); |
1521 | 187k | let (next, state) = state.split_at(ntrans * StateID::SIZE); |
1522 | 187k | let (pattern_ids, state) = if is_match { |
1523 | 49.7k | let npats = wire::read_u32(&state).as_usize(); |
1524 | 49.7k | state[4..].split_at(npats * 4) |
1525 | | } else { |
1526 | 137k | (&[][..], state) |
1527 | | }; |
1528 | | |
1529 | 187k | let accel_len = usize::from(state[0]); |
1530 | 187k | let accel = &state[1..accel_len + 1]; |
1531 | 187k | State { id, is_match, ntrans, input_ranges, next, pattern_ids, accel } |
1532 | 187k | }
1533 | | |
1534 | | /// Like `state`, but will return an error if the state encoding is |
1535 | | /// invalid. This is useful for verifying states after deserialization, |
1536 | | /// which is required for a safe deserialization API. |
1537 | | /// |
1538 | | /// Note that this only verifies that this state is decodable and that |
1539 | | /// all of its data is consistent. It does not verify that its state ID |
1540 | | /// transitions point to valid states themselves, nor does it verify that |
1541 | | /// every pattern ID is valid. |
1542 | 27.9k | fn try_state( |
1543 | 27.9k | &self, |
1544 | 27.9k | sp: &Special, |
1545 | 27.9k | id: StateID, |
1546 | 27.9k | ) -> Result<State<'_>, DeserializeError> { |
1547 | 27.9k | if id.as_usize() > self.sparse().len() { |
1548 | 0 | return Err(DeserializeError::generic( |
1549 | 0 | "invalid caller provided sparse state ID", |
1550 | 0 | )); |
1551 | 27.9k | } |
1552 | 27.9k | let mut state = &self.sparse()[id.as_usize()..]; |
1553 | | // Encoding format starts with a u16 that stores the total number of |
1554 | | // transitions in this state. |
1555 | 27.9k | let (mut ntrans, _) = |
1556 | 27.9k | wire::try_read_u16_as_usize(state, "state transition length")?; |
1557 | 27.9k | let is_match = ((1 << 15) & ntrans) != 0; |
1558 | 27.9k | ntrans &= !(1 << 15); |
1559 | 27.9k | state = &state[2..]; |
1560 | 27.9k | if ntrans > 257 || ntrans == 0 { |
1561 | 52 | return Err(DeserializeError::generic( |
1562 | 52 | "invalid transition length", |
1563 | 52 | )); |
1564 | 27.8k | } |
1565 | 27.8k | if is_match && !sp.is_match_state(id) { |
1566 | 11 | return Err(DeserializeError::generic( |
1567 | 11 | "state marked as match but not in match ID range", |
1568 | 11 | )); |
1569 | 27.8k | } else if !is_match && sp.is_match_state(id) { |
1570 | 8 | return Err(DeserializeError::generic( |
1571 | 8 | "state in match ID range but not marked as match state", |
1572 | 8 | )); |
1573 | 27.8k | } |
1574 | | |
1575 | | // Each transition has two pieces: an inclusive range of bytes on which |
1576 | | // it is defined, and the state ID that those bytes transition to. The |
1577 | | // pairs come first, followed by a corresponding sequence of state IDs. |
1578 | 27.8k | let input_ranges_len = ntrans.checked_mul(2).unwrap(); |
1579 | 27.8k | wire::check_slice_len(state, input_ranges_len, "sparse byte pairs")?; |
1580 | 27.8k | let (input_ranges, state) = state.split_at(input_ranges_len); |
1581 | | // Every range should be of the form A-B, where A<=B. |
1582 | 66.9k | for pair in input_ranges.chunks(2) { |
1583 | 66.9k | let (start, end) = (pair[0], pair[1]); |
1584 | 66.9k | if start > end { |
1585 | 12 | return Err(DeserializeError::generic("invalid input range")); |
1586 | 66.9k | } |
1587 | | } |
1588 | | |
1589 | | // And now extract the corresponding sequence of state IDs. We leave |
1590 | | // this sequence as a &[u8] instead of a &[StateID] because sparse DFAs do
1591 | | // not have any alignment requirements. |
1592 | 27.8k | let next_len = ntrans |
1593 | 27.8k | .checked_mul(self.id_len()) |
1594 | 27.8k | .expect("state size * #trans should always fit in a usize"); |
1595 | 27.8k | wire::check_slice_len(state, next_len, "sparse trans state IDs")?; |
1596 | 27.8k | let (next, state) = state.split_at(next_len); |
1597 | | // We can at least verify that every state ID is in bounds. |
1598 | 66.6k | for idbytes in next.chunks(self.id_len()) { |
1599 | 66.6k | let (id, _) = |
1600 | 66.6k | wire::read_state_id(idbytes, "sparse state ID in try_state")?; |
1601 | 66.6k | wire::check_slice_len( |
1602 | 66.6k | self.sparse(), |
1603 | 66.6k | id.as_usize(), |
1604 | | "invalid sparse state ID", |
1605 | 26 | )?; |
1606 | | } |
1607 | | |
1608 | | // If this is a match state, then read the pattern IDs for this state. |
1609 | | // The pattern IDs are encoded as a u32 length-prefixed sequence of
1610 | | // native endian encoded 32-bit integers.
1611 | 27.7k | let (pattern_ids, state) = if is_match { |
1612 | 1.34k | let (npats, nr) = |
1613 | 1.34k | wire::try_read_u32_as_usize(state, "pattern ID length")?; |
1614 | 1.34k | let state = &state[nr..]; |
1615 | 1.34k | if npats == 0 { |
1616 | 1 | return Err(DeserializeError::generic( |
1617 | 1 | "state marked as a match, but pattern length is zero", |
1618 | 1 | )); |
1619 | 1.34k | } |
1620 | | |
1621 | 1.34k | let pattern_ids_len = |
1622 | 1.34k | wire::mul(npats, 4, "sparse pattern ID byte length")?; |
1623 | 1.34k | wire::check_slice_len( |
1624 | 1.34k | state, |
1625 | 1.34k | pattern_ids_len, |
1626 | | "sparse pattern IDs", |
1627 | 31 | )?; |
1628 | 1.31k | let (pattern_ids, state) = state.split_at(pattern_ids_len); |
1629 | 93.3k | for patbytes in pattern_ids.chunks(PatternID::SIZE) { |
1630 | 93.3k | wire::read_pattern_id( |
1631 | 93.3k | patbytes, |
1632 | | "sparse pattern ID in try_state", |
1633 | 6 | )?; |
1634 | | } |
1635 | 1.30k | (pattern_ids, state) |
1636 | | } else { |
1637 | 26.4k | (&[][..], state) |
1638 | | }; |
1639 | 27.7k | if is_match && pattern_ids.is_empty() { |
1640 | 0 | return Err(DeserializeError::generic( |
1641 | 0 | "state marked as a match, but has no pattern IDs", |
1642 | 0 | )); |
1643 | 27.7k | } |
1644 | 27.7k | if sp.is_match_state(id) && pattern_ids.is_empty() { |
1645 | 0 | return Err(DeserializeError::generic( |
1646 | 0 | "state marked special as a match, but has no pattern IDs", |
1647 | 0 | )); |
1648 | 27.7k | } |
1649 | 27.7k | if sp.is_match_state(id) != is_match { |
1650 | 0 | return Err(DeserializeError::generic( |
1651 | 0 | "whether state is a match or not is inconsistent", |
1652 | 0 | )); |
1653 | 27.7k | } |
1654 | | |
1655 | | // Now read this state's accelerator info. The first byte is the length |
1656 | | // of the accelerator, which is typically 0 (for no acceleration) but |
1657 | | // is no bigger than 3. The length indicates the number of bytes that |
1658 | | // follow, where each byte corresponds to a transition out of this |
1659 | | // state. |
1660 | 27.7k | if state.is_empty() { |
1661 | 26 | return Err(DeserializeError::generic("no accelerator length")); |
1662 | 27.7k | } |
1663 | 27.7k | let (accel_len, state) = (usize::from(state[0]), &state[1..]); |
1664 | | |
1665 | 27.7k | if accel_len > 3 { |
1666 | 6 | return Err(DeserializeError::generic( |
1667 | 6 | "sparse invalid accelerator length", |
1668 | 6 | )); |
1669 | 27.7k | } else if accel_len == 0 && sp.is_accel_state(id) { |
1670 | 4 | return Err(DeserializeError::generic( |
1671 | 4 | "got no accelerators in state, but in accelerator ID range", |
1672 | 4 | )); |
1673 | 27.7k | } else if accel_len > 0 && !sp.is_accel_state(id) { |
1674 | 5 | return Err(DeserializeError::generic( |
1675 | 5 | "state in accelerator ID range, but has no accelerators", |
1676 | 5 | )); |
1677 | 27.6k | } |
1678 | | |
1679 | 27.6k | wire::check_slice_len( |
1680 | 27.6k | state, |
1681 | 27.6k | accel_len, |
1682 | | "sparse corrupt accelerator length", |
1683 | 1 | )?; |
1684 | 27.6k | let (accel, _) = (&state[..accel_len], &state[accel_len..]); |
1685 | | |
1686 | 27.6k | let state = State { |
1687 | 27.6k | id, |
1688 | 27.6k | is_match, |
1689 | 27.6k | ntrans, |
1690 | 27.6k | input_ranges, |
1691 | 27.6k | next, |
1692 | 27.6k | pattern_ids, |
1693 | 27.6k | accel, |
1694 | 27.6k | }; |
1695 | 27.6k | if sp.is_quit_state(state.next_at(state.ntrans - 1)) { |
1696 | 2 | return Err(DeserializeError::generic( |
1697 | 2 | "state with EOI transition to quit state is illegal", |
1698 | 2 | )); |
1699 | 27.6k | } |
1700 | 27.6k | Ok(state) |
1701 | 27.9k | } |
1702 | | |
1703 | | /// Return an iterator over all of the states in this DFA. |
1704 | | /// |
1705 | | /// The iterator returned yields tuples, where the first element is the |
1706 | | /// state ID and the second element is the state itself. |
1707 | 1.70k | fn states(&self) -> StateIter<'_, T> { |
1708 | 1.70k | StateIter { trans: self, id: DEAD.as_usize() } |
1709 | 1.70k | } |
1710 | | |
1711 | | /// Returns the sparse transitions as raw bytes. |
1712 | 369k | fn sparse(&self) -> &[u8] { |
1713 | 369k | self.sparse.as_ref() |
1714 | 369k | }
1715 | | |
1716 | | /// Returns the number of bytes used to represent a single state ID.
1717 | 55.6k | fn id_len(&self) -> usize { |
1718 | 55.6k | StateID::SIZE |
1719 | 55.6k | } |
1720 | | |
1721 | | /// Return the memory usage, in bytes, of these transitions. |
1722 | | /// |
1723 | | /// This does not include the size of a `Transitions` value itself. |
1724 | 0 | fn memory_usage(&self) -> usize { |
1725 | 0 | self.sparse().len() |
1726 | 0 | } |
1727 | | } |
1728 | | |
1729 | | #[cfg(feature = "dfa-build")] |
1730 | | impl<T: AsMut<[u8]>> Transitions<T> { |
1731 | | /// Return a convenient mutable representation of the given state. |
1732 | | /// This panics if the state is invalid. |
1733 | 0 | fn state_mut(&mut self, id: StateID) -> StateMut<'_> { |
1734 | 0 | let mut state = &mut self.sparse_mut()[id.as_usize()..]; |
1735 | 0 | let mut ntrans = wire::read_u16(&state).as_usize(); |
1736 | 0 | let is_match = (1 << 15) & ntrans != 0; |
1737 | 0 | ntrans &= !(1 << 15); |
1738 | 0 | state = &mut state[2..]; |
1739 | | |
1740 | 0 | let (input_ranges, state) = state.split_at_mut(ntrans * 2); |
1741 | 0 | let (next, state) = state.split_at_mut(ntrans * StateID::SIZE); |
1742 | 0 | let (pattern_ids, state) = if is_match { |
1743 | 0 | let npats = wire::read_u32(&state).as_usize(); |
1744 | 0 | state[4..].split_at_mut(npats * 4) |
1745 | | } else { |
1746 | 0 | (&mut [][..], state) |
1747 | | }; |
1748 | | |
1749 | 0 | let accel_len = usize::from(state[0]); |
1750 | 0 | let accel = &mut state[1..accel_len + 1]; |
1751 | 0 | StateMut { |
1752 | 0 | id, |
1753 | 0 | is_match, |
1754 | 0 | ntrans, |
1755 | 0 | input_ranges, |
1756 | 0 | next, |
1757 | 0 | pattern_ids, |
1758 | 0 | accel, |
1759 | 0 | } |
1760 | 0 | } |
1761 | | |
1762 | | /// Returns the sparse transitions as raw mutable bytes. |
1763 | 0 | fn sparse_mut(&mut self) -> &mut [u8] { |
1764 | 0 | self.sparse.as_mut() |
1765 | 0 | } |
1766 | | } |
1767 | | |
1768 | | /// The set of all possible starting states in a DFA. |
1769 | | /// |
1770 | | /// See the eponymous type in the `dense` module for more details. This type |
1771 | | /// is very similar to `dense::StartTable`, except that its underlying |
1772 | | /// representation is `&[u8]` instead of `&[StateID]`. (The latter would require
1773 | | /// sparse DFAs to be aligned, which is explicitly something we do not require |
1774 | | /// because we don't really need it.) |
1775 | | #[derive(Clone)] |
1776 | | struct StartTable<T> { |
1777 | | /// The initial start state IDs as a contiguous table of native endian |
1778 | | /// encoded integers (each one a `StateID`).
1779 | | /// |
1780 | | /// In practice, T is either Vec<u8> or &[u8] and has no alignment |
1781 | | /// requirements. |
1782 | | /// |
1783 | | /// The first `2 * stride` (currently always 8) entries always correspond |
1784 | | /// to the start states for the entire DFA, with the first 4 entries being
1785 | | /// for unanchored searches and the second 4 entries being for anchored |
1786 | | /// searches. To keep things simple, we always use 8 entries even if the |
1787 | | /// `StartKind` is not `Both`.
1788 | | /// |
1789 | | /// After that, there are `stride * patterns` state IDs, where `patterns` |
1790 | | /// may be zero in the case of a DFA with no patterns or in the case where |
1791 | | /// the DFA was built without enabling starting states for each pattern. |
1792 | | table: T, |
1793 | | /// The starting state configuration supported. When 'both', both |
1794 | | /// unanchored and anchored searches work. When 'unanchored', anchored |
1795 | | /// searches panic. When 'anchored', unanchored searches panic. |
1796 | | kind: StartKind, |
1797 | | /// The start state configuration for every possible byte. |
1798 | | start_map: StartByteMap, |
1799 | | /// The number of starting state IDs per pattern. |
1800 | | stride: usize, |
1801 | | /// The total number of patterns for which starting states are encoded. |
1802 | | /// This is `None` for DFAs that were built without start states for each |
1803 | | /// pattern. Thus, one cannot use this field to say how many patterns |
1804 | | /// are in the DFA in all cases. It is specific to how many patterns are |
1805 | | /// represented in this start table. |
1806 | | pattern_len: Option<usize>, |
1807 | | /// The universal starting state for unanchored searches. This is only |
1808 | | /// present when the DFA supports unanchored searches and when all starting |
1809 | | /// state IDs for an unanchored search are equivalent. |
1810 | | universal_start_unanchored: Option<StateID>, |
1811 | | /// The universal starting state for anchored searches. This is only |
1812 | | /// present when the DFA supports anchored searches and when all starting |
1813 | | /// state IDs for an anchored search are equivalent. |
1814 | | universal_start_anchored: Option<StateID>, |
1815 | | } |
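
The layout described above implies simple slot arithmetic, which `start` and `set_start` below both implement: one stride of unanchored slots, one stride of anchored slots, and then one stride per pattern. A hedged sketch, with a stand-in enum rather than this crate's `Anchored` type:

```
/// A stand-in for this crate's Anchored type, for illustration only.
enum Mode {
    Unanchored,
    Anchored,
    Pattern(usize),
}

/// Computes the slot index of a start state under the layout above.
fn start_slot(stride: usize, mode: Mode, start_index: usize) -> usize {
    match mode {
        Mode::Unanchored => start_index,
        Mode::Anchored => stride + start_index,
        // Per-pattern blocks follow the two DFA-wide blocks.
        Mode::Pattern(pid) => 2 * stride + stride * pid + start_index,
    }
}
```

The byte offset of the slot is then the slot index multiplied by `StateID::SIZE`, as in `start` below.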
1816 | | |
1817 | | #[cfg(feature = "dfa-build")] |
1818 | | impl StartTable<Vec<u8>> { |
1819 | 0 | fn new<T: AsRef<[u32]>>( |
1820 | 0 | dfa: &dense::DFA<T>, |
1821 | 0 | pattern_len: Option<usize>, |
1822 | 0 | ) -> StartTable<Vec<u8>> { |
1823 | 0 | let stride = Start::len(); |
1824 | | // This is OK since the only way we're here is if a dense DFA could be |
1825 | | // constructed successfully, which uses the same space. |
1826 | 0 | let len = stride |
1827 | 0 | .checked_mul(pattern_len.unwrap_or(0)) |
1828 | 0 | .unwrap() |
1829 | 0 | .checked_add(stride.checked_mul(2).unwrap()) |
1830 | 0 | .unwrap() |
1831 | 0 | .checked_mul(StateID::SIZE) |
1832 | 0 | .unwrap(); |
1833 | 0 | StartTable { |
1834 | 0 | table: vec![0; len], |
1835 | 0 | kind: dfa.start_kind(), |
1836 | 0 | start_map: dfa.start_map().clone(), |
1837 | 0 | stride, |
1838 | 0 | pattern_len, |
1839 | 0 | universal_start_unanchored: dfa |
1840 | 0 | .universal_start_state(Anchored::No), |
1841 | 0 | universal_start_anchored: dfa.universal_start_state(Anchored::Yes), |
1842 | 0 | } |
1843 | 0 | } |
1844 | | |
1845 | 0 | fn from_dense_dfa<T: AsRef<[u32]>>( |
1846 | 0 | dfa: &dense::DFA<T>, |
1847 | 0 | remap: &[StateID], |
1848 | 0 | ) -> Result<StartTable<Vec<u8>>, BuildError> { |
1849 | | // Unless the DFA has start states compiled for each pattern, as far
1850 | | // as the starting state table is concerned, there are zero
1851 | | // patterns to account for. It will instead only store starting states |
1852 | | // for the entire DFA. |
1853 | 0 | let start_pattern_len = if dfa.starts_for_each_pattern() { |
1854 | 0 | Some(dfa.pattern_len()) |
1855 | | } else { |
1856 | 0 | None |
1857 | | }; |
1858 | 0 | let mut sl = StartTable::new(dfa, start_pattern_len); |
1859 | 0 | for (old_start_id, anchored, sty) in dfa.starts() { |
1860 | 0 | let new_start_id = remap[dfa.to_index(old_start_id)]; |
1861 | 0 | sl.set_start(anchored, sty, new_start_id); |
1862 | 0 | } |
1863 | 0 | if let Some(ref mut id) = sl.universal_start_anchored { |
1864 | 0 | *id = remap[dfa.to_index(*id)]; |
1865 | 0 | } |
1866 | 0 | if let Some(ref mut id) = sl.universal_start_unanchored { |
1867 | 0 | *id = remap[dfa.to_index(*id)]; |
1868 | 0 | } |
1869 | 0 | Ok(sl) |
1870 | 0 | } |
1871 | | } |
1872 | | |
1873 | | impl<'a> StartTable<&'a [u8]> { |
1874 | 2.87k | unsafe fn from_bytes_unchecked( |
1875 | 2.87k | mut slice: &'a [u8], |
1876 | 2.87k | ) -> Result<(StartTable<&'a [u8]>, usize), DeserializeError> { |
1877 | 2.87k | let slice_start = slice.as_ptr().as_usize(); |
1878 | | |
1879 | 2.87k | let (kind, nr) = StartKind::from_bytes(slice)?; |
1880 | 2.82k | slice = &slice[nr..]; |
1881 | | |
1882 | 2.82k | let (start_map, nr) = StartByteMap::from_bytes(slice)?; |
1883 | 2.76k | slice = &slice[nr..]; |
1884 | | |
1885 | 2.75k | let (stride, nr) = |
1886 | 2.76k | wire::try_read_u32_as_usize(slice, "sparse start table stride")?; |
1887 | 2.75k | slice = &slice[nr..]; |
1888 | 2.75k | if stride != Start::len() { |
1889 | 52 | return Err(DeserializeError::generic( |
1890 | 52 | "invalid sparse starting table stride", |
1891 | 52 | )); |
1892 | 2.70k | } |
1893 | | |
1894 | 2.70k | let (maybe_pattern_len, nr) = |
1895 | 2.70k | wire::try_read_u32_as_usize(slice, "sparse start table patterns")?; |
1896 | 2.70k | slice = &slice[nr..]; |
1897 | 2.70k | let pattern_len = if maybe_pattern_len.as_u32() == u32::MAX { |
1898 | 935 | None |
1899 | | } else { |
1900 | 1.76k | Some(maybe_pattern_len) |
1901 | | }; |
1902 | 2.70k | if pattern_len.map_or(false, |len| len > PatternID::LIMIT) { |
1903 | 37 | return Err(DeserializeError::generic( |
1904 | 37 | "sparse invalid number of patterns", |
1905 | 37 | )); |
1906 | 2.66k | } |
1907 | | |
1908 | 2.65k | let (universal_unanchored, nr) = |
1909 | 2.66k | wire::try_read_u32(slice, "universal unanchored start")?; |
1910 | 2.65k | slice = &slice[nr..]; |
1911 | 2.65k | let universal_start_unanchored = if universal_unanchored == u32::MAX { |
1912 | 76 | None |
1913 | | } else { |
1914 | 2.57k | Some(StateID::try_from(universal_unanchored).map_err(|e| { |
1915 | 34 | DeserializeError::state_id_error( |
1916 | 34 | e, |
1917 | | "universal unanchored start", |
1918 | | ) |
1919 | 34 | })?) |
1920 | | }; |
1921 | | |
1922 | 2.57k | let (universal_anchored, nr) = |
1923 | 2.61k | wire::try_read_u32(slice, "universal anchored start")?; |
1924 | 2.57k | slice = &slice[nr..]; |
1925 | 2.57k | let universal_start_anchored = if universal_anchored == u32::MAX { |
1926 | 32 | None |
1927 | | } else { |
1928 | 2.54k | Some(StateID::try_from(universal_anchored).map_err(|e| { |
1929 | 32 | DeserializeError::state_id_error(e, "universal anchored start") |
1930 | 32 | })?) |
1931 | | }; |
1932 | | |
1933 | 2.54k | let pattern_table_size = wire::mul( |
1934 | 2.54k | stride, |
1935 | 2.54k | pattern_len.unwrap_or(0), |
1936 | | "sparse invalid pattern length", |
1937 | 0 | )?; |
1938 | | // Our start states always begin with two strides of start states for
1939 | | // the entire automaton (one unanchored, one anchored). What follows
1940 | | // them is an optional set of start states for each pattern.
1941 | 2.54k | let start_state_len = wire::add( |
1942 | 2.54k | wire::mul(2, stride, "start state stride too big")?, |
1943 | 2.54k | pattern_table_size, |
1944 | | "sparse invalid 'any' pattern starts size", |
1945 | 0 | )?; |
1946 | 2.54k | let table_bytes_len = wire::mul( |
1947 | 2.54k | start_state_len, |
1948 | | StateID::SIZE, |
1949 | | "sparse pattern table bytes length", |
1950 | 0 | )?; |
1951 | 2.54k | wire::check_slice_len( |
1952 | 2.54k | slice, |
1953 | 2.54k | table_bytes_len, |
1954 | | "sparse start ID table", |
1955 | 57 | )?; |
1956 | 2.49k | let table = &slice[..table_bytes_len]; |
1957 | 2.49k | slice = &slice[table_bytes_len..]; |
1958 | | |
1959 | 2.49k | let sl = StartTable { |
1960 | 2.49k | table, |
1961 | 2.49k | kind, |
1962 | 2.49k | start_map, |
1963 | 2.49k | stride, |
1964 | 2.49k | pattern_len, |
1965 | 2.49k | universal_start_unanchored, |
1966 | 2.49k | universal_start_anchored, |
1967 | 2.49k | }; |
1968 | 2.49k | Ok((sl, slice.as_ptr().as_usize() - slice_start)) |
1969 | 2.87k | } |
1970 | | } |
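
Several optional fields above (the pattern count and both universal start states) are serialized with `u32::MAX` as a "not present" sentinel rather than a separate presence flag. The decoding convention, in miniature:

```
/// Decodes the u32::MAX-as-None sentinel used for the start table's
/// optional fields. A sketch of the convention, not a crate API.
fn decode_optional(raw: u32) -> Option<u32> {
    if raw == u32::MAX {
        None
    } else {
        Some(raw)
    }
}
```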
1971 | | |
1972 | | impl<T: AsRef<[u8]>> StartTable<T> { |
1973 | | fn write_to<E: Endian>( |
1974 | | &self, |
1975 | | mut dst: &mut [u8], |
1976 | | ) -> Result<usize, SerializeError> { |
1977 | | let nwrite = self.write_to_len(); |
1978 | | if dst.len() < nwrite { |
1979 | | return Err(SerializeError::buffer_too_small( |
1980 | | "sparse starting table ids", |
1981 | | )); |
1982 | | } |
1983 | | dst = &mut dst[..nwrite]; |
1984 | | |
1985 | | // write start kind |
1986 | | let nw = self.kind.write_to::<E>(dst)?; |
1987 | | dst = &mut dst[nw..]; |
1988 | | // write start byte map |
1989 | | let nw = self.start_map.write_to(dst)?; |
1990 | | dst = &mut dst[nw..]; |
1991 | | // write stride |
1992 | | E::write_u32(u32::try_from(self.stride).unwrap(), dst); |
1993 | | dst = &mut dst[size_of::<u32>()..]; |
1994 | | // write pattern length |
1995 | | E::write_u32( |
1996 | | u32::try_from(self.pattern_len.unwrap_or(0xFFFF_FFFF)).unwrap(), |
1997 | | dst, |
1998 | | ); |
1999 | | dst = &mut dst[size_of::<u32>()..]; |
2000 | | // write universal start unanchored state id, u32::MAX if absent |
2001 | | E::write_u32( |
2002 | | self.universal_start_unanchored |
2003 | | .map_or(u32::MAX, |sid| sid.as_u32()), |
2004 | | dst, |
2005 | | ); |
2006 | | dst = &mut dst[size_of::<u32>()..]; |
2007 | | // write universal start anchored state id, u32::MAX if absent |
2008 | | E::write_u32( |
2009 | | self.universal_start_anchored.map_or(u32::MAX, |sid| sid.as_u32()), |
2010 | | dst, |
2011 | | ); |
2012 | | dst = &mut dst[size_of::<u32>()..]; |
2013 | | // write start IDs |
2014 | | for (sid, _, _) in self.iter() { |
2015 | | E::write_u32(sid.as_u32(), dst); |
2016 | | dst = &mut dst[StateID::SIZE..]; |
2017 | | } |
2018 | | Ok(nwrite) |
2019 | | } |
2020 | | |
2021 | | /// Returns the number of bytes the serialized form of this start
2022 | | /// table will use.
2023 | | fn write_to_len(&self) -> usize { |
2024 | | self.kind.write_to_len() |
2025 | | + self.start_map.write_to_len() |
2026 | | + size_of::<u32>() // stride |
2027 | | + size_of::<u32>() // # patterns |
2028 | | + size_of::<u32>() // universal unanchored start |
2029 | | + size_of::<u32>() // universal anchored start |
2030 | | + self.table().len() |
2031 | | } |
2032 | | |
2033 | | /// Validates that every starting state ID in this table is valid. |
2034 | | /// |
2035 | | /// That is, every starting state ID can be used to correctly decode a |
2036 | | /// state in the DFA's sparse transitions. |
2037 | 1.67k | fn validate( |
2038 | 1.67k | &self, |
2039 | 1.67k | sp: &Special, |
2040 | 1.67k | seen: &Seen, |
2041 | 1.67k | ) -> Result<(), DeserializeError> { |
2042 | 301k | for (id, _, _) in self.iter() { |
2043 | 301k | if !seen.contains(&id) { |
2044 | 39 | return Err(DeserializeError::generic( |
2045 | 39 | "found invalid start state ID", |
2046 | 39 | )); |
2047 | 301k | } |
2048 | 301k | if sp.is_match_state(id) { |
2049 | 2 | return Err(DeserializeError::generic( |
2050 | 2 | "start states cannot be match states", |
2051 | 2 | )); |
2052 | 301k | } |
2053 | | } |
2054 | 1.63k | Ok(()) |
2055 | 1.67k | } |
2056 | | |
2057 | | /// Converts this start list to a borrowed value. |
2058 | | fn as_ref(&self) -> StartTable<&'_ [u8]> { |
2059 | | StartTable { |
2060 | | table: self.table(), |
2061 | | kind: self.kind, |
2062 | | start_map: self.start_map.clone(), |
2063 | | stride: self.stride, |
2064 | | pattern_len: self.pattern_len, |
2065 | | universal_start_unanchored: self.universal_start_unanchored, |
2066 | | universal_start_anchored: self.universal_start_anchored, |
2067 | | } |
2068 | | } |
2069 | | |
2070 | | /// Converts this start list to an owned value. |
2071 | | #[cfg(feature = "alloc")] |
2072 | | fn to_owned(&self) -> StartTable<alloc::vec::Vec<u8>> { |
2073 | | StartTable { |
2074 | | table: self.table().to_vec(), |
2075 | | kind: self.kind, |
2076 | | start_map: self.start_map.clone(), |
2077 | | stride: self.stride, |
2078 | | pattern_len: self.pattern_len, |
2079 | | universal_start_unanchored: self.universal_start_unanchored, |
2080 | | universal_start_anchored: self.universal_start_anchored, |
2081 | | } |
2082 | | } |
2083 | | |
2084 | | /// Return the start state for the given starting configuration and
2085 | | /// anchored mode. For `Anchored::No` and `Anchored::Yes`, the start
2086 | | /// state for the entire DFA is returned. For `Anchored::Pattern`, the
2087 | | /// starting state for the given pattern is returned. If this start
2088 | | /// table does not support the requested mode (including when it lacks
2089 | | /// per-pattern start states), then an error is returned.
2090 | 26.6k | fn start( |
2091 | 26.6k | &self, |
2092 | 26.6k | anchored: Anchored, |
2093 | 26.6k | start: Start, |
2094 | 26.6k | ) -> Result<StateID, StartError> { |
2095 | 26.6k | let start_index = start.as_usize(); |
2096 | 26.6k | let index = match anchored { |
2097 | | Anchored::No => { |
2098 | 26.6k | if !self.kind.has_unanchored() { |
2099 | 75 | return Err(StartError::unsupported_anchored(anchored)); |
2100 | 26.6k | } |
2101 | 26.6k | start_index |
2102 | | } |
2103 | | Anchored::Yes => { |
2104 | 0 | if !self.kind.has_anchored() { |
2105 | 0 | return Err(StartError::unsupported_anchored(anchored)); |
2106 | 0 | } |
2107 | 0 | self.stride + start_index |
2108 | | } |
2109 | 0 | Anchored::Pattern(pid) => { |
2110 | 0 | let len = match self.pattern_len { |
2111 | | None => { |
2112 | 0 | return Err(StartError::unsupported_anchored(anchored)) |
2113 | | } |
2114 | 0 | Some(len) => len, |
2115 | | }; |
2116 | 0 | if pid.as_usize() >= len { |
2117 | 0 | return Ok(DEAD); |
2118 | 0 | } |
2119 | 0 | (2 * self.stride) |
2120 | 0 | + (self.stride * pid.as_usize()) |
2121 | 0 | + start_index |
2122 | | } |
2123 | | }; |
2124 | 26.6k | let start = index * StateID::SIZE; |
2125 | | // This is OK since we're allowed to assume that the start table contains
2126 | | // valid StateIDs. |
2127 | 26.6k | Ok(wire::read_state_id_unchecked(&self.table()[start..]).0) |
2128 | 26.6k | } |
2129 | | |
2130 | | /// Return an iterator over all start IDs in this table. |
2131 | 1.67k | fn iter(&self) -> StartStateIter<'_, T> { |
2132 | 1.67k | StartStateIter { st: self, i: 0 } |
2133 | 1.67k | } |
2134 | | |
2135 | | /// Returns the total number of start state IDs in this table. |
2136 | 303k | fn len(&self) -> usize { |
2137 | 303k | self.table().len() / StateID::SIZE |
2138 | 303k | } |
2139 | | |
2140 | | /// Returns the table as a raw slice of bytes. |
2141 | 631k | fn table(&self) -> &[u8] { |
2142 | 631k | self.table.as_ref() |
2143 | 631k | }
2144 | | |
2145 | | /// Return the memory usage, in bytes, of this start list. |
2146 | | /// |
2147 | | /// This does not include the size of a `StartTable` value itself. |
2148 | 0 | fn memory_usage(&self) -> usize { |
2149 | 0 | self.table().len() |
2150 | 0 | } |
2151 | | } |
2152 | | |
2153 | | #[cfg(feature = "dfa-build")] |
2154 | | impl<T: AsMut<[u8]>> StartTable<T> { |
2155 | | /// Set the start state for the given anchored mode and starting
2156 | | /// configuration.
2156 | | /// |
2157 | | /// If the pattern ID or state ID are not valid, then this will panic. |
2158 | 0 | fn set_start(&mut self, anchored: Anchored, start: Start, id: StateID) { |
2159 | 0 | let start_index = start.as_usize(); |
2160 | 0 | let index = match anchored { |
2161 | 0 | Anchored::No => start_index, |
2162 | 0 | Anchored::Yes => self.stride + start_index, |
2163 | 0 | Anchored::Pattern(pid) => { |
2164 | 0 | let pid = pid.as_usize(); |
2165 | 0 | let len = self |
2166 | 0 | .pattern_len |
2167 | 0 | .expect("start states for each pattern enabled"); |
2168 | 0 | assert!(pid < len, "invalid pattern ID {pid:?}"); |
2169 | 0 | self.stride |
2170 | 0 | .checked_mul(pid) |
2171 | 0 | .unwrap() |
2172 | 0 | .checked_add(self.stride.checked_mul(2).unwrap()) |
2173 | 0 | .unwrap() |
2174 | 0 | .checked_add(start_index) |
2175 | 0 | .unwrap() |
2176 | | } |
2177 | | }; |
2178 | 0 | let start = index * StateID::SIZE; |
2179 | 0 | let end = start + StateID::SIZE; |
2180 | 0 | wire::write_state_id::<wire::NE>( |
2181 | 0 | id, |
2182 | 0 | &mut self.table.as_mut()[start..end], |
2183 | | ); |
2184 | 0 | } |
2185 | | } |
2186 | | |
2187 | | /// An iterator over all start state IDs in a sparse DFA.
2188 | | struct StartStateIter<'a, T> { |
2189 | | st: &'a StartTable<T>, |
2190 | | i: usize, |
2191 | | } |
2192 | | |
2193 | | impl<'a, T: AsRef<[u8]>> Iterator for StartStateIter<'a, T> { |
2194 | | type Item = (StateID, Anchored, Start); |
2195 | | |
2196 | 303k | fn next(&mut self) -> Option<(StateID, Anchored, Start)> { |
2197 | 303k | let i = self.i; |
2198 | 303k | if i >= self.st.len() { |
2199 | 1.63k | return None; |
2200 | 301k | } |
2201 | 301k | self.i += 1; |
2202 | | |
2203 | | // This unwrap is okay since the stride of any DFA must always match |
2204 | | // the number of start state types. |
2205 | 301k | let start_type = Start::from_usize(i % self.st.stride).unwrap(); |
2206 | 301k | let anchored = if i < self.st.stride { |
2207 | 9.98k | Anchored::No |
2208 | 291k | } else if i < (2 * self.st.stride) { |
2209 | 9.91k | Anchored::Yes |
2210 | | } else { |
2211 | 281k | let pid = (i - (2 * self.st.stride)) / self.st.stride; |
2212 | 281k | Anchored::Pattern(PatternID::new(pid).unwrap()) |
2213 | | }; |
2214 | 301k | let start = i * StateID::SIZE; |
2215 | 301k | let end = start + StateID::SIZE; |
2216 | 301k | let bytes = self.st.table()[start..end].try_into().unwrap(); |
2217 | | // This is OK since we're allowed to assume that any IDs in this start |
2218 | | // table are correct and valid for this DFA. |
2219 | 301k | let id = StateID::from_ne_bytes_unchecked(bytes); |
2220 | 301k | Some((id, anchored, start_type)) |
2221 | 303k | } |
2222 | | } |
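
The iterator above inverts the slot arithmetic: given a flat index, it recovers which block the slot lives in by comparing against stride boundaries. A sketch of that inverse mapping, with the same kind of stand-in enum as in the earlier start-table sketch:

```
/// A stand-in for this crate's Anchored type, for illustration only.
enum Mode {
    Unanchored,
    Anchored,
    Pattern(usize),
}

/// Recovers the anchored mode and start index from a flat slot index,
/// inverting the layout arithmetic shown earlier.
fn unslot(stride: usize, i: usize) -> (Mode, usize) {
    let start_index = i % stride;
    if i < stride {
        (Mode::Unanchored, start_index)
    } else if i < 2 * stride {
        (Mode::Anchored, start_index)
    } else {
        (Mode::Pattern((i - 2 * stride) / stride), start_index)
    }
}
```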
2223 | | |
2224 | | impl<'a, T> fmt::Debug for StartStateIter<'a, T> { |
2225 | | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
2226 | | f.debug_struct("StartStateIter").field("i", &self.i).finish() |
2227 | | } |
2228 | | } |
2229 | | |
2230 | | /// An iterator over all states in a sparse DFA. |
2231 | | /// |
2232 | | /// This iterator yields tuples, where the first element is the state ID and |
2233 | | /// the second element is the state itself. |
2234 | | struct StateIter<'a, T> { |
2235 | | trans: &'a Transitions<T>, |
2236 | | id: usize, |
2237 | | } |
2238 | | |
2239 | | impl<'a, T: AsRef<[u8]>> Iterator for StateIter<'a, T> { |
2240 | | type Item = State<'a>; |
2241 | | |
2242 | 27.4k | fn next(&mut self) -> Option<State<'a>> { |
2243 | 27.4k | if self.id >= self.trans.sparse().len() { |
2244 | 1.68k | return None; |
2245 | 25.7k | } |
2246 | 25.7k | let state = self.trans.state(StateID::new_unchecked(self.id)); |
2247 | 25.7k | self.id = self.id + state.write_to_len(); |
2248 | 25.7k | Some(state) |
2249 | 27.4k | } |
2250 | | } |
2251 | | |
2252 | | impl<'a, T> fmt::Debug for StateIter<'a, T> { |
2253 | | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
2254 | | f.debug_struct("StateIter").field("id", &self.id).finish() |
2255 | | } |
2256 | | } |
2257 | | |
2258 | | /// A representation of a sparse DFA state that can be cheaply materialized |
2259 | | /// from a state identifier. |
2260 | | #[derive(Clone)] |
2261 | | struct State<'a> { |
2262 | | /// The identifier of this state. |
2263 | | id: StateID, |
2264 | | /// Whether this is a match state or not. |
2265 | | is_match: bool, |
2266 | | /// The number of transitions in this state. |
2267 | | ntrans: usize, |
2268 | | /// Pairs of input ranges, where there is one pair for each transition. |
2269 | | /// Each pair specifies an inclusive start and end byte range for the |
2270 | | /// corresponding transition. |
2271 | | input_ranges: &'a [u8], |
2272 | | /// Transitions to the next state. This slice contains native endian |
2273 | | /// encoded state identifiers. Thus, there are
2274 | | /// `ntrans * StateID::SIZE` bytes in this slice.
2275 | | next: &'a [u8], |
2276 | | /// If this is a match state, then this contains the pattern IDs that match |
2277 | | /// when the DFA is in this state. |
2278 | | /// |
2279 | | /// This is a contiguous sequence of 32-bit native endian encoded integers. |
2280 | | pattern_ids: &'a [u8], |
2281 | | /// An accelerator for this state, if present. If this state has no |
2282 | | /// accelerator, then this is an empty slice. When non-empty, this slice |
2283 | | /// has length at most 3 and corresponds to the exhaustive set of bytes |
2284 | | /// that must be seen in order to transition out of this state. |
2285 | | accel: &'a [u8], |
2286 | | } |
2287 | | |
2288 | | impl<'a> State<'a> { |
2289 | | /// Searches for the next transition given an input byte. If no such |
2290 | | /// transition could be found, then a dead state is returned. |
2291 | | /// |
2292 | | /// This is marked as inline to help dramatically boost sparse searching, |
2293 | | /// which decodes each state it enters to follow the next transition. |
2294 | | #[cfg_attr(feature = "perf-inline", inline(always))] |
2295 | 83.5k | fn next(&self, input: u8) -> StateID { |
2296 | | // This straight linear search was observed to be much better than |
2297 | | // binary search on ASCII haystacks, likely because a binary search |
2298 | | // visits the ASCII case last but a linear search sees it first. A |
2299 | | // binary search does do a little better on non-ASCII haystacks, but |
2300 | | // not by much. There might be a better trade off lurking here. |
2301 | 84.2k | for i in 0..(self.ntrans - 1) { |
2302 | 84.2k | let (start, end) = self.range(i); |
2303 | 84.2k | if start <= input && input <= end { |
2304 | 69.5k | return self.next_at(i); |
2305 | 14.7k | } |
2306 | | // We could bail early with an extra branch: if input < start, then
2307 | | // we know we'll never find a matching transition. Interestingly, |
2308 | | // this extra branch seems to not help performance, or will even |
2309 | | // hurt it. It's likely very dependent on the DFA itself and what |
2310 | | // is being searched. |
2311 | | } |
2312 | 14.0k | DEAD |
2313 | 83.5k | } |
2314 | | |
2315 | | /// Returns the next state ID for the special EOI transition. |
2316 | 12.4k | fn next_eoi(&self) -> StateID { |
2317 | 12.4k | self.next_at(self.ntrans - 1) |
2318 | 12.4k | } |
2319 | | |
2320 | | /// Returns the identifier for this state. |
2321 | 0 | fn id(&self) -> StateID { |
2322 | 0 | self.id |
2323 | 0 | } |
2324 | | |
2325 | | /// Returns the inclusive input byte range for the ith transition in this |
2326 | | /// state. |
2327 | 84.2k | fn range(&self, i: usize) -> (u8, u8) { |
2328 | 84.2k | (self.input_ranges[i * 2], self.input_ranges[i * 2 + 1]) |
2329 | 84.2k | } |
2330 | | |
2331 | | /// Returns the next state for the ith transition in this state. |
2332 | 170k | fn next_at(&self, i: usize) -> StateID { |
2333 | 170k | let start = i * StateID::SIZE; |
2334 | 170k | let end = start + StateID::SIZE; |
2335 | 170k | let bytes = self.next[start..end].try_into().unwrap(); |
2336 | 170k | StateID::from_ne_bytes_unchecked(bytes) |
2337 | 170k | } |
2338 | | |
2339 | | /// Returns the pattern ID for the given match index. If the match index |
2340 | | /// is invalid, then this panics. |
2341 | 23.2k | fn pattern_id(&self, match_index: usize) -> PatternID { |
2342 | 23.2k | let start = match_index * PatternID::SIZE; |
2343 | 23.2k | wire::read_pattern_id_unchecked(&self.pattern_ids[start..]).0 |
2344 | 23.2k | } |
2345 | | |
2346 | | /// Returns the total number of pattern IDs for this state. This is always |
2347 | | /// zero when `is_match` is false. |
2348 | 0 | fn pattern_len(&self) -> usize { |
2349 | 0 | assert_eq!(0, self.pattern_ids.len() % 4); |
2350 | 0 | self.pattern_ids.len() / 4 |
2351 | 0 | } |
2352 | | |
2353 | | /// Return an accelerator for this state. |
2354 | 42.2k | fn accelerator(&self) -> &'a [u8] { |
2355 | 42.2k | self.accel |
2356 | 42.2k | } |
2357 | | |
2358 | | /// Write the raw representation of this state to the given buffer using |
2359 | | /// the given endianness. |
2360 | | fn write_to<E: Endian>( |
2361 | | &self, |
2362 | | mut dst: &mut [u8], |
2363 | | ) -> Result<usize, SerializeError> { |
2364 | | let nwrite = self.write_to_len(); |
2365 | | if dst.len() < nwrite { |
2366 | | return Err(SerializeError::buffer_too_small( |
2367 | | "sparse state transitions", |
2368 | | )); |
2369 | | } |
2370 | | |
2371 | | let ntrans = |
2372 | | if self.is_match { self.ntrans | (1 << 15) } else { self.ntrans }; |
2373 | | E::write_u16(u16::try_from(ntrans).unwrap(), dst); |
2374 | | dst = &mut dst[size_of::<u16>()..]; |
2375 | | |
2376 | | dst[..self.input_ranges.len()].copy_from_slice(self.input_ranges); |
2377 | | dst = &mut dst[self.input_ranges.len()..]; |
2378 | | |
2379 | | for i in 0..self.ntrans { |
2380 | | E::write_u32(self.next_at(i).as_u32(), dst); |
2381 | | dst = &mut dst[StateID::SIZE..]; |
2382 | | } |
2383 | | |
2384 | | if self.is_match { |
2385 | | E::write_u32(u32::try_from(self.pattern_len()).unwrap(), dst); |
2386 | | dst = &mut dst[size_of::<u32>()..]; |
2387 | | for i in 0..self.pattern_len() { |
2388 | | let pid = self.pattern_id(i); |
2389 | | E::write_u32(pid.as_u32(), dst); |
2390 | | dst = &mut dst[PatternID::SIZE..]; |
2391 | | } |
2392 | | } |
2393 | | |
2394 | | dst[0] = u8::try_from(self.accel.len()).unwrap(); |
2395 | | dst[1..][..self.accel.len()].copy_from_slice(self.accel); |
2396 | | |
2397 | | Ok(nwrite) |
2398 | | } |
2399 | | |
2400 | | /// Return the total number of bytes that this state consumes in its |
2401 | | /// encoded form. |
2402 | 53.4k | fn write_to_len(&self) -> usize { |
2403 | 53.4k | let mut len = 2 |
2404 | 53.4k | + (self.ntrans * 2) |
2405 | 53.4k | + (self.ntrans * StateID::SIZE) |
2406 | 53.4k | + (1 + self.accel.len()); |
2407 | 53.4k | if self.is_match { |
2408 | 2.52k | len += size_of::<u32>() + self.pattern_ids.len(); |
2409 | 50.9k | } |
2410 | 53.4k | len |
2411 | 53.4k | } |
2412 | | } |
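
As a worked example of the size formula in `write_to_len`: a hypothetical non-match state with 3 transitions and no accelerator occupies 2 (header) + 3 * 2 (range pairs) + 3 * 4 (state IDs) + 1 (accelerator length byte) = 21 bytes.

```
/// Checks the encoded-size arithmetic above for a hypothetical
/// non-match state with 3 transitions and no accelerator.
fn main() {
    let (ntrans, accel_len) = (3usize, 0usize);
    let len = 2 + ntrans * 2 + ntrans * 4 + (1 + accel_len);
    assert_eq!(21, len);
}
```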
2413 | | |
2414 | | impl<'a> fmt::Debug for State<'a> { |
2415 | 0 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { |
2416 | 0 | let mut printed = false; |
2417 | 0 | for i in 0..(self.ntrans - 1) { |
2418 | 0 | let next = self.next_at(i); |
2419 | 0 | if next == DEAD { |
2420 | 0 | continue; |
2421 | 0 | } |
2422 | | |
2423 | 0 | if printed { |
2424 | 0 | write!(f, ", ")?; |
2425 | 0 | } |
2426 | 0 | let (start, end) = self.range(i); |
2427 | 0 | if start == end { |
2428 | 0 | write!(f, "{:?} => {:?}", DebugByte(start), next.as_usize())?; |
2429 | | } else { |
2430 | 0 | write!( |
2431 | 0 | f, |
2432 | 0 | "{:?}-{:?} => {:?}", |
2433 | 0 | DebugByte(start), |
2434 | 0 | DebugByte(end), |
2435 | 0 | next.as_usize(), |
2436 | 0 | )?; |
2437 | | } |
2438 | 0 | printed = true; |
2439 | | } |
2440 | 0 | let eoi = self.next_at(self.ntrans - 1); |
2441 | 0 | if eoi != DEAD { |
2442 | 0 | if printed { |
2443 | 0 | write!(f, ", ")?; |
2444 | 0 | } |
2445 | 0 | write!(f, "EOI => {:?}", eoi.as_usize())?; |
2446 | 0 | } |
2447 | 0 | Ok(()) |
2448 | 0 | } |
2449 | | } |
2450 | | |
2451 | | /// A representation of a mutable sparse DFA state that can be cheaply |
2452 | | /// materialized from a state identifier. |
2453 | | #[cfg(feature = "dfa-build")] |
2454 | | struct StateMut<'a> { |
2455 | | /// The identifier of this state. |
2456 | | id: StateID, |
2457 | | /// Whether this is a match state or not. |
2458 | | is_match: bool, |
2459 | | /// The number of transitions in this state. |
2460 | | ntrans: usize, |
2461 | | /// Pairs of input ranges, where there is one pair for each transition. |
2462 | | /// Each pair specifies an inclusive start and end byte range for the |
2463 | | /// corresponding transition. |
2464 | | input_ranges: &'a mut [u8], |
2465 | | /// Transitions to the next state. This slice contains native endian |
2466 | | /// encoded state identifiers. Thus, there are
2467 | | /// `ntrans * StateID::SIZE` bytes in this slice.
2468 | | next: &'a mut [u8], |
2469 | | /// If this is a match state, then this contains the pattern IDs that match |
2470 | | /// when the DFA is in this state. |
2471 | | /// |
2472 | | /// This is a contiguous sequence of 32-bit native endian encoded integers. |
2473 | | pattern_ids: &'a [u8], |
2474 | | /// An accelerator for this state, if present. If this state has no |
2475 | | /// accelerator, then this is an empty slice. When non-empty, this slice |
2476 | | /// has length at most 3 and corresponds to the exhaustive set of bytes |
2477 | | /// that must be seen in order to transition out of this state. |
2478 | | accel: &'a mut [u8], |
2479 | | } |
2480 | | |
2481 | | #[cfg(feature = "dfa-build")] |
2482 | | impl<'a> StateMut<'a> { |
2483 | | /// Sets the ith transition to the given state. |
2484 | 0 | fn set_next_at(&mut self, i: usize, next: StateID) { |
2485 | 0 | let start = i * StateID::SIZE; |
2486 | 0 | let end = start + StateID::SIZE; |
2487 | 0 | wire::write_state_id::<wire::NE>(next, &mut self.next[start..end]); |
2488 | 0 | } |
2489 | | } |
2490 | | |
2491 | | #[cfg(feature = "dfa-build")] |
2492 | | impl<'a> fmt::Debug for StateMut<'a> { |
2493 | 0 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { |
2494 | 0 | let state = State { |
2495 | 0 | id: self.id, |
2496 | 0 | is_match: self.is_match, |
2497 | 0 | ntrans: self.ntrans, |
2498 | 0 | input_ranges: self.input_ranges, |
2499 | 0 | next: self.next, |
2500 | 0 | pattern_ids: self.pattern_ids, |
2501 | 0 | accel: self.accel, |
2502 | 0 | }; |
2503 | 0 | fmt::Debug::fmt(&state, f) |
2504 | 0 | } |
2505 | | } |
2506 | | |
2507 | | // In order to validate everything, we not only need to make sure we |
2508 | | // can decode every state, but that every transition in every state |
2509 | | // points to a valid state. There are many duplicative transitions, so |
2510 | | // we record state IDs that we've verified so that we don't redo the |
2511 | | // decoding work. |
2512 | | // |
2513 | | // Except, when in no_std mode, we don't have dynamic memory allocation |
2514 | | // available to us, so we skip this optimization. It's not clear |
2515 | | // whether doing something more clever is worth it just yet. If you're |
2516 | | // profiling this code and need it to run faster, please file an issue. |
2517 | | // |
2518 | | // OK, so we also use this to record the set of valid state IDs, since
2519 | | // it is possible for a transition to point to an invalid state ID that |
2520 | | // still (somehow) deserializes to a valid state. So we need to make |
2521 | | // sure our transitions are limited to actually correct state IDs. |
2522 | | // The problem is, I'm not sure how to do this verification step in |
2523 | | // no-std no-alloc mode. I think we'd *have* to store the set of valid |
2524 | | // state IDs in the DFA itself. For now, we don't do this verification |
2525 | | // in no-std no-alloc mode. The worst thing that can happen is an |
2526 | | // incorrect result. But no panics or memory safety problems should |
2527 | | // result. Because we still do validate that the state itself is |
2528 | | // "valid" in the sense that everything it points to actually exists. |
2529 | | // |
2530 | | // ---AG |
2531 | | #[derive(Debug)] |
2532 | | struct Seen { |
2533 | | #[cfg(feature = "alloc")] |
2534 | | set: alloc::collections::BTreeSet<StateID>, |
2535 | | #[cfg(not(feature = "alloc"))] |
2536 | | set: core::marker::PhantomData<StateID>, |
2537 | | } |
2538 | | |
2539 | | #[cfg(feature = "alloc")] |
2540 | | impl Seen { |
2541 | 1.99k | fn new() -> Seen { |
2542 | 1.99k | Seen { set: alloc::collections::BTreeSet::new() } |
2543 | 1.99k | } |
2544 | 27.6k | fn insert(&mut self, id: StateID) { |
2545 | 27.6k | self.set.insert(id); |
2546 | 27.6k | } |
2547 | 362k | fn contains(&self, id: &StateID) -> bool { |
2548 | 362k | self.set.contains(id) |
2549 | 362k | } |
2550 | | } |
2551 | | |
2552 | | #[cfg(not(feature = "alloc"))] |
2553 | | impl Seen { |
2554 | | fn new() -> Seen { |
2555 | | Seen { set: core::marker::PhantomData } |
2556 | | } |
2557 | | fn insert(&mut self, _id: StateID) {} |
2558 | | fn contains(&self, _id: &StateID) -> bool { |
2559 | | true |
2560 | | } |
2561 | | } |
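 | |
 | | // Editorial sketch, not part of the original source: how a validation
 | | // pass can use a `Seen`-style set. Every state ID is recorded once, and
 | | // afterwards every transition target must be a member of the recorded
 | | // set. The types are simplified stand-ins (plain `u32` IDs, transitions
 | | // as `Vec`s) rather than the crate's own.
 | | #[cfg(feature = "alloc")]
 | | #[allow(dead_code)]
 | | fn validate_sketch(
 | |     states: &[(u32, alloc::vec::Vec<u32>)],
 | | ) -> Result<(), &'static str> {
 | |     let mut seen = alloc::collections::BTreeSet::new();
 | |     // First pass: record every state ID that decodes successfully.
 | |     for &(id, _) in states {
 | |         seen.insert(id);
 | |     }
 | |     // Second pass: every transition must point at a recorded state.
 | |     for (_, targets) in states {
 | |         for t in targets {
 | |             if !seen.contains(t) {
 | |                 return Err("transition to unknown state ID");
 | |             }
 | |         }
 | |     }
 | |     Ok(())
 | | }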
2562 | | |
2563 | | /* |
2564 | | /// A binary search routine specialized to a sparse DFA state's
2565 | | /// transitions. Specifically, the transitions are defined as a set of pairs |
2566 | | /// of input bytes that delineate an inclusive range of bytes. If the input |
2567 | | /// byte is in the range, then the corresponding transition is a match. |
2568 | | /// |
2569 | | /// This binary search accepts a slice of these pairs and returns the position |
2570 | | /// of the matching pair (the ith transition), or None if no matching pair |
2571 | | /// could be found. |
2572 | | /// |
2573 | | /// Note that this routine is not currently used, since it was observed
2574 | | /// either to decrease performance when searching ASCII or to provide too
2575 | | /// small a boost on non-ASCII haystacks to be worth it. However, we leave
2576 | | /// it here for posterity in case we can find a way to use it.
2577 | | /// |
2578 | | /// In theory, we could use the standard library's search routine if we could |
2579 | | /// cast a `&[u8]` to a `&[(u8, u8)]`, but I don't believe this is currently |
2580 | | /// guaranteed to be safe and is thus UB (since I don't think the in-memory |
2581 | | /// representation of `(u8, u8)` has been nailed down). One could define a |
2582 | | /// repr(C) type, but the casting doesn't seem justified. |
2583 | | #[cfg_attr(feature = "perf-inline", inline(always))] |
2584 | | fn binary_search_ranges(ranges: &[u8], needle: u8) -> Option<usize> { |
2585 | | debug_assert!(ranges.len() % 2 == 0, "ranges must have even length"); |
2586 | | debug_assert!(ranges.len() <= 512, "ranges should be short"); |
2587 | | |
2588 | | let (mut left, mut right) = (0, ranges.len() / 2); |
2589 | | while left < right { |
2590 | | let mid = (left + right) / 2; |
2591 | | let (b1, b2) = (ranges[mid * 2], ranges[mid * 2 + 1]); |
2592 | | if needle < b1 { |
2593 | | right = mid; |
2594 | | } else if needle > b2 { |
2595 | | left = mid + 1; |
2596 | | } else { |
2597 | | return Some(mid); |
2598 | | } |
2599 | | } |
2600 | | None |
2601 | | } |
2602 | | */ |
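 | |
 | | // Editorial sketch, not part of the original source: the straight linear
 | | // scan that a sparse state lookup performs in lieu of the binary search
 | | // above. On mostly-ASCII haystacks the matching range tends to appear
 | | // early in the sorted range list, which is one plausible reason the
 | | // linear scan was observed to win.
 | | #[allow(dead_code)]
 | | fn linear_search_ranges(ranges: &[u8], needle: u8) -> Option<usize> {
 | |     debug_assert!(ranges.len() % 2 == 0, "ranges must have even length");
 | |     for i in 0..ranges.len() / 2 {
 | |         let (b1, b2) = (ranges[i * 2], ranges[i * 2 + 1]);
 | |         // Ranges are sorted and non-overlapping, so once the needle falls
 | |         // below the start of a range, no later range can match it.
 | |         if needle < b1 {
 | |             return None;
 | |         }
 | |         if needle <= b2 {
 | |             return Some(i);
 | |         }
 | |     }
 | |     None
 | | }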
2603 | | |
2604 | | #[cfg(all(test, feature = "syntax", feature = "dfa-build"))] |
2605 | | mod tests { |
2606 | | use crate::{ |
2607 | | dfa::{dense::DFA, Automaton}, |
2608 | | nfa::thompson, |
2609 | | Input, MatchError, |
2610 | | }; |
2611 | | |
2612 | | // See the analogous test in src/hybrid/dfa.rs and src/dfa/dense.rs. |
2613 | | #[test] |
2614 | | fn heuristic_unicode_forward() { |
2615 | | let dfa = DFA::builder() |
2616 | | .configure(DFA::config().unicode_word_boundary(true)) |
2618 | | .build(r"\b[0-9]+\b") |
2619 | | .unwrap() |
2620 | | .to_sparse() |
2621 | | .unwrap(); |
2622 | | |
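 | | // β is encoded as \xCE\xB2. Starting the forward search at offset 2
 | | // forces the DFA to look at the byte just before the span (0xB2) to
 | | // resolve the word boundary, and since that byte is non-ASCII, the
 | | // heuristic Unicode word boundary support must quit there.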
2623 | | let input = Input::new("β123").range(2..); |
2624 | | let expected = MatchError::quit(0xB2, 1); |
2625 | | let got = dfa.try_search_fwd(&input); |
2626 | | assert_eq!(Err(expected), got); |
2627 | | |
2628 | | let input = Input::new("123β").range(..3); |
2629 | | let expected = MatchError::quit(0xCE, 3); |
2630 | | let got = dfa.try_search_fwd(&input); |
2631 | | assert_eq!(Err(expected), got); |
2632 | | } |
2633 | | |
2634 | | // See the analogous test in src/hybrid/dfa.rs and src/dfa/dense.rs. |
2635 | | #[test] |
2636 | | fn heuristic_unicode_reverse() { |
2637 | | let dfa = DFA::builder() |
2638 | | .configure(DFA::config().unicode_word_boundary(true)) |
2639 | | .thompson(thompson::Config::new().reverse(true)) |
2640 | | .build(r"\b[0-9]+\b") |
2641 | | .unwrap() |
2642 | | .to_sparse() |
2643 | | .unwrap(); |
2644 | | |
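 | | // The expected quit bytes are the same as in the forward test: even in
 | | // reverse, resolving the word boundary requires consulting the bytes
 | | // just outside the span, and those bytes are non-ASCII.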
2645 | | let input = Input::new("β123").range(2..); |
2646 | | let expected = MatchError::quit(0xB2, 1); |
2647 | | let got = dfa.try_search_rev(&input); |
2648 | | assert_eq!(Err(expected), got); |
2649 | | |
2650 | | let input = Input::new("123β").range(..3); |
2651 | | let expected = MatchError::quit(0xCE, 3); |
2652 | | let got = dfa.try_search_rev(&input); |
2653 | | assert_eq!(Err(expected), got); |
2654 | | } |
2655 | | } |