1// Copyright 2008 The RE2 Authors. All Rights Reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5// A DFA (deterministic finite automaton)-based regular expression search.
6//
7// The DFA search has two main parts: the construction of the automaton,
8// which is represented by a graph of State structures, and the execution
9// of the automaton over a given input string.
10//
11// The basic idea is that the State graph is constructed so that the
12// execution can simply start with a state s, and then for each byte c in
13// the input string, execute "s = s->next[c]", checking at each point whether
14// the current s represents a matching state.
15//
16// The simple explanation just given does convey the essence of this code,
17// but it omits the details of how the State graph gets constructed as well
18// as some performance-driven optimizations to the execution of the automaton.
19// All these details are explained in the comments for the code following
20// the definition of class DFA.
21//
22// See http://swtch.com/~rsc/regexp/ for a very bare-bones equivalent.
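//
// As a rough sketch (ignoring lazy state construction, match bookkeeping
// and the special states described below), the execution amounts to:
//
//   State* s = start;
//   for (const char* p = text.data(); p != text.data() + text.size(); p++)
//     s = s->next_[bytemap[*p & 0xFF]];
//
// The real loop is InlinedSearchLoop, further down in this file.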
23
24#include <stddef.h>
25#include <stdint.h>
26#include <stdio.h>
27#include <string.h>
28#include <algorithm>
29#include <atomic>
30#include <deque>
31#include <new>
32#include <string>
33#include <utility>
34#include <vector>
35
36#include "absl/base/call_once.h"
37#include "absl/base/macros.h"
38#include "absl/base/thread_annotations.h"
39#include "absl/container/flat_hash_map.h"
40#include "absl/container/flat_hash_set.h"
41#include "absl/strings/str_format.h"
42#include "absl/synchronization/mutex.h"
43#include "absl/types/span.h"
44#include "util/logging.h"
45#include "util/strutil.h"
46#include "re2/pod_array.h"
47#include "re2/prog.h"
48#include "re2/re2.h"
49#include "re2/sparse_set.h"
50
51// Silence "zero-sized array in struct/union" warning for DFA::State::next_.
52#ifdef _MSC_VER
53#pragma warning(disable: 4200)
54#endif
55
56namespace re2 {
57
58// Controls whether the DFA should bail out early if the NFA would be faster.
59static bool dfa_should_bail_when_slow = true;
60
61void Prog::TESTING_ONLY_set_dfa_should_bail_when_slow(bool b) {
62 dfa_should_bail_when_slow = b;
63}
64
65// Changing this to true compiles in prints that trace execution of the DFA.
66// Generates a lot of output -- only useful for debugging.
67static const bool ExtraDebug = false;
68
69// A DFA implementation of a regular expression program.
70// Since this is entirely a forward declaration mandated by C++,
71// some of the comments here are better understood after reading
72// the comments in the sections that follow the DFA definition.
73class DFA {
74 public:
75 DFA(Prog* prog, Prog::MatchKind kind, int64_t max_mem);
76 ~DFA();
77 bool ok() const { return !init_failed_; }
78 Prog::MatchKind kind() { return kind_; }
79
80 // Searches for the regular expression in text, which is considered
81 // as a subsection of context for the purposes of interpreting flags
82 // like ^ and $ and \A and \z.
83 // Returns whether a match was found.
84 // If a match is found, sets *ep to the end point of the best match in text.
85 // If "anchored", the match must begin at the start of text.
86 // If "want_earliest_match", the match that ends first is used, not
87 // necessarily the best one.
88 // If "run_forward" is true, the DFA runs from text.begin() to text.end().
89 // If it is false, the DFA runs from text.end() to text.begin(),
90 // returning the leftmost end of the match instead of the rightmost one.
91 // If the DFA cannot complete the search (for example, if it is out of
92 // memory), it sets *failed and returns false.
93 bool Search(absl::string_view text, absl::string_view context, bool anchored,
94 bool want_earliest_match, bool run_forward, bool* failed,
95 const char** ep, SparseSet* matches);
96
97 // Builds out all states for the entire DFA.
98 // If cb is not empty, it receives one callback per state built.
99 // Returns the number of states built.
100 // FOR TESTING OR EXPERIMENTAL PURPOSES ONLY.
101 int BuildAllStates(const Prog::DFAStateCallback& cb);
102
103 // Computes min and max for matching strings. Won't return strings
104 // bigger than maxlen.
105 bool PossibleMatchRange(std::string* min, std::string* max, int maxlen);
106
107 // These data structures are logically private, but C++ makes it too
108 // difficult to mark them as such.
109 class RWLocker;
110 class StateSaver;
111 class Workq;
112
113 // A single DFA state. The DFA is represented as a graph of these
114 // States, linked by the next_ pointers. If in state s and reading
115 // byte c, the next state should be s->next_[c].
116 struct State {
117 inline bool IsMatch() const { return (flag_ & kFlagMatch) != 0; }
118
119 template <typename H>
120 friend H AbslHashValue(H h, const State& a) {
121 const absl::Span<const int> ainst(a.inst_, a.ninst_);
122 return H::combine(std::move(h), a.flag_, ainst);
123 }
124
125 friend bool operator==(const State& a, const State& b) {
126 const absl::Span<const int> ainst(a.inst_, a.ninst_);
127 const absl::Span<const int> binst(b.inst_, b.ninst_);
128 return &a == &b || (a.flag_ == b.flag_ && ainst == binst);
129 }
130
131 int* inst_; // Instruction pointers in the state.
132 int ninst_; // # of inst_ pointers.
133 uint32_t flag_; // Empty string bitfield flags in effect on the way
134 // into this state, along with kFlagMatch if this
135 // is a matching state.
136
137// Work around the bug affecting flexible array members in GCC 6.x (for x >= 1).
138// (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70932)
139#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ == 6 && __GNUC_MINOR__ >= 1
140 std::atomic<State*> next_[0]; // Outgoing arrows from State,
141#else
142 std::atomic<State*> next_[]; // Outgoing arrows from State,
143#endif
144
145 // one per input byte class
146 };
147
148 enum {
149 kByteEndText = 256, // imaginary byte at end of text
150
151 kFlagEmptyMask = 0xFF, // State.flag_: bits holding kEmptyXXX flags
152 kFlagMatch = 0x0100, // State.flag_: this is a matching state
153 kFlagLastWord = 0x0200, // State.flag_: last byte was a word char
154 kFlagNeedShift = 16, // needed kEmpty bits are or'ed in shifted left
155 };
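
  // For example (illustrative values): a matching state whose pending
  // kInstEmptyWidth instructions still need to see a word boundary would
  // carry
  //   flag_ = kFlagMatch | (kEmptyWordBoundary << kFlagNeedShift)
  // with the low kFlagEmptyMask bits holding the empty-width flags that were
  // in effect on the way into the state.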
156
157 struct StateHash {
158 size_t operator()(const State* a) const {
159 DCHECK(a != NULL);
160 return absl::Hash<State>()(*a);
161 }
162 };
163
164 struct StateEqual {
165 bool operator()(const State* a, const State* b) const {
166 DCHECK(a != NULL);
167 DCHECK(b != NULL);
168 return *a == *b;
169 }
170 };
171
172 typedef absl::flat_hash_set<State*, StateHash, StateEqual> StateSet;
173
174 private:
175 // Make it easier to swap in a scalable reader-writer mutex.
176 using CacheMutex = absl::Mutex;
177
178 enum {
179 // Indices into start_ for unanchored searches.
180 // Add kStartAnchored for anchored searches.
181 kStartBeginText = 0, // text at beginning of context
182 kStartBeginLine = 2, // text at beginning of line
183 kStartAfterWordChar = 4, // text follows a word character
184 kStartAfterNonWordChar = 6, // text follows non-word character
185 kMaxStart = 8,
186
187 kStartAnchored = 1,
188 };
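
  // For example, the cached start state for a search whose text begins just
  // after a '\n' lives in start_[kStartBeginLine], or in
  // start_[kStartBeginLine + kStartAnchored] if the search is anchored
  // (see AnalyzeSearch, below).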
189
190 // Resets the DFA State cache, flushing all saved State* information.
191 // Releases and reacquires cache_mutex_ via cache_lock, so any
  // State* pointers existing before the call are no longer valid after it.
193 // Use a StateSaver to preserve important states across the call.
194 // cache_mutex_.r <= L < mutex_
195 // After: cache_mutex_.w <= L < mutex_
196 void ResetCache(RWLocker* cache_lock);
197
198 // Looks up and returns the State corresponding to a Workq.
199 // L >= mutex_
200 State* WorkqToCachedState(Workq* q, Workq* mq, uint32_t flag);
201
202 // Looks up and returns a State matching the inst, ninst, and flag.
203 // L >= mutex_
204 State* CachedState(int* inst, int ninst, uint32_t flag);
205
206 // Clear the cache entirely.
207 // Must hold cache_mutex_.w or be in destructor.
208 void ClearCache();
209
210 // Converts a State into a Workq: the opposite of WorkqToCachedState.
211 // L >= mutex_
212 void StateToWorkq(State* s, Workq* q);
213
214 // Runs a State on a given byte, returning the next state.
215 State* RunStateOnByteUnlocked(State*, int); // cache_mutex_.r <= L < mutex_
216 State* RunStateOnByte(State*, int); // L >= mutex_
217
218 // Runs a Workq on a given byte followed by a set of empty-string flags,
219 // producing a new Workq in nq. If a match instruction is encountered,
220 // sets *ismatch to true.
221 // L >= mutex_
222 void RunWorkqOnByte(Workq* q, Workq* nq,
223 int c, uint32_t flag, bool* ismatch);
224
225 // Runs a Workq on a set of empty-string flags, producing a new Workq in nq.
226 // L >= mutex_
227 void RunWorkqOnEmptyString(Workq* q, Workq* nq, uint32_t flag);
228
229 // Adds the instruction id to the Workq, following empty arrows
230 // according to flag.
231 // L >= mutex_
232 void AddToQueue(Workq* q, int id, uint32_t flag);
233
234 // For debugging, returns a text representation of State.
235 static std::string DumpState(State* state);
236
237 // For debugging, returns a text representation of a Workq.
238 static std::string DumpWorkq(Workq* q);
239
240 // Search parameters
241 struct SearchParams {
242 SearchParams(absl::string_view text, absl::string_view context,
243 RWLocker* cache_lock)
244 : text(text),
245 context(context),
246 anchored(false),
247 can_prefix_accel(false),
248 want_earliest_match(false),
249 run_forward(false),
250 start(NULL),
251 cache_lock(cache_lock),
252 failed(false),
253 ep(NULL),
254 matches(NULL) {}
255
256 absl::string_view text;
257 absl::string_view context;
258 bool anchored;
259 bool can_prefix_accel;
260 bool want_earliest_match;
261 bool run_forward;
262 State* start;
263 RWLocker* cache_lock;
264 bool failed; // "out" parameter: whether search gave up
265 const char* ep; // "out" parameter: end pointer for match
266 SparseSet* matches;
267
268 private:
269 SearchParams(const SearchParams&) = delete;
270 SearchParams& operator=(const SearchParams&) = delete;
271 };
272
273 // Before each search, the parameters to Search are analyzed by
274 // AnalyzeSearch to determine the state in which to start.
275 struct StartInfo {
276 StartInfo() : start(NULL) {}
277 std::atomic<State*> start;
278 };
279
280 // Fills in params->start and params->can_prefix_accel using
281 // the other search parameters. Returns true on success,
282 // false on failure.
283 // cache_mutex_.r <= L < mutex_
284 bool AnalyzeSearch(SearchParams* params);
285 bool AnalyzeSearchHelper(SearchParams* params, StartInfo* info,
286 uint32_t flags);
287
288 // The generic search loop, inlined to create specialized versions.
289 // cache_mutex_.r <= L < mutex_
290 // Might unlock and relock cache_mutex_ via params->cache_lock.
291 template <bool can_prefix_accel,
292 bool want_earliest_match,
293 bool run_forward>
294 inline bool InlinedSearchLoop(SearchParams* params);
295
296 // The specialized versions of InlinedSearchLoop. The three letters
297 // at the ends of the name denote the true/false values used as the
298 // last three parameters of InlinedSearchLoop.
299 // cache_mutex_.r <= L < mutex_
300 // Might unlock and relock cache_mutex_ via params->cache_lock.
301 bool SearchFFF(SearchParams* params);
302 bool SearchFFT(SearchParams* params);
303 bool SearchFTF(SearchParams* params);
304 bool SearchFTT(SearchParams* params);
305 bool SearchTFF(SearchParams* params);
306 bool SearchTFT(SearchParams* params);
307 bool SearchTTF(SearchParams* params);
308 bool SearchTTT(SearchParams* params);
309
310 // The main search loop: calls an appropriate specialized version of
311 // InlinedSearchLoop.
312 // cache_mutex_.r <= L < mutex_
313 // Might unlock and relock cache_mutex_ via params->cache_lock.
314 bool FastSearchLoop(SearchParams* params);
315
316
317 // Looks up bytes in bytemap_ but handles case c == kByteEndText too.
318 int ByteMap(int c) {
319 if (c == kByteEndText)
320 return prog_->bytemap_range();
321 return prog_->bytemap()[c];
322 }
323
324 // Constant after initialization.
325 Prog* prog_; // The regular expression program to run.
326 Prog::MatchKind kind_; // The kind of DFA.
327 bool init_failed_; // initialization failed (out of memory)
328
329 absl::Mutex mutex_; // mutex_ >= cache_mutex_.r
330
331 // Scratch areas, protected by mutex_.
332 Workq* q0_; // Two pre-allocated work queues.
333 Workq* q1_;
334 PODArray<int> stack_; // Pre-allocated stack for AddToQueue
335
336 // State* cache. Many threads use and add to the cache simultaneously,
337 // holding cache_mutex_ for reading and mutex_ (above) when adding.
338 // If the cache fills and needs to be discarded, the discarding is done
339 // while holding cache_mutex_ for writing, to avoid interrupting other
340 // readers. Any State* pointers are only valid while cache_mutex_
341 // is held.
342 CacheMutex cache_mutex_;
343 int64_t mem_budget_; // Total memory budget for all States.
344 int64_t state_budget_; // Amount of memory remaining for new States.
345 StateSet state_cache_; // All States computed so far.
346 StartInfo start_[kMaxStart];
347
348 DFA(const DFA&) = delete;
349 DFA& operator=(const DFA&) = delete;
350};
351
352// Shorthand for casting to uint8_t*.
353static inline const uint8_t* BytePtr(const void* v) {
354 return reinterpret_cast<const uint8_t*>(v);
355}
356
357// Work queues
358
359// Marks separate thread groups of different priority
360// in the work queue when in leftmost-longest matching mode.
361#define Mark (-1)
362
363// Separates the match IDs from the instructions in inst_.
364// Used only for "many match" DFA states.
365#define MatchSep (-2)
366
367// Internally, the DFA uses a sparse array of
368// program instruction pointers as a work queue.
369// In leftmost longest mode, marks separate sections
370// of workq that started executing at different
371// locations in the string (earlier locations first).
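//
// For example (illustrative), a queue holding instruction ids 1 and 4 from
// the current starting position and id 7 from a later starting position
// iterates as 1, 4, <mark>, 7; DumpWorkq prints it as "1,4|7".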
372class DFA::Workq : public SparseSet {
373 public:
374 // Constructor: n is number of normal slots, maxmark number of mark slots.
375 Workq(int n, int maxmark) :
376 SparseSet(n+maxmark),
377 n_(n),
378 maxmark_(maxmark),
379 nextmark_(n),
380 last_was_mark_(true) {
381 }
382
383 bool is_mark(int i) { return i >= n_; }
384
385 int maxmark() { return maxmark_; }
386
387 void clear() {
388 SparseSet::clear();
389 nextmark_ = n_;
390 }
391
392 void mark() {
393 if (last_was_mark_)
394 return;
    last_was_mark_ = true;
396 SparseSet::insert_new(nextmark_++);
397 }
398
399 int size() {
400 return n_ + maxmark_;
401 }
402
403 void insert(int id) {
404 if (contains(id))
405 return;
406 insert_new(id);
407 }
408
409 void insert_new(int id) {
410 last_was_mark_ = false;
411 SparseSet::insert_new(id);
412 }
413
414 private:
415 int n_; // size excluding marks
416 int maxmark_; // maximum number of marks
417 int nextmark_; // id of next mark
418 bool last_was_mark_; // last inserted was mark
419
420 Workq(const Workq&) = delete;
421 Workq& operator=(const Workq&) = delete;
422};
423
424DFA::DFA(Prog* prog, Prog::MatchKind kind, int64_t max_mem)
425 : prog_(prog),
426 kind_(kind),
427 init_failed_(false),
428 q0_(NULL),
429 q1_(NULL),
430 mem_budget_(max_mem) {
431 if (ExtraDebug)
432 absl::FPrintF(stderr, "\nkind %d\n%s\n", kind_, prog_->DumpUnanchored());
433 int nmark = 0;
434 if (kind_ == Prog::kLongestMatch)
435 nmark = prog_->size();
436 // See DFA::AddToQueue() for why this is so.
437 int nstack = prog_->inst_count(kInstCapture) +
438 prog_->inst_count(kInstEmptyWidth) +
439 prog_->inst_count(kInstNop) +
440 nmark + 1; // + 1 for start inst
441
442 // Account for space needed for DFA, q0, q1, stack.
443 mem_budget_ -= sizeof(DFA);
444 mem_budget_ -= (prog_->size() + nmark) *
445 (sizeof(int)+sizeof(int)) * 2; // q0, q1
446 mem_budget_ -= nstack * sizeof(int); // stack
447 if (mem_budget_ < 0) {
448 init_failed_ = true;
449 return;
450 }
451
452 state_budget_ = mem_budget_;
453
454 // Make sure there is a reasonable amount of working room left.
455 // At minimum, the search requires room for two states in order
456 // to limp along, restarting frequently. We'll get better performance
457 // if there is room for a larger number of states, say 20.
458 // Note that a state stores list heads only, so we use the program
459 // list count for the upper bound, not the program size.
460 int nnext = prog_->bytemap_range() + 1; // + 1 for kByteEndText slot
461 int64_t one_state = sizeof(State) + nnext*sizeof(std::atomic<State*>) +
462 (prog_->list_count()+nmark)*sizeof(int);
463 if (state_budget_ < 20*one_state) {
464 init_failed_ = true;
465 return;
466 }
467
468 q0_ = new Workq(prog_->size(), nmark);
469 q1_ = new Workq(prog_->size(), nmark);
470 stack_ = PODArray<int>(nstack);
471}
472
473DFA::~DFA() {
474 delete q0_;
475 delete q1_;
476 ClearCache();
477}
478
479// In the DFA state graph, s->next[c] == NULL means that the
480// state has not yet been computed and needs to be. We need
481// a different special value to signal that s->next[c] is a
482// state that can never lead to a match (and thus the search
483// can be called off). Hence DeadState.
484#define DeadState reinterpret_cast<State*>(1)
485
486// Signals that the rest of the string matches no matter what it is.
487#define FullMatchState reinterpret_cast<State*>(2)
488
489#define SpecialStateMax FullMatchState
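
// Code that receives a State* checks for the special values before
// dereferencing it, along the lines of:
//
//   if (s <= SpecialStateMax) {
//     // s is NULL, DeadState or FullMatchState; handle it specially.
//   }
//
// as RunStateOnByte and InlinedSearchLoop do below.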
490
491// Debugging printouts
492
493// For debugging, returns a string representation of the work queue.
494std::string DFA::DumpWorkq(Workq* q) {
495 std::string s;
496 const char* sep = "";
497 for (Workq::iterator it = q->begin(); it != q->end(); ++it) {
498 if (q->is_mark(*it)) {
499 s += "|";
500 sep = "";
501 } else {
502 s += absl::StrFormat("%s%d", sep, *it);
503 sep = ",";
504 }
505 }
506 return s;
507}
508
509// For debugging, returns a string representation of the state.
510std::string DFA::DumpState(State* state) {
511 if (state == NULL)
512 return "_";
513 if (state == DeadState)
514 return "X";
515 if (state == FullMatchState)
516 return "*";
517 std::string s;
518 const char* sep = "";
519 s += absl::StrFormat("(%p)", state);
520 for (int i = 0; i < state->ninst_; i++) {
521 if (state->inst_[i] == Mark) {
522 s += "|";
523 sep = "";
524 } else if (state->inst_[i] == MatchSep) {
525 s += "||";
526 sep = "";
527 } else {
528 s += absl::StrFormat("%s%d", sep, state->inst_[i]);
529 sep = ",";
530 }
531 }
532 s += absl::StrFormat(" flag=%#x", state->flag_);
533 return s;
534}
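
// For instance (pointer and ids purely illustrative), a state containing
// instructions 14 and 22, a Mark, and instruction 35, with kFlagMatch and
// kFlagLastWord set, dumps as "(0x7f..)14,22|35 flag=0x300".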
535
536//////////////////////////////////////////////////////////////////////
537//
538// DFA state graph construction.
539//
540// The DFA state graph is a heavily-linked collection of State* structures.
541// The state_cache_ is a set of all the State structures ever allocated,
542// so that if the same state is reached by two different paths,
543// the same State structure can be used. This reduces allocation
544// requirements and also avoids duplication of effort across the two
545// identical states.
546//
547// A State is defined by an ordered list of instruction ids and a flag word.
548//
549// The choice of an ordered list of instructions differs from a typical
550// textbook DFA implementation, which would use an unordered set.
551// Textbook descriptions, however, only care about whether
552// the DFA matches, not where it matches in the text. To decide where the
553// DFA matches, we need to mimic the behavior of the dominant backtracking
554// implementations like PCRE, which try one possible regular expression
555// execution, then another, then another, stopping when one of them succeeds.
556// The DFA execution tries these many executions in parallel, representing
557// each by an instruction id. These pointers are ordered in the State.inst_
558// list in the same order that the executions would happen in a backtracking
559// search: if a match is found during execution of inst_[2], inst_[i] for i>=3
560// can be discarded.
561//
562// Textbooks also typically do not consider context-aware empty string operators
563// like ^ or $. These are handled by the flag word, which specifies the set
564// of empty-string operators that should be matched when executing at the
565// current text position. These flag bits are defined in prog.h.
566// The flag word also contains two DFA-specific bits: kFlagMatch if the state
567// is a matching state (one that reached a kInstMatch in the program)
568// and kFlagLastWord if the last processed byte was a word character, for the
569// implementation of \B and \b.
570//
571// The flag word also contains, shifted up 16 bits, the bits looked for by
572// any kInstEmptyWidth instructions in the state. These provide a useful
573// summary indicating when new flags might be useful.
574//
575// The permanent representation of a State's instruction ids is just an array,
576// but while a state is being analyzed, these instruction ids are represented
577// as a Workq, which is an array that allows iteration in insertion order.
578
579// NOTE(rsc): The choice of State construction determines whether the DFA
580// mimics backtracking implementations (so-called leftmost first matching) or
581// traditional DFA implementations (so-called leftmost longest matching as
582// prescribed by POSIX). This implementation chooses to mimic the
583// backtracking implementations, because we want to replace PCRE. To get
584// POSIX behavior, the states would need to be considered not as a simple
585// ordered list of instruction ids, but as a list of unordered sets of instruction
586// ids. A match by a state in one set would inhibit the running of sets
587// farther down the list but not other instruction ids in the same set. Each
588// set would correspond to matches beginning at a given point in the string.
589// This is implemented by separating different sets with Mark pointers.
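//
// For example (illustrative), matching "a|ab" against "ab": a backtracking
// engine tries the "a" branch first and reports "a", so leftmost-first mode
// must do the same, whereas POSIX leftmost-longest mode would report "ab".
// The ordered inst_ list is what lets the DFA reproduce the former; the
// Mark-separated sets are what POSIX behavior would require.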
590
591// Looks in the State cache for a State matching q, flag.
592// If one is found, returns it. If one is not found, allocates one,
593// inserts it in the cache, and returns it.
594// If mq is not null, MatchSep and the match IDs in mq will be appended
595// to the State.
596DFA::State* DFA::WorkqToCachedState(Workq* q, Workq* mq, uint32_t flag) {
597 //mutex_.AssertHeld();
598
599 // Construct array of instruction ids for the new state.
600 // Only ByteRange, EmptyWidth, and Match instructions are useful to keep:
601 // those are the only operators with any effect in
602 // RunWorkqOnEmptyString or RunWorkqOnByte.
603 PODArray<int> inst(q->size());
604 int n = 0;
605 uint32_t needflags = 0; // flags needed by kInstEmptyWidth instructions
606 bool sawmatch = false; // whether queue contains guaranteed kInstMatch
607 bool sawmark = false; // whether queue contains a Mark
608 if (ExtraDebug)
609 absl::FPrintF(stderr, "WorkqToCachedState %s [%#x]", DumpWorkq(q), flag);
610 for (Workq::iterator it = q->begin(); it != q->end(); ++it) {
611 int id = *it;
612 if (sawmatch && (kind_ == Prog::kFirstMatch || q->is_mark(id)))
613 break;
614 if (q->is_mark(id)) {
615 if (n > 0 && inst[n-1] != Mark) {
616 sawmark = true;
617 inst[n++] = Mark;
618 }
619 continue;
620 }
621 Prog::Inst* ip = prog_->inst(id);
622 switch (ip->opcode()) {
623 case kInstAltMatch:
624 // This state will continue to a match no matter what
625 // the rest of the input is. If it is the highest priority match
626 // being considered, return the special FullMatchState
627 // to indicate that it's all matches from here out.
628 if (kind_ != Prog::kManyMatch &&
629 (kind_ != Prog::kFirstMatch ||
630 (it == q->begin() && ip->greedy(prog_))) &&
631 (kind_ != Prog::kLongestMatch || !sawmark) &&
632 (flag & kFlagMatch)) {
633 if (ExtraDebug)
634 absl::FPrintF(stderr, " -> FullMatchState\n");
635 return FullMatchState;
636 }
637 ABSL_FALLTHROUGH_INTENDED;
638 default:
639 // Record iff id is the head of its list, which must
640 // be the case if id-1 is the last of *its* list. :)
641 if (prog_->inst(id-1)->last())
642 inst[n++] = *it;
643 if (ip->opcode() == kInstEmptyWidth)
644 needflags |= ip->empty();
645 if (ip->opcode() == kInstMatch && !prog_->anchor_end())
646 sawmatch = true;
647 break;
648 }
649 }
650 DCHECK_LE(n, q->size());
651 if (n > 0 && inst[n-1] == Mark)
652 n--;
653
654 // If there are no empty-width instructions waiting to execute,
655 // then the extra flag bits will not be used, so there is no
656 // point in saving them. (Discarding them reduces the number
657 // of distinct states.)
658 if (needflags == 0)
659 flag &= kFlagMatch;
660
661 // NOTE(rsc): The code above cannot do flag &= needflags,
662 // because if the right flags were present to pass the current
663 // kInstEmptyWidth instructions, new kInstEmptyWidth instructions
664 // might be reached that in turn need different flags.
665 // The only sure thing is that if there are no kInstEmptyWidth
666 // instructions at all, no flags will be needed.
667 // We could do the extra work to figure out the full set of
668 // possibly needed flags by exploring past the kInstEmptyWidth
669 // instructions, but the check above -- are any flags needed
670 // at all? -- handles the most common case. More fine-grained
671 // analysis can only be justified by measurements showing that
672 // too many redundant states are being allocated.
673
674 // If there are no Insts in the list, it's a dead state,
675 // which is useful to signal with a special pointer so that
676 // the execution loop can stop early. This is only okay
677 // if the state is *not* a matching state.
678 if (n == 0 && flag == 0) {
679 if (ExtraDebug)
680 absl::FPrintF(stderr, " -> DeadState\n");
681 return DeadState;
682 }
683
684 // If we're in longest match mode, the state is a sequence of
685 // unordered state sets separated by Marks. Sort each set
686 // to canonicalize, to reduce the number of distinct sets stored.
687 if (kind_ == Prog::kLongestMatch) {
688 int* ip = inst.data();
689 int* ep = ip + n;
690 while (ip < ep) {
691 int* markp = ip;
692 while (markp < ep && *markp != Mark)
693 markp++;
694 std::sort(ip, markp);
695 if (markp < ep)
696 markp++;
697 ip = markp;
698 }
699 }
700
701 // If we're in many match mode, canonicalize for similar reasons:
702 // we have an unordered set of states (i.e. we don't have Marks)
703 // and sorting will reduce the number of distinct sets stored.
704 if (kind_ == Prog::kManyMatch) {
705 int* ip = inst.data();
706 int* ep = ip + n;
707 std::sort(ip, ep);
708 }
709
710 // Append MatchSep and the match IDs in mq if necessary.
711 if (mq != NULL) {
712 inst[n++] = MatchSep;
713 for (Workq::iterator i = mq->begin(); i != mq->end(); ++i) {
714 int id = *i;
715 Prog::Inst* ip = prog_->inst(id);
716 if (ip->opcode() == kInstMatch)
717 inst[n++] = ip->match_id();
718 }
719 }
720
721 // Save the needed empty-width flags in the top bits for use later.
722 flag |= needflags << kFlagNeedShift;
723
724 State* state = CachedState(inst.data(), n, flag);
725 return state;
726}
727
728// Looks in the State cache for a State matching inst, ninst, flag.
729// If one is found, returns it. If one is not found, allocates one,
730// inserts it in the cache, and returns it.
731DFA::State* DFA::CachedState(int* inst, int ninst, uint32_t flag) {
732 //mutex_.AssertHeld();
733
734 // Look in the cache for a pre-existing state.
735 // We have to initialise the struct like this because otherwise
736 // MSVC will complain about the flexible array member. :(
737 State state;
738 state.inst_ = inst;
739 state.ninst_ = ninst;
740 state.flag_ = flag;
741 StateSet::iterator it = state_cache_.find(&state);
742 if (it != state_cache_.end()) {
743 if (ExtraDebug)
744 absl::FPrintF(stderr, " -cached-> %s\n", DumpState(*it));
745 return *it;
746 }
747
748 // Must have enough memory for new state.
749 // In addition to what we're going to allocate,
750 // the state cache hash table seems to incur about 18 bytes per
751 // State*. Worst case for non-small sets is it being half full, where each
752 // value present takes up 1 byte hash sample plus the pointer itself.
753 const int kStateCacheOverhead = 18;
754 int nnext = prog_->bytemap_range() + 1; // + 1 for kByteEndText slot
755 int mem = sizeof(State) + nnext*sizeof(std::atomic<State*>) +
756 ninst*sizeof(int);
757 if (mem_budget_ < mem + kStateCacheOverhead) {
758 mem_budget_ = -1;
759 return NULL;
760 }
761 mem_budget_ -= mem + kStateCacheOverhead;
762
763 // Allocate new state along with room for next_ and inst_.
764 char* space = std::allocator<char>().allocate(mem);
765 State* s = new (space) State;
766 (void) new (s->next_) std::atomic<State*>[nnext];
  // Work around an unfortunate bug in older versions of libstdc++.
768 // (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=64658)
769 for (int i = 0; i < nnext; i++)
770 (void) new (s->next_ + i) std::atomic<State*>(NULL);
771 s->inst_ = new (s->next_ + nnext) int[ninst];
772 memmove(s->inst_, inst, ninst*sizeof s->inst_[0]);
773 s->ninst_ = ninst;
774 s->flag_ = flag;
775 if (ExtraDebug)
776 absl::FPrintF(stderr, " -> %s\n", DumpState(s));
777
778 // Put state in cache and return it.
779 state_cache_.insert(s);
780 return s;
781}
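
// To recap the allocation in CachedState(): each cached State is one block
// laid out as the State header, then the nnext atomic<State*> slots backing
// next_, then the ninst ints backing inst_:
//
//   [ State | next_[0..nnext-1] | inst_[0..ninst-1] ]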
782
783// Clear the cache. Must hold cache_mutex_.w or be in destructor.
784void DFA::ClearCache() {
785 StateSet::iterator begin = state_cache_.begin();
786 StateSet::iterator end = state_cache_.end();
787 while (begin != end) {
788 StateSet::iterator tmp = begin;
789 ++begin;
790 // Deallocate the blob of memory that we allocated in DFA::CachedState().
791 // We recompute mem in order to benefit from sized delete where possible.
792 int ninst = (*tmp)->ninst_;
793 int nnext = prog_->bytemap_range() + 1; // + 1 for kByteEndText slot
794 int mem = sizeof(State) + nnext*sizeof(std::atomic<State*>) +
795 ninst*sizeof(int);
796 std::allocator<char>().deallocate(reinterpret_cast<char*>(*tmp), mem);
797 }
798 state_cache_.clear();
799}
800
801// Copies insts in state s to the work queue q.
802void DFA::StateToWorkq(State* s, Workq* q) {
803 q->clear();
804 for (int i = 0; i < s->ninst_; i++) {
805 if (s->inst_[i] == Mark) {
806 q->mark();
807 } else if (s->inst_[i] == MatchSep) {
808 // Nothing after this is an instruction!
809 break;
810 } else {
811 // Explore from the head of the list.
812 AddToQueue(q, s->inst_[i], s->flag_ & kFlagEmptyMask);
813 }
814 }
815}
816
817// Adds ip to the work queue, following empty arrows according to flag.
818void DFA::AddToQueue(Workq* q, int id, uint32_t flag) {
819
820 // Use stack_ to hold our stack of instructions yet to process.
821 // It was preallocated as follows:
822 // one entry per Capture;
823 // one entry per EmptyWidth; and
824 // one entry per Nop.
825 // This reflects the maximum number of stack pushes that each can
826 // perform. (Each instruction can be processed at most once.)
827 // When using marks, we also added nmark == prog_->size().
828 // (Otherwise, nmark == 0.)
829 int* stk = stack_.data();
830 int nstk = 0;
831
832 stk[nstk++] = id;
833 while (nstk > 0) {
834 DCHECK_LE(nstk, stack_.size());
835 id = stk[--nstk];
836
837 Loop:
838 if (id == Mark) {
839 q->mark();
840 continue;
841 }
842
843 if (id == 0)
844 continue;
845
846 // If ip is already on the queue, nothing to do.
847 // Otherwise add it. We don't actually keep all the
848 // ones that get added, but adding all of them here
849 // increases the likelihood of q->contains(id),
850 // reducing the amount of duplicated work.
851 if (q->contains(id))
852 continue;
853 q->insert_new(id);
854
855 // Process instruction.
856 Prog::Inst* ip = prog_->inst(id);
857 switch (ip->opcode()) {
858 default:
859 LOG(DFATAL) << "unhandled opcode: " << ip->opcode();
860 break;
861
862 case kInstByteRange: // just save these on the queue
863 case kInstMatch:
864 if (ip->last())
865 break;
866 id = id+1;
867 goto Loop;
868
869 case kInstCapture: // DFA treats captures as no-ops.
870 case kInstNop:
871 if (!ip->last())
872 stk[nstk++] = id+1;
873
874 // If this instruction is the [00-FF]* loop at the beginning of
875 // a leftmost-longest unanchored search, separate with a Mark so
876 // that future threads (which will start farther to the right in
877 // the input string) are lower priority than current threads.
878 if (ip->opcode() == kInstNop && q->maxmark() > 0 &&
879 id == prog_->start_unanchored() && id != prog_->start())
880 stk[nstk++] = Mark;
881 id = ip->out();
882 goto Loop;
883
884 case kInstAltMatch:
885 DCHECK(!ip->last());
886 id = id+1;
887 goto Loop;
888
889 case kInstEmptyWidth:
890 if (!ip->last())
891 stk[nstk++] = id+1;
892
893 // Continue on if we have all the right flag bits.
894 if (ip->empty() & ~flag)
895 break;
896 id = ip->out();
897 goto Loop;
898 }
899 }
900}
901
902// Running of work queues. In the work queue, order matters:
903// the queue is sorted in priority order. If instruction i comes before j,
904// then the instructions that i produces during the run must come before
905// the ones that j produces. In order to keep this invariant, all the
906// work queue runners have to take an old queue to process and then
907// also a new queue to fill in. It's not acceptable to add to the end of
908// an existing queue, because new instructions will not end up in the
909// correct position.
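//
// For example (illustrative), if the old queue holds ids 3, 8, 5 in priority
// order, then everything derived from 3 must precede everything derived from
// 8 in the new queue; appending to the old queue in place could interleave
// them and destroy the priority order.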
910
911// Runs the work queue, processing the empty strings indicated by flag.
912// For example, flag == kEmptyBeginLine|kEmptyEndLine means to match
913// both ^ and $. It is important that callers pass all flags at once:
914// processing both ^ and $ is not the same as first processing only ^
915// and then processing only $. Doing the two-step sequence won't match
916// ^$^$^$ but processing ^ and $ simultaneously will (and is the behavior
917// exhibited by existing implementations).
918void DFA::RunWorkqOnEmptyString(Workq* oldq, Workq* newq, uint32_t flag) {
919 newq->clear();
920 for (Workq::iterator i = oldq->begin(); i != oldq->end(); ++i) {
921 if (oldq->is_mark(*i))
922 AddToQueue(newq, Mark, flag);
923 else
924 AddToQueue(newq, *i, flag);
925 }
926}
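
// For example (illustrative), consider "(?m)^$" at a position sandwiched
// between two '\n' bytes: both kEmptyBeginLine and kEmptyEndLine hold there,
// and the ^ and $ instructions can only both be stepped through if the two
// flags are passed to RunWorkqOnEmptyString together.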
927
928// Runs the work queue, processing the single byte c followed by any empty
929// strings indicated by flag. For example, c == 'a' and flag == kEmptyEndLine,
930// means to match c$. Sets the bool *ismatch to true if the end of the
931// regular expression program has been reached (the regexp has matched).
932void DFA::RunWorkqOnByte(Workq* oldq, Workq* newq,
933 int c, uint32_t flag, bool* ismatch) {
934 //mutex_.AssertHeld();
935
936 newq->clear();
937 for (Workq::iterator i = oldq->begin(); i != oldq->end(); ++i) {
938 if (oldq->is_mark(*i)) {
939 if (*ismatch)
940 return;
941 newq->mark();
942 continue;
943 }
944 int id = *i;
945 Prog::Inst* ip = prog_->inst(id);
946 switch (ip->opcode()) {
947 default:
948 LOG(DFATAL) << "unhandled opcode: " << ip->opcode();
949 break;
950
951 case kInstFail: // never succeeds
952 case kInstCapture: // already followed
953 case kInstNop: // already followed
954 case kInstAltMatch: // already followed
955 case kInstEmptyWidth: // already followed
956 break;
957
958 case kInstByteRange: // can follow if c is in range
959 if (!ip->Matches(c))
960 break;
961 AddToQueue(newq, ip->out(), flag);
962 if (ip->hint() != 0) {
963 // We have a hint, but we must cancel out the
964 // increment that will occur after the break.
965 i += ip->hint() - 1;
966 } else {
967 // We have no hint, so we must find the end
968 // of the current list and then skip to it.
969 Prog::Inst* ip0 = ip;
970 while (!ip->last())
971 ++ip;
972 i += ip - ip0;
973 }
974 break;
975
976 case kInstMatch:
977 if (prog_->anchor_end() && c != kByteEndText &&
978 kind_ != Prog::kManyMatch)
979 break;
980 *ismatch = true;
981 if (kind_ == Prog::kFirstMatch) {
982 // Can stop processing work queue since we found a match.
983 return;
984 }
985 break;
986 }
987 }
988
989 if (ExtraDebug)
990 absl::FPrintF(stderr, "%s on %d[%#x] -> %s [%d]\n",
991 DumpWorkq(oldq), c, flag, DumpWorkq(newq), *ismatch);
992}
993
994// Processes input byte c in state, returning new state.
995// Caller does not hold mutex.
996DFA::State* DFA::RunStateOnByteUnlocked(State* state, int c) {
997 // Keep only one RunStateOnByte going
998 // even if the DFA is being run by multiple threads.
999 absl::MutexLock l(&mutex_);
1000 return RunStateOnByte(state, c);
1001}
1002
1003// Processes input byte c in state, returning new state.
1004DFA::State* DFA::RunStateOnByte(State* state, int c) {
1005 //mutex_.AssertHeld();
1006
1007 if (state <= SpecialStateMax) {
1008 if (state == FullMatchState) {
1009 // It is convenient for routines like PossibleMatchRange
1010 // if we implement RunStateOnByte for FullMatchState:
1011 // once you get into this state you never get out,
1012 // so it's pretty easy.
1013 return FullMatchState;
1014 }
1015 if (state == DeadState) {
1016 LOG(DFATAL) << "DeadState in RunStateOnByte";
1017 return NULL;
1018 }
1019 if (state == NULL) {
1020 LOG(DFATAL) << "NULL state in RunStateOnByte";
1021 return NULL;
1022 }
1023 LOG(DFATAL) << "Unexpected special state in RunStateOnByte";
1024 return NULL;
1025 }
1026
1027 // If someone else already computed this, return it.
1028 State* ns = state->next_[ByteMap(c)].load(std::memory_order_relaxed);
1029 if (ns != NULL)
1030 return ns;
1031
1032 // Convert state into Workq.
1033 StateToWorkq(state, q0_);
1034
1035 // Flags marking the kinds of empty-width things (^ $ etc)
1036 // around this byte. Before the byte we have the flags recorded
1037 // in the State structure itself. After the byte we have
1038 // nothing yet (but that will change: read on).
1039 uint32_t needflag = state->flag_ >> kFlagNeedShift;
1040 uint32_t beforeflag = state->flag_ & kFlagEmptyMask;
1041 uint32_t oldbeforeflag = beforeflag;
1042 uint32_t afterflag = 0;
1043
1044 if (c == '\n') {
1045 // Insert implicit $ and ^ around \n
1046 beforeflag |= kEmptyEndLine;
1047 afterflag |= kEmptyBeginLine;
1048 }
1049
1050 if (c == kByteEndText) {
1051 // Insert implicit $ and \z before the fake "end text" byte.
1052 beforeflag |= kEmptyEndLine | kEmptyEndText;
1053 }
1054
1055 // The state flag kFlagLastWord says whether the last
1056 // byte processed was a word character. Use that info to
1057 // insert empty-width (non-)word boundaries.
1058 bool islastword = (state->flag_ & kFlagLastWord) != 0;
1059 bool isword = c != kByteEndText && Prog::IsWordChar(static_cast<uint8_t>(c));
1060 if (isword == islastword)
1061 beforeflag |= kEmptyNonWordBoundary;
1062 else
1063 beforeflag |= kEmptyWordBoundary;
1064
1065 // Okay, finally ready to run.
1066 // Only useful to rerun on empty string if there are new, useful flags.
1067 if (beforeflag & ~oldbeforeflag & needflag) {
1068 RunWorkqOnEmptyString(q0_, q1_, beforeflag);
1069 using std::swap;
1070 swap(q0_, q1_);
1071 }
1072 bool ismatch = false;
1073 RunWorkqOnByte(q0_, q1_, c, afterflag, &ismatch);
1074 using std::swap;
1075 swap(q0_, q1_);
1076
1077 // Save afterflag along with ismatch and isword in new state.
1078 uint32_t flag = afterflag;
1079 if (ismatch)
1080 flag |= kFlagMatch;
1081 if (isword)
1082 flag |= kFlagLastWord;
1083
1084 if (ismatch && kind_ == Prog::kManyMatch)
1085 ns = WorkqToCachedState(q0_, q1_, flag);
1086 else
1087 ns = WorkqToCachedState(q0_, NULL, flag);
1088
1089 // Flush ns before linking to it.
1090 // Write barrier before updating state->next_ so that the
1091 // main search loop can proceed without any locking, for speed.
1092 // (Otherwise it would need one mutex operation per input byte.)
1093 state->next_[ByteMap(c)].store(ns, std::memory_order_release);
1094 return ns;
1095}
1096
1097
1098//////////////////////////////////////////////////////////////////////
1099// DFA cache reset.
1100
1101// Reader-writer lock helper.
1102//
1103// The DFA uses a reader-writer mutex to protect the state graph itself.
1104// Traversing the state graph requires holding the mutex for reading,
1105// and discarding the state graph and starting over requires holding the
1106// lock for writing. If a search needs to expand the graph but is out
1107// of memory, it will need to drop its read lock and then acquire the
1108// write lock. Since it cannot then atomically downgrade from write lock
1109// to read lock, it runs the rest of the search holding the write lock.
1110// (This probably helps avoid repeated contention, but really the decision
1111// is forced by the Mutex interface.) It's a bit complicated to keep
1112// track of whether the lock is held for reading or writing and thread
1113// that through the search, so instead we encapsulate it in the RWLocker
1114// and pass that around.
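//
// A typical use (sketch; the actual plumbing is in DFA::Search, below):
//
//   RWLocker l(&cache_mutex_);
//   SearchParams params(text, context, &l);
//   ...
//   FastSearchLoop(&params);  // may trigger ResetCache(&l), which upgrades
//                             // the lock via l.LockForWriting()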
1115
1116class DFA::RWLocker {
1117 public:
1118 explicit RWLocker(CacheMutex* mu);
1119 ~RWLocker();
1120
1121 // If the lock is only held for reading right now,
1122 // drop the read lock and re-acquire for writing.
1123 // Subsequent calls to LockForWriting are no-ops.
1124 // Notice that the lock is *released* temporarily.
1125 void LockForWriting();
1126
1127 private:
1128 CacheMutex* mu_;
1129 bool writing_;
1130
1131 RWLocker(const RWLocker&) = delete;
1132 RWLocker& operator=(const RWLocker&) = delete;
1133};
1134
1135DFA::RWLocker::RWLocker(CacheMutex* mu) : mu_(mu), writing_(false) {
1136 mu_->ReaderLock();
1137}
1138
1139// This function is marked as ABSL_NO_THREAD_SAFETY_ANALYSIS because
1140// the annotations don't support lock upgrade.
1141void DFA::RWLocker::LockForWriting() ABSL_NO_THREAD_SAFETY_ANALYSIS {
1142 if (!writing_) {
1143 mu_->ReaderUnlock();
1144 mu_->WriterLock();
1145 writing_ = true;
1146 }
1147}
1148
1149DFA::RWLocker::~RWLocker() {
1150 if (!writing_)
1151 mu_->ReaderUnlock();
1152 else
1153 mu_->WriterUnlock();
1154}
1155
1156
1157// When the DFA's State cache fills, we discard all the states in the
1158// cache and start over. Many threads can be using and adding to the
1159// cache at the same time, so we synchronize using the cache_mutex_
1160// to keep from stepping on other threads. Specifically, all the
1161// threads using the current cache hold cache_mutex_ for reading.
1162// When a thread decides to flush the cache, it drops cache_mutex_
1163// and then re-acquires it for writing. That ensures there are no
1164// other threads accessing the cache anymore. The rest of the search
1165// runs holding cache_mutex_ for writing, avoiding any contention
1166// with or cache pollution caused by other threads.
1167
1168void DFA::ResetCache(RWLocker* cache_lock) {
1169 // Re-acquire the cache_mutex_ for writing (exclusive use).
1170 cache_lock->LockForWriting();
1171
1172 hooks::GetDFAStateCacheResetHook()({
1173 state_budget_,
1174 state_cache_.size(),
1175 });
1176
1177 // Clear the cache, reset the memory budget.
1178 for (int i = 0; i < kMaxStart; i++)
1179 start_[i].start.store(NULL, std::memory_order_relaxed);
1180 ClearCache();
1181 mem_budget_ = state_budget_;
1182}
1183
1184// Typically, a couple States do need to be preserved across a cache
1185// reset, like the State at the current point in the search.
1186// The StateSaver class helps keep States across cache resets.
1187// It makes a copy of the state's guts outside the cache (before the reset)
1188// and then can be asked, after the reset, to recreate the State
1189// in the new cache. For example, in a DFA method ("this" is a DFA):
1190//
1191// StateSaver saver(this, s);
1192// ResetCache(cache_lock);
1193// s = saver.Restore();
1194//
1195// The saver should always have room in the cache to re-create the state,
1196// because resetting the cache locks out all other threads, and the cache
1197// is known to have room for at least a couple states (otherwise the DFA
1198// constructor fails).
1199
1200class DFA::StateSaver {
1201 public:
1202 explicit StateSaver(DFA* dfa, State* state);
1203 ~StateSaver();
1204
1205 // Recreates and returns a state equivalent to the
1206 // original state passed to the constructor.
1207 // Returns NULL if the cache has filled, but
1208 // since the DFA guarantees to have room in the cache
1209 // for a couple states, should never return NULL
1210 // if used right after ResetCache.
1211 State* Restore();
1212
1213 private:
1214 DFA* dfa_; // the DFA to use
1215 int* inst_; // saved info from State
1216 int ninst_;
1217 uint32_t flag_;
1218 bool is_special_; // whether original state was special
1219 State* special_; // if is_special_, the original state
1220
1221 StateSaver(const StateSaver&) = delete;
1222 StateSaver& operator=(const StateSaver&) = delete;
1223};
1224
1225DFA::StateSaver::StateSaver(DFA* dfa, State* state) {
1226 dfa_ = dfa;
1227 if (state <= SpecialStateMax) {
1228 inst_ = NULL;
1229 ninst_ = 0;
1230 flag_ = 0;
1231 is_special_ = true;
1232 special_ = state;
1233 return;
1234 }
1235 is_special_ = false;
1236 special_ = NULL;
1237 flag_ = state->flag_;
1238 ninst_ = state->ninst_;
1239 inst_ = new int[ninst_];
1240 memmove(inst_, state->inst_, ninst_*sizeof inst_[0]);
1241}
1242
1243DFA::StateSaver::~StateSaver() {
1244 if (!is_special_)
1245 delete[] inst_;
1246}
1247
1248DFA::State* DFA::StateSaver::Restore() {
1249 if (is_special_)
1250 return special_;
1251 absl::MutexLock l(&dfa_->mutex_);
1252 State* s = dfa_->CachedState(inst_, ninst_, flag_);
1253 if (s == NULL)
1254 LOG(DFATAL) << "StateSaver failed to restore state.";
1255 return s;
1256}
1257
1258
1259//////////////////////////////////////////////////////////////////////
1260//
1261// DFA execution.
1262//
1263// The basic search loop is easy: start in a state s and then for each
1264// byte c in the input, s = s->next[c].
1265//
1266// This simple description omits a few efficiency-driven complications.
1267//
1268// First, the State graph is constructed incrementally: it is possible
1269// that s->next[c] is null, indicating that that state has not been
1270// fully explored. In this case, RunStateOnByte must be invoked to
1271// determine the next state, which is cached in s->next[c] to save
1272// future effort. An alternative reason for s->next[c] to be null is
1273// that the DFA has reached a so-called "dead state", in which any match
1274// is no longer possible. In this case RunStateOnByte will return NULL
1275// and the processing of the string can stop early.
1276//
1277// Second, a 256-element pointer array for s->next_ makes each State
1278// quite large (2kB on 64-bit machines). Instead, dfa->bytemap_[]
1279// maps from bytes to "byte classes" and then next_ only needs to have
1280// as many pointers as there are byte classes. A byte class is simply a
1281// range of bytes that the regexp never distinguishes between.
1282// A regexp looking for a[abc] would have four byte ranges -- 0 to 'a'-1,
// 'a', 'b' to 'c', and 'd' to 0xFF. The bytemap slows us a little bit
1284// but in exchange we typically cut the size of a State (and thus our
1285// memory footprint) by about 5-10x. The comments still refer to
1286// s->next[c] for simplicity, but code should refer to s->next_[bytemap_[c]].
1287//
1288// Third, it is common for a DFA for an unanchored match to begin in a
1289// state in which only one particular byte value can take the DFA to a
1290// different state. That is, s->next[c] != s for only one c. In this
1291// situation, the DFA can do better than executing the simple loop.
1292// Instead, it can call memchr to search very quickly for the byte c.
1293// Whether the start state has this property is determined during a
1294// pre-compilation pass and the "can_prefix_accel" argument is set.
1295//
1296// Fourth, the desired behavior is to search for the leftmost-best match
1297// (approximately, the same one that Perl would find), which is not
1298// necessarily the match ending earliest in the string. Each time a
1299// match is found, it must be noted, but the DFA must continue on in
1300// hope of finding a higher-priority match. In some cases, the caller only
1301// cares whether there is any match at all, not which one is found.
1302// The "want_earliest_match" flag causes the search to stop at the first
1303// match found.
1304//
1305// Fifth, one algorithm that uses the DFA needs it to run over the
1306// input string backward, beginning at the end and ending at the beginning.
1307// Passing false for the "run_forward" flag causes the DFA to run backward.
1308//
1309// The checks for these last three cases, which in a naive implementation
1310// would be performed once per input byte, slow the general loop enough
1311// to merit specialized versions of the search loop for each of the
1312// eight possible settings of the three booleans. Rather than write
1313// eight different functions, we write one general implementation and then
1314// inline it to create the specialized ones.
1315//
1316// Note that matches are delayed by one byte, to make it easier to
// accommodate match conditions depending on the next input byte (like $ and \b).
1318// When s->next[c]->IsMatch(), it means that there is a match ending just
1319// *before* byte c.
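//
// For example (illustrative), for the pattern "a\b" run over the text "a ",
// the state reached after consuming 'a' is not yet reported as matching;
// only when the following ' ' (or the imaginary kByteEndText byte) is
// consumed does IsMatch() become true, and the match end is then recorded
// one byte back, at the position just after 'a'.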
1320
1321// The generic search loop. Searches text for a match, returning
1322// the pointer to the end of the chosen match, or NULL if no match.
1323// The bools are equal to the same-named variables in params, but
1324// making them function arguments lets the inliner specialize
1325// this function to each combination (see two paragraphs above).
1326template <bool can_prefix_accel,
1327 bool want_earliest_match,
1328 bool run_forward>
1329inline bool DFA::InlinedSearchLoop(SearchParams* params) {
1330 State* start = params->start;
1331 const uint8_t* bp = BytePtr(params->text.data()); // start of text
1332 const uint8_t* p = bp; // text scanning point
1333 const uint8_t* ep = BytePtr(params->text.data() +
1334 params->text.size()); // end of text
1335 const uint8_t* resetp = NULL; // p at last cache reset
1336 if (!run_forward) {
1337 using std::swap;
1338 swap(p, ep);
1339 }
1340
1341 const uint8_t* bytemap = prog_->bytemap();
1342 const uint8_t* lastmatch = NULL; // most recent matching position in text
1343 bool matched = false;
1344
1345 State* s = start;
1346 if (ExtraDebug)
1347 absl::FPrintF(stderr, "@stx: %s\n", DumpState(s));
1348
1349 if (s->IsMatch()) {
1350 matched = true;
1351 lastmatch = p;
1352 if (ExtraDebug)
1353 absl::FPrintF(stderr, "match @stx! [%s]\n", DumpState(s));
1354 if (params->matches != NULL && kind_ == Prog::kManyMatch) {
1355 for (int i = s->ninst_ - 1; i >= 0; i--) {
1356 int id = s->inst_[i];
1357 if (id == MatchSep)
1358 break;
1359 params->matches->insert(id);
1360 }
1361 }
1362 if (want_earliest_match) {
1363 params->ep = reinterpret_cast<const char*>(lastmatch);
1364 return true;
1365 }
1366 }
1367
1368 while (p != ep) {
1369 if (ExtraDebug)
1370 absl::FPrintF(stderr, "@%d: %s\n", p - bp, DumpState(s));
1371
1372 if (can_prefix_accel && s == start) {
1373 // In start state, only way out is to find the prefix,
1374 // so we use prefix accel (e.g. memchr) to skip ahead.
1375 // If not found, we can skip to the end of the string.
1376 p = BytePtr(prog_->PrefixAccel(p, ep - p));
1377 if (p == NULL) {
1378 p = ep;
1379 break;
1380 }
1381 }
1382
1383 int c;
1384 if (run_forward)
1385 c = *p++;
1386 else
1387 c = *--p;
1388
1389 // Note that multiple threads might be consulting
1390 // s->next_[bytemap[c]] simultaneously.
1391 // RunStateOnByte takes care of the appropriate locking,
1392 // including a memory barrier so that the unlocked access
1393 // (sometimes known as "double-checked locking") is safe.
1394 // The alternative would be either one DFA per thread
1395 // or one mutex operation per input byte.
1396 //
1397 // ns == DeadState means the state is known to be dead
1398 // (no more matches are possible).
1399 // ns == NULL means the state has not yet been computed
1400 // (need to call RunStateOnByteUnlocked).
1401 // RunStateOnByte returns ns == NULL if it is out of memory.
1402 // ns == FullMatchState means the rest of the string matches.
1403 //
1404 // Okay to use bytemap[] not ByteMap() here, because
1405 // c is known to be an actual byte and not kByteEndText.
1406
1407 State* ns = s->next_[bytemap[c]].load(std::memory_order_acquire);
1408 if (ns == NULL) {
1409 ns = RunStateOnByteUnlocked(s, c);
1410 if (ns == NULL) {
1411 // After we reset the cache, we hold cache_mutex exclusively,
1412 // so if resetp != NULL, it means we filled the DFA state
1413 // cache with this search alone (without any other threads).
1414 // Benchmarks show that doing a state computation on every
1415 // byte runs at about 0.2 MB/s, while the NFA (nfa.cc) can do the
1416 // same at about 2 MB/s. Unless we're processing an average
1417 // of 10 bytes per state computation, fail so that RE2 can
1418 // fall back to the NFA. However, RE2::Set cannot fall back,
1419 // so we just have to keep on keeping on in that case.
1420 if (dfa_should_bail_when_slow && resetp != NULL &&
1421 static_cast<size_t>(p - resetp) < 10*state_cache_.size() &&
1422 kind_ != Prog::kManyMatch) {
1423 params->failed = true;
1424 return false;
1425 }
1426 resetp = p;
1427
1428 // Prepare to save start and s across the reset.
1429 StateSaver save_start(this, start);
1430 StateSaver save_s(this, s);
1431
1432 // Discard all the States in the cache.
1433 ResetCache(params->cache_lock);
1434
1435 // Restore start and s so we can continue.
1436 if ((start = save_start.Restore()) == NULL ||
1437 (s = save_s.Restore()) == NULL) {
1438 // Restore already did LOG(DFATAL).
1439 params->failed = true;
1440 return false;
1441 }
1442 ns = RunStateOnByteUnlocked(s, c);
1443 if (ns == NULL) {
1444 LOG(DFATAL) << "RunStateOnByteUnlocked failed after ResetCache";
1445 params->failed = true;
1446 return false;
1447 }
1448 }
1449 }
1450 if (ns <= SpecialStateMax) {
1451 if (ns == DeadState) {
1452 params->ep = reinterpret_cast<const char*>(lastmatch);
1453 return matched;
1454 }
1455 // FullMatchState
1456 params->ep = reinterpret_cast<const char*>(ep);
1457 return true;
1458 }
1459
1460 s = ns;
1461 if (s->IsMatch()) {
1462 matched = true;
1463 // The DFA notices the match one byte late,
1464 // so adjust p before using it in the match.
1465 if (run_forward)
1466 lastmatch = p - 1;
1467 else
1468 lastmatch = p + 1;
1469 if (ExtraDebug)
1470 absl::FPrintF(stderr, "match @%d! [%s]\n", lastmatch - bp, DumpState(s));
1471 if (params->matches != NULL && kind_ == Prog::kManyMatch) {
1472 for (int i = s->ninst_ - 1; i >= 0; i--) {
1473 int id = s->inst_[i];
1474 if (id == MatchSep)
1475 break;
1476 params->matches->insert(id);
1477 }
1478 }
1479 if (want_earliest_match) {
1480 params->ep = reinterpret_cast<const char*>(lastmatch);
1481 return true;
1482 }
1483 }
1484 }
1485
1486 // Process one more byte to see if it triggers a match.
1487 // (Remember, matches are delayed one byte.)
1488 if (ExtraDebug)
1489 absl::FPrintF(stderr, "@etx: %s\n", DumpState(s));
1490
1491 int lastbyte;
1492 if (run_forward) {
1493 if (EndPtr(params->text) == EndPtr(params->context))
1494 lastbyte = kByteEndText;
1495 else
1496 lastbyte = EndPtr(params->text)[0] & 0xFF;
1497 } else {
1498 if (BeginPtr(params->text) == BeginPtr(params->context))
1499 lastbyte = kByteEndText;
1500 else
1501 lastbyte = BeginPtr(params->text)[-1] & 0xFF;
1502 }
1503
1504 State* ns = s->next_[ByteMap(lastbyte)].load(std::memory_order_acquire);
1505 if (ns == NULL) {
1506 ns = RunStateOnByteUnlocked(s, lastbyte);
1507 if (ns == NULL) {
1508 StateSaver save_s(this, s);
1509 ResetCache(params->cache_lock);
1510 if ((s = save_s.Restore()) == NULL) {
1511 params->failed = true;
1512 return false;
1513 }
1514 ns = RunStateOnByteUnlocked(s, lastbyte);
1515 if (ns == NULL) {
1516 LOG(DFATAL) << "RunStateOnByteUnlocked failed after Reset";
1517 params->failed = true;
1518 return false;
1519 }
1520 }
1521 }
1522 if (ns <= SpecialStateMax) {
1523 if (ns == DeadState) {
1524 params->ep = reinterpret_cast<const char*>(lastmatch);
1525 return matched;
1526 }
1527 // FullMatchState
1528 params->ep = reinterpret_cast<const char*>(ep);
1529 return true;
1530 }
1531
1532 s = ns;
1533 if (s->IsMatch()) {
1534 matched = true;
1535 lastmatch = p;
1536 if (ExtraDebug)
1537 absl::FPrintF(stderr, "match @etx! [%s]\n", DumpState(s));
1538 if (params->matches != NULL && kind_ == Prog::kManyMatch) {
1539 for (int i = s->ninst_ - 1; i >= 0; i--) {
1540 int id = s->inst_[i];
1541 if (id == MatchSep)
1542 break;
1543 params->matches->insert(id);
1544 }
1545 }
1546 }
1547
1548 params->ep = reinterpret_cast<const char*>(lastmatch);
1549 return matched;
1550}
1551
1552// Inline specializations of the general loop.
1553bool DFA::SearchFFF(SearchParams* params) {
1554 return InlinedSearchLoop<false, false, false>(params);
1555}
1556bool DFA::SearchFFT(SearchParams* params) {
1557 return InlinedSearchLoop<false, false, true>(params);
1558}
1559bool DFA::SearchFTF(SearchParams* params) {
1560 return InlinedSearchLoop<false, true, false>(params);
1561}
1562bool DFA::SearchFTT(SearchParams* params) {
1563 return InlinedSearchLoop<false, true, true>(params);
1564}
1565bool DFA::SearchTFF(SearchParams* params) {
1566 return InlinedSearchLoop<true, false, false>(params);
1567}
1568bool DFA::SearchTFT(SearchParams* params) {
1569 return InlinedSearchLoop<true, false, true>(params);
1570}
1571bool DFA::SearchTTF(SearchParams* params) {
1572 return InlinedSearchLoop<true, true, false>(params);
1573}
1574bool DFA::SearchTTT(SearchParams* params) {
1575 return InlinedSearchLoop<true, true, true>(params);
1576}
1577
1578// For performance, calls the appropriate specialized version
1579// of InlinedSearchLoop.
1580bool DFA::FastSearchLoop(SearchParams* params) {
1581 // Because the methods are private, the Searches array
1582 // cannot be declared at top level.
1583 static bool (DFA::*Searches[])(SearchParams*) = {
1584 &DFA::SearchFFF,
1585 &DFA::SearchFFT,
1586 &DFA::SearchFTF,
1587 &DFA::SearchFTT,
1588 &DFA::SearchTFF,
1589 &DFA::SearchTFT,
1590 &DFA::SearchTTF,
1591 &DFA::SearchTTT,
1592 };
1593
1594 int index = 4 * params->can_prefix_accel +
1595 2 * params->want_earliest_match +
1596 1 * params->run_forward;
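  // The index is a 3-bit number: bit 2 = can_prefix_accel, bit 1 =
  // want_earliest_match, bit 0 = run_forward. For example, index 5
  // (binary 101) dispatches to SearchTFT: prefix acceleration on,
  // earliest-match off, forward scan on.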
1597 return (this->*Searches[index])(params);
1598}
1599
1600
1601// The discussion of DFA execution above ignored the question of how
1602// to determine the initial state for the search loop. There are two
1603// factors that influence the choice of start state.
1604//
1605// The first factor is whether the search is anchored or not.
1606// The regexp program (Prog*) itself has
1607// two different entry points: one for anchored searches and one for
1608// unanchored searches. (The unanchored version starts with a leading ".*?"
1609// and then jumps to the anchored one.)
1610//
1611// The second factor is where text appears in the larger context, which
1612// determines which empty-string operators can be matched at the beginning
1613// of execution. If text is at the very beginning of context, \A and ^ match.
1614// Otherwise if text is at the beginning of a line, then ^ matches.
1615// Otherwise it matters whether the character before text is a word character
1616// or a non-word character.
1617//
1618// The two anchoring cases and the four empty-string-flag cases combine
1619// to make the eight cached StartInfos in the DFA's start_[] array, indexed
1620// by kStartBeginText, kStartBeginLine, kStartAfterWordChar and
1621// kStartAfterNonWordChar, each with and without kStartAnchored. The start
1622// state for each is filled in the first time it is used for an actual search.
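// For example, searching for text "bar" within context "foo\nbar" starts
// in the kStartBeginLine case (the byte before text is '\n'), so ^ can
// match at the start but \A cannot; the same text within context "foobar"
// starts in the kStartAfterWordChar case instead.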
1623
1624// Examines text, context, and anchored to determine the right start
1625// state for the DFA search loop. Fills in params and returns true on success.
1626// Returns false on failure.
1627bool DFA::AnalyzeSearch(SearchParams* params) {
1628 absl::string_view text = params->text;
1629 absl::string_view context = params->context;
1630
1631 // Sanity check: make sure that text lies within context.
1632 if (BeginPtr(text) < BeginPtr(context) || EndPtr(text) > EndPtr(context)) {
1633 LOG(DFATAL) << "context does not contain text";
1634 params->start = DeadState;
1635 return true;
1636 }
1637
1638 // Determine correct search type.
1639 int start;
1640 uint32_t flags;
1641 if (params->run_forward) {
1642 if (BeginPtr(text) == BeginPtr(context)) {
1643 start = kStartBeginText;
1644 flags = kEmptyBeginText|kEmptyBeginLine;
1645 } else if (BeginPtr(text)[-1] == '\n') {
1646 start = kStartBeginLine;
1647 flags = kEmptyBeginLine;
1648 } else if (Prog::IsWordChar(BeginPtr(text)[-1] & 0xFF)) {
1649 start = kStartAfterWordChar;
1650 flags = kFlagLastWord;
1651 } else {
1652 start = kStartAfterNonWordChar;
1653 flags = 0;
1654 }
1655 } else {
1656 if (EndPtr(text) == EndPtr(context)) {
1657 start = kStartBeginText;
1658 flags = kEmptyBeginText|kEmptyBeginLine;
1659 } else if (EndPtr(text)[0] == '\n') {
1660 start = kStartBeginLine;
1661 flags = kEmptyBeginLine;
1662 } else if (Prog::IsWordChar(EndPtr(text)[0] & 0xFF)) {
1663 start = kStartAfterWordChar;
1664 flags = kFlagLastWord;
1665 } else {
1666 start = kStartAfterNonWordChar;
1667 flags = 0;
1668 }
1669 }
1670 if (params->anchored)
1671 start |= kStartAnchored;
1672 StartInfo* info = &start_[start];
1673
1674 // Try once without cache_lock for writing.
1675 // Try again after resetting the cache
1676 // (ResetCache will relock cache_lock for writing).
1677 if (!AnalyzeSearchHelper(params, info, flags)) {
1678 ResetCache(params->cache_lock);
1679 if (!AnalyzeSearchHelper(params, info, flags)) {
1680 LOG(DFATAL) << "Failed to analyze start state.";
1681 params->failed = true;
1682 return false;
1683 }
1684 }
1685
1686 params->start = info->start.load(std::memory_order_acquire);
1687
1688 // Even if we could prefix accel, we cannot do so when anchored and,
1689 // less obviously, we cannot do so when we are going to need flags.
1690 // This trick works only when there is a single byte that leads to a
1691 // different state!
1692 if (prog_->can_prefix_accel() &&
1693 !params->anchored &&
1694 params->start > SpecialStateMax &&
1695 params->start->flag_ >> kFlagNeedShift == 0)
1696 params->can_prefix_accel = true;
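  // For example, an unanchored search for a pattern with a required literal
  // prefix (say "https?://...") can skip ahead to candidate occurrences of
  // that prefix instead of stepping the DFA one byte at a time; a pattern
  // beginning with \b, by contrast, leaves the start state needing
  // empty-width flags, so acceleration is not used.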
1697
1698 if (ExtraDebug)
1699 absl::FPrintF(stderr, "anchored=%d fwd=%d flags=%#x state=%s can_prefix_accel=%d\n",
1700 params->anchored, params->run_forward, flags,
1701 DumpState(params->start), params->can_prefix_accel);
1702
1703 return true;
1704}
1705
1706// Fills in info if needed. Returns true on success, false on failure.
1707bool DFA::AnalyzeSearchHelper(SearchParams* params, StartInfo* info,
1708 uint32_t flags) {
1709 // Quick check.
1710 State* start = info->start.load(std::memory_order_acquire);
1711 if (start != NULL)
1712 return true;
1713
1714 absl::MutexLock l(&mutex_);
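  // Another thread may have initialized the start state while we were
  // waiting for mutex_; holding the mutex orders us after its release
  // store below, so a relaxed reload is sufficient here.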
1715 start = info->start.load(std::memory_order_relaxed);
1716 if (start != NULL)
1717 return true;
1718
1719 q0_->clear();
1720 AddToQueue(q0_,
1721 params->anchored ? prog_->start() : prog_->start_unanchored(),
1722 flags);
1723 start = WorkqToCachedState(q0_, NULL, flags);
1724 if (start == NULL)
1725 return false;
1726
1727 // Synchronize with "quick check" above.
1728 info->start.store(start, std::memory_order_release);
1729 return true;
1730}
1731
1732// The actual DFA search: calls AnalyzeSearch and then FastSearchLoop.
1733bool DFA::Search(absl::string_view text, absl::string_view context,
1734 bool anchored, bool want_earliest_match, bool run_forward,
1735 bool* failed, const char** epp, SparseSet* matches) {
1736 *epp = NULL;
1737 if (!ok()) {
1738 *failed = true;
1739 return false;
1740 }
1741 *failed = false;
1742
1743 if (ExtraDebug) {
1744 absl::FPrintF(stderr, "\nprogram:\n%s\n", prog_->DumpUnanchored());
1745 absl::FPrintF(stderr, "text %s anchored=%d earliest=%d fwd=%d kind %d\n",
1746 text, anchored, want_earliest_match, run_forward, kind_);
1747 }
1748
1749 RWLocker l(&cache_mutex_);
1750 SearchParams params(text, context, &l);
1751 params.anchored = anchored;
1752 params.want_earliest_match = want_earliest_match;
1753 params.run_forward = run_forward;
1754 params.matches = matches;
1755
1756 if (!AnalyzeSearch(&params)) {
1757 *failed = true;
1758 return false;
1759 }
1760 if (params.start == DeadState)
1761 return false;
1762 if (params.start == FullMatchState) {
1763 if (run_forward == want_earliest_match)
1764 *epp = text.data();
1765 else
1766 *epp = text.data() + text.size();
1767 return true;
1768 }
1769 if (ExtraDebug)
1770 absl::FPrintF(stderr, "start %s\n", DumpState(params.start));
1771 bool ret = FastSearchLoop(&params);
1772 if (params.failed) {
1773 *failed = true;
1774 return false;
1775 }
1776 *epp = params.ep;
1777 return ret;
1778}
1779
1780DFA* Prog::GetDFA(MatchKind kind) {
1781 // For a forward DFA, half the memory goes to each DFA.
1782 // However, if it is a "many match" DFA, then there is
1783 // no counterpart with which the memory must be shared.
1784 //
1785 // For a reverse DFA, all the memory goes to the
1786 // "longest match" DFA, because RE2 never does reverse
1787 // "first match" searches.
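  // So with dfa_mem_ of, say, 2 MiB: a forward Prog's first-match and
  // longest-match DFAs get 1 MiB each, a many-match DFA gets the full
  // 2 MiB, and a reversed Prog's longest-match DFA also gets the full
  // 2 MiB, since no reverse first-match DFA is ever built.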
1788 if (kind == kFirstMatch) {
1789 absl::call_once(dfa_first_once_, [](Prog* prog) {
1790 prog->dfa_first_ = new DFA(prog, kFirstMatch, prog->dfa_mem_ / 2);
1791 }, this);
1792 return dfa_first_;
1793 } else if (kind == kManyMatch) {
1794 absl::call_once(dfa_first_once_, [](Prog* prog) {
1795 prog->dfa_first_ = new DFA(prog, kManyMatch, prog->dfa_mem_);
1796 }, this);
1797 return dfa_first_;
1798 } else {
1799 absl::call_once(dfa_longest_once_, [](Prog* prog) {
1800 if (!prog->reversed_)
1801 prog->dfa_longest_ = new DFA(prog, kLongestMatch, prog->dfa_mem_ / 2);
1802 else
1803 prog->dfa_longest_ = new DFA(prog, kLongestMatch, prog->dfa_mem_);
1804 }, this);
1805 return dfa_longest_;
1806 }
1807}
1808
1809void Prog::DeleteDFA(DFA* dfa) {
1810 delete dfa;
1811}
1812
1813// Executes the regexp program to search in text,
1814// which itself is inside the larger context. (As a convenience,
1815// passing a NULL context is equivalent to passing text.)
1816// Returns true if a match is found, false if not.
1817// If a match is found, fills in match0->end() to point at the end of the match
1818// and sets match0->begin() to text.begin(), since the DFA can't track
1819// where the match actually began.
1820//
1821// This is the only external interface (class DFA only exists in this file).
1822//
1823bool Prog::SearchDFA(absl::string_view text, absl::string_view context,
1824 Anchor anchor, MatchKind kind, absl::string_view* match0,
1825 bool* failed, SparseSet* matches) {
1826 *failed = false;
1827
1828 if (context.data() == NULL)
1829 context = text;
1830 bool caret = anchor_start();
1831 bool dollar = anchor_end();
1832 if (reversed_) {
1833 using std::swap;
1834 swap(caret, dollar);
1835 }
1836 if (caret && BeginPtr(context) != BeginPtr(text))
1837 return false;
1838 if (dollar && EndPtr(context) != EndPtr(text))
1839 return false;
1840
1841 // Handle full match by running an anchored longest match
1842 // and then checking if it covers all of text.
1843 bool anchored = anchor == kAnchored || anchor_start() || kind == kFullMatch;
1844 bool endmatch = false;
1845 if (kind == kManyMatch) {
1846 // This is split out in order to avoid clobbering kind.
1847 } else if (kind == kFullMatch || anchor_end()) {
1848 endmatch = true;
1849 kind = kLongestMatch;
1850 }
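  // (For example, with kFullMatch against "abcd", a longest match that
  // ends after "abc" is rejected by the endmatch check below.)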
1851
1852 // If the caller doesn't care where the match is (just whether one exists),
1853 // then we can stop at the very first match we find, the so-called
1854 // "earliest match".
1855 bool want_earliest_match = false;
1856 if (kind == kManyMatch) {
1857 // This is split out in order to avoid clobbering kind.
1858 if (matches == NULL) {
1859 want_earliest_match = true;
1860 }
1861 } else if (match0 == NULL && !endmatch) {
1862 want_earliest_match = true;
1863 kind = kLongestMatch;
1864 }
1865
1866 DFA* dfa = GetDFA(kind);
1867 const char* ep;
1868 bool matched = dfa->Search(text, context, anchored,
1869 want_earliest_match, !reversed_,
1870 failed, &ep, matches);
1871 if (*failed) {
1872 hooks::GetDFASearchFailureHook()({
1873 // Nothing yet...
1874 });
1875 return false;
1876 }
1877 if (!matched)
1878 return false;
1879 if (endmatch && ep != (reversed_ ? text.data() : text.data() + text.size()))
1880 return false;
1881
1882 // If caller cares, record the boundary of the match.
1883 // We only know where it ends, so use the boundary of text
1884 // as the beginning.
1885 if (match0) {
1886 if (reversed_)
1887 *match0 =
1888 absl::string_view(ep, static_cast<size_t>(text.data() + text.size() - ep));
1889 else
1890 *match0 =
1891 absl::string_view(text.data(), static_cast<size_t>(ep - text.data()));
1892 }
1893 return true;
1894}
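
// For illustration only: a sketch of how a caller might drive SearchDFA
// directly (roughly what re2.cc does); the surrounding setup is assumed,
// not part of this file.
//
//   absl::string_view match;
//   bool failed = false;
//   bool found = prog->SearchDFA(text, absl::string_view(),  // NULL context == text
//                                Prog::kUnanchored, Prog::kFirstMatch,
//                                &match, &failed, NULL);
//   if (failed) {
//     // DFA ran out of memory; the caller falls back to another engine.
//   } else if (found) {
//     // match ends exactly at the match boundary; it begins at text.begin()
//     // (or, for a reversed Prog, the roles are swapped), per the comment above.
//   }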
1895
1896// Build out all states in DFA. Returns number of states.
1897int DFA::BuildAllStates(const Prog::DFAStateCallback& cb) {
1898 if (!ok())
1899 return 0;
1900
1901 // Pick out start state for unanchored search
1902 // at beginning of text.
1903 RWLocker l(&cache_mutex_);
1904 SearchParams params(absl::string_view(), absl::string_view(), &l);
1905 params.anchored = false;
1906 if (!AnalyzeSearch(&params) ||
1907 params.start == NULL ||
1908 params.start == DeadState)
1909 return 0;
1910
1911 // Add start state to work queue.
1912 // Note that any State* that we handle here must point into the cache,
1913 // so we can simply depend on pointer-as-a-number hashing and equality.
1914 absl::flat_hash_map<State*, int> m;
1915 std::deque<State*> q;
1916 m.emplace(params.start, static_cast<int>(m.size()));
1917 q.push_back(params.start);
1918
1919 // Compute the input bytes needed to cover all of the next pointers.
1920 int nnext = prog_->bytemap_range() + 1; // + 1 for kByteEndText slot
1921 std::vector<int> input(nnext);
1922 for (int c = 0; c < 256; c++) {
1923 int b = prog_->bytemap()[c];
1924 while (c < 256-1 && prog_->bytemap()[c+1] == b)
1925 c++;
1926 input[b] = c;
1927 }
1928 input[prog_->bytemap_range()] = kByteEndText;
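  // For example, if the bytemap sends all of 'a'..'z' to class 3, then
  // input[3] ends up holding 'z': one representative byte per class is
  // enough because every byte in a class has identical transitions.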
1929
1930 // Scratch space for the output.
1931 std::vector<int> output(nnext);
1932
1933 // Flood to expand every state.
1934 bool oom = false;
1935 while (!q.empty()) {
1936 State* s = q.front();
1937 q.pop_front();
1938 for (int c : input) {
1939 State* ns = RunStateOnByteUnlocked(s, c);
1940 if (ns == NULL) {
1941 oom = true;
1942 break;
1943 }
1944 if (ns == DeadState) {
1945 output[ByteMap(c)] = -1;
1946 continue;
1947 }
1948 if (m.find(ns) == m.end()) {
1949 m.emplace(ns, static_cast<int>(m.size()));
1950 q.push_back(ns);
1951 }
1952 output[ByteMap(c)] = m[ns];
1953 }
1954 if (cb)
1955 cb(oom ? NULL : output.data(),
1956 s == FullMatchState || s->IsMatch());
1957 if (oom)
1958 break;
1959 }
1960
1961 return static_cast<int>(m.size());
1962}
1963
1964// Build out all states in DFA for kind. Returns number of states.
1965int Prog::BuildEntireDFA(MatchKind kind, const DFAStateCallback& cb) {
1966 return GetDFA(kind)->BuildAllStates(cb);
1967}
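
// For illustration only: a sketch (not library code) of using the callback,
// assuming the DFAStateCallback alias in prog.h receives the transition row
// and a match flag, matching the cb(...) call in BuildAllStates above.
//
//   int nmatching = 0;
//   int nstates = prog->BuildEntireDFA(
//       Prog::kLongestMatch,
//       [&nmatching](const int* next, bool match) {
//         if (next == NULL)
//           return;  // DFA ran out of memory while building
//         if (match)
//           nmatching++;
//       });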
1968
1969// Computes min and max for matching string.
1970// Won't return strings bigger than maxlen.
1971bool DFA::PossibleMatchRange(std::string* min, std::string* max, int maxlen) {
1972 if (!ok())
1973 return false;
1974
1975 // NOTE: if future users of PossibleMatchRange want more precision when
1976 // presented with infinitely repeated elements, consider making this a
1977 // parameter to PossibleMatchRange.
1978 static int kMaxEltRepetitions = 1;
1979
1980 // Keep track of the number of times we've visited states previously. We only
1981 // revisit a given state if it's part of a repeated group, so if the value
1982 // portion of the map tuple exceeds kMaxEltRepetitions we bail out and set
1983 // |*max| to |PrefixSuccessor(*max)|.
1984 //
1985 // Also note that previously_visited_states[UnseenStatePtr] will, in the STL
1986 // tradition, implicitly insert a '0' value at first use. We take advantage
1987 // of that property below.
1988 absl::flat_hash_map<State*, int> previously_visited_states;
1989
1990 // Pick out start state for anchored search at beginning of text.
1991 RWLocker l(&cache_mutex_);
1992 SearchParams params(absl::string_view(), absl::string_view(), &l);
1993 params.anchored = true;
1994 if (!AnalyzeSearch(&params))
1995 return false;
1996 if (params.start == DeadState) { // No matching strings
1997 *min = "";
1998 *max = "";
1999 return true;
2000 }
2001 if (params.start == FullMatchState) // Every string matches: no max
2002 return false;
2003
2004 // The DFA is essentially a big graph rooted at params.start,
2005 // and paths in the graph correspond to accepted strings.
2006 // Each node in the graph has potentially 256+1 arrows
2007 // coming out, one for each byte plus the magic end of
2008 // text character kByteEndText.
2009
2010 // To find the smallest possible prefix of an accepted
2011 // string, we just walk the graph preferring to follow
2012 // arrows with the lowest bytes possible. To find the
2013 // largest possible prefix, we follow the largest bytes
2014 // possible.
2015
2016 // The test for whether there is an arrow from s on byte j is
2017 // ns = RunStateOnByteUnlocked(s, j);
2018 // if (ns == NULL)
2019 // return false;
2020 // if (ns != DeadState && ns->ninst > 0)
2021 // The RunStateOnByteUnlocked call asks the DFA to build out the graph.
2022 // It returns NULL only if the DFA has run out of memory,
2023 // in which case we can't be sure of anything.
2024 // The second check sees whether there was graph built
2025 // and whether it is interesting graph. Nodes might have
2026 // ns->ninst == 0 if they exist only to represent the fact
2027 // that a match was found on the previous byte.
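  // For example, for an anchored pattern like "ab[cd]ef", the minimum walk
  // below prefers 'c' at the bracketed position while the maximum walk
  // prefers 'd', so the two prefixes diverge there.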
2028
2029 // Build minimum prefix.
2030 State* s = params.start;
2031 min->clear();
2032 absl::MutexLock lock(&mutex_);
2033 for (int i = 0; i < maxlen; i++) {
2034 if (previously_visited_states[s] > kMaxEltRepetitions)
2035 break;
2036 previously_visited_states[s]++;
2037
2038 // Stop if min is a match.
2039 State* ns = RunStateOnByte(s, kByteEndText);
2040 if (ns == NULL) // DFA out of memory
2041 return false;
2042 if (ns != DeadState && (ns == FullMatchState || ns->IsMatch()))
2043 break;
2044
2045 // Try to extend the string with low bytes.
2046 bool extended = false;
2047 for (int j = 0; j < 256; j++) {
2048 ns = RunStateOnByte(s, j);
2049 if (ns == NULL) // DFA out of memory
2050 return false;
2051 if (ns == FullMatchState ||
2052 (ns > SpecialStateMax && ns->ninst_ > 0)) {
2053 extended = true;
2054 min->append(1, static_cast<char>(j));
2055 s = ns;
2056 break;
2057 }
2058 }
2059 if (!extended)
2060 break;
2061 }
2062
2063 // Build maximum prefix.
2064 previously_visited_states.clear();
2065 s = params.start;
2066 max->clear();
2067 for (int i = 0; i < maxlen; i++) {
2068 if (previously_visited_states[s] > kMaxEltRepetitions)
2069 break;
2070 previously_visited_states[s] += 1;
2071
2072 // Try to extend the string with high bytes.
2073 bool extended = false;
2074 for (int j = 255; j >= 0; j--) {
2075 State* ns = RunStateOnByte(s, j);
2076 if (ns == NULL)
2077 return false;
2078 if (ns == FullMatchState ||
2079 (ns > SpecialStateMax && ns->ninst_ > 0)) {
2080 extended = true;
2081 max->append(1, static_cast<char>(j));
2082 s = ns;
2083 break;
2084 }
2085 }
2086 if (!extended) {
2087 // Done, no need for PrefixSuccessor.
2088 return true;
2089 }
2090 }
2091
2092 // Stopped while still adding to *max - round aaaaaaaaaa... to aaaa...b
2093 PrefixSuccessor(max);
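  // PrefixSuccessor (util/strutil) drops trailing 0xff bytes and then
  // increments the last remaining byte, so e.g. "aaa" becomes "aab".
  // If *max was entirely 0xff bytes the result is the empty string,
  // which the check below reports as "no maximum".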
2094
2095 // If there are no bytes left, we have no way to say "there is no maximum
2096 // string". We could make the interface more complicated and be able to
2097 // return "there is no maximum but here is a minimum", but that seems like
2098 // overkill -- the most common no-max case is all possible strings, so not
2099 // telling the caller that the empty string is the minimum match isn't a
2100 // great loss.
2101 if (max->empty())
2102 return false;
2103
2104 return true;
2105}
2106
2107// PossibleMatchRange for a Prog.
2108bool Prog::PossibleMatchRange(std::string* min, std::string* max, int maxlen) {
2109 // Have to use dfa_longest_ to get all strings for full matches.
2110 // For example, (a|aa) never matches aa in first-match mode.
2111 return GetDFA(kLongestMatch)->PossibleMatchRange(min, max, maxlen);
2112}
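
// For illustration only: RE2 exposes this as RE2::PossibleMatchRange in
// re2.h. A caller deriving a key range for a scan might do roughly:
//
//   std::string min, max;
//   if (re.PossibleMatchRange(&min, &max, /*maxlen=*/10)) {
//     // Roughly: any string the pattern matches compares >= min and <= max.
//   }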
2113
2114} // namespace re2
2115